| code | repo_name | path | language | license | size |
|---|---|---|---|---|---|
| stringlengths 3-1.05M | stringlengths 5-104 | stringlengths 4-251 | stringclasses (1 value) | stringclasses (15 values) | int64 3-1.05M |
import re
import os
import six
class Compiler(object):
RE_INTERPOLATE = re.compile(r'(\\)?([#!]){(.*?)}')
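# Matches Jade-style interpolation tokens such as "#{user.name}" or "!{raw}":
# group(1) is an optional escaping backslash, group(2) the marker ('#' is
# HTML-escaped on output, '!' is not), group(3) the expression in the braces.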
doctypes = {
'5': '<!DOCTYPE html>'
, 'xml': '<?xml version="1.0" encoding="utf-8" ?>'
, 'default': '<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">'
, 'transitional': '<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">'
, 'strict': '<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">'
, 'frameset': '<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Frameset//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-frameset.dtd">'
, '1.1': '<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">'
, 'basic': '<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML Basic 1.1//EN" "http://www.w3.org/TR/xhtml-basic/xhtml-basic11.dtd">'
, 'mobile': '<!DOCTYPE html PUBLIC "-//WAPFORUM//DTD XHTML Mobile 1.2//EN" "http://www.openmobilealliance.org/tech/DTD/xhtml-mobile12.dtd">'
}
inlineTags = [
'a'
, 'abbr'
, 'acronym'
, 'b'
, 'br'
, 'code'
, 'em'
, 'font'
, 'i'
, 'img'
, 'ins'
, 'kbd'
, 'map'
, 'samp'
, 'small'
, 'span'
, 'strong'
, 'sub'
, 'sup'
, 'textarea'
]
selfClosing = [
'meta'
, 'img'
, 'link'
, 'input'
, 'area'
, 'base'
, 'col'
, 'br'
, 'hr'
]
autocloseCode = 'if,for,block,filter,autoescape,with,trans,spaceless,comment,cache,macro,localize,compress,raw'.split(',')
filters = {}
def __init__(self, node, **options):
self.options = options
self.node = node
self.hasCompiledDoctype = False
self.hasCompiledTag = False
self.pp = options.get('pretty', True)
self.debug = options.get('compileDebug', False) is not False
self.filters.update(options.get('filters', {}))
self.doctypes.update(options.get('doctypes', {}))
# self.var_processor = options.get('var_processor', lambda x: x)
self.selfClosing.extend(options.get('selfClosing', []))
self.autocloseCode.extend(options.get('autocloseCode', []))
self.inlineTags.extend(options.get('inlineTags', []))
self.useRuntime = options.get('useRuntime', True)
self.extension = options.get('extension', None) or '.jade'
self.indents = 0
self.doctype = None
self.terse = False
self.xml = False
self.mixing = 0
self.variable_start_string = options.get("variable_start_string", "{{")
self.variable_end_string = options.get("variable_end_string", "}}")
if 'doctype' in self.options: self.setDoctype(options['doctype'])
self.instring = False
def var_processor(self, var):
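# e.g. var_processor('_ Welcome') -> '_("Welcome")'; strings prefixed with
# "_ " are wrapped in a gettext-style _() call, anything else is returned
# unchanged.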
if isinstance(var,six.string_types) and var.startswith('_ '):
var = '_("%s")'%var[2:]
return var
def compile_top(self):
return ''
def compile(self):
self.buf = [self.compile_top()]
self.lastBufferedIdx = -1
self.visit(self.node)
compiled = u''.join(self.buf)
if isinstance(compiled, six.binary_type):
compiled = six.text_type(compiled, 'utf8')
return compiled
def setDoctype(self, name):
self.doctype = self.doctypes.get(name or 'default',
'<!DOCTYPE %s>' % name)
self.terse = name in ['5','html']
self.xml = self.doctype.startswith('<?xml')
def buffer(self, str):
if self.lastBufferedIdx == len(self.buf):
self.lastBuffered += str
self.buf[self.lastBufferedIdx - 1] = self.lastBuffered
else:
self.buf.append(str)
self.lastBuffered = str;
self.lastBufferedIdx = len(self.buf)
def visit(self, node, *args, **kwargs):
# debug = self.debug
# if debug:
# self.buf.append('__jade.unshift({ lineno: %d, filename: %s });' % (node.line,('"%s"'%node.filename) if node.filename else '__jade[0].filename'));
# if node.debug==False and self.debug:
# self.buf.pop()
# self.buf.pop()
self.visitNode(node, *args, **kwargs)
# if debug: self.buf.append('__jade.shift();')
def visitNode (self, node, *args, **kwargs):
name = node.__class__.__name__
if self.instring and name != 'Tag':
self.buffer('\n')
self.instring = False
return getattr(self, 'visit%s' % name)(node, *args, **kwargs)
def visitLiteral(self, node):
self.buffer(node.str)
def visitBlock(self, block):
for node in block.nodes:
self.visit(node)
def visitCodeBlock(self, block):
self.buffer('{%% block %s %%}' % block.name)
if block.mode=='prepend':
self.buffer('%ssuper()%s' % (self.variable_start_string,
self.variable_end_string))
self.visitBlock(block)
if block.mode == 'append':
self.buffer('%ssuper()%s' % (self.variable_start_string,
self.variable_end_string))
self.buffer('{% endblock %}')
def visitDoctype(self,doctype=None):
if doctype and (doctype.val or not self.doctype):
self.setDoctype(doctype.val or 'default')
if self.doctype:
self.buffer(self.doctype)
self.hasCompiledDoctype = True
def visitMixin(self,mixin):
if mixin.block:
self.buffer('{%% macro %s(%s) %%}' % (mixin.name, mixin.args))
self.visitBlock(mixin.block)
self.buffer('{% endmacro %}')
else:
self.buffer('%s%s(%s)%s' % (self.variable_start_string, mixin.name,
mixin.args, self.variable_end_string))
def visitTag(self,tag):
self.indents += 1
name = tag.name
if not self.hasCompiledTag:
if not self.hasCompiledDoctype and 'html' == name:
self.visitDoctype()
self.hasCompiledTag = True
if self.pp and name not in self.inlineTags and not tag.inline:
self.buffer('\n' + ' ' * (self.indents - 1))
if name in self.inlineTags or tag.inline:
self.instring = False
closed = name in self.selfClosing and not self.xml
if tag.text:
t = tag.text.nodes[0]
if t.startswith(u'/'):
if len(t) > 1:
raise Exception('%s is self closing and should not have content.' % name)
closed = True
self.buffer('<%s' % name)
self.visitAttributes(tag.attrs)
self.buffer('/>' if not self.terse and closed else '>')
if not closed:
if tag.code: self.visitCode(tag.code)
if tag.text: self.buffer(self.interpolate(tag.text.nodes[0].lstrip()))
self.escape = 'pre' == tag.name
# empirically check if we only contain text
textOnly = tag.textOnly or not bool(len(tag.block.nodes))
self.instring = False
self.visit(tag.block)
if self.pp and not name in self.inlineTags and not textOnly:
self.buffer('\n' + ' ' * (self.indents-1))
self.buffer('</%s>' % name)
self.indents -= 1
def visitFilter(self,filter):
if filter.name not in self.filters:
if filter.isASTFilter:
raise Exception('unknown ast filter "%s"' % filter.name)
else:
raise Exception('unknown filter "%s"' % filter.name)
fn = self.filters.get(filter.name)
if filter.isASTFilter:
self.buf.append(fn(filter.block, self, filter.attrs))
else:
text = ''.join(filter.block.nodes)
text = self.interpolate(text)
filter.attrs = filter.attrs or {}
filter.attrs['filename'] = self.options.get('filename', None)
self.buffer(fn(text, filter.attrs))
def _interpolate(self, attr, repl):
return self.RE_INTERPOLATE.sub(lambda matchobj:repl(matchobj.group(3)),
attr)
def interpolate(self, text, escape=None):
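# e.g. with the default delimiters, interpolate('Hi #{name}!') returns
# 'Hi {{name|escape}}!', while interpolate('Hi !{name}!') omits the |escape filter.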
def repl(matchobj):
if escape is None:
if matchobj.group(2) == '!':
filter_string = ''
else:
filter_string = '|escape'
elif escape is True:
filter_string = '|escape'
elif escape is False:
filter_string = ''
return self.variable_start_string + matchobj.group(3) + \
filter_string + self.variable_end_string
return self.RE_INTERPOLATE.sub(repl, text)
def visitText(self,text):
text = ''.join(text.nodes)
text = self.interpolate(text)
self.buffer(text)
if self.pp:
self.buffer('\n')
def visitString(self,text):
instring = not text.inline
text = ''.join(text.nodes)
text = self.interpolate(text)
self.buffer(text)
self.instring = instring
def visitComment(self,comment):
if not comment.buffer: return
if self.pp:
self.buffer('\n' + ' ' * (self.indents))
self.buffer('<!--%s-->' % comment.val)
def visitAssignment(self,assignment):
self.buffer('{%% set %s = %s %%}' % (assignment.name, assignment.val))
def format_path(self,path):
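# e.g. format_path('layout') -> 'layout.jade' (the configured extension is
# appended), while format_path('layout.html') already has one and is returned
# unchanged.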
has_extension = os.path.basename(path).find('.') > -1
if not has_extension:
path += self.extension
return path
def visitExtends(self,node):
path = self.format_path(node.path)
self.buffer('{%% extends "%s" %%}' % (path))
def visitInclude(self,node):
path = self.format_path(node.path)
self.buffer('{%% include "%s" %%}' % (path))
def visitBlockComment(self, comment):
if not comment.buffer:
return
isConditional = comment.val.strip().startswith('if')
self.buffer('<!--[%s]>' % comment.val.strip() if isConditional else '<!--%s' % comment.val)
self.visit(comment.block)
self.buffer('<![endif]-->' if isConditional else '-->')
def visitConditional(self, conditional):
TYPE_CODE = {
'if': lambda x: 'if %s'%x,
'unless': lambda x: 'if not %s'%x,
'elif': lambda x: 'elif %s'%x,
'else': lambda x: 'else'
}
self.buf.append('{%% %s %%}' % TYPE_CODE[conditional.type](conditional.sentence))
if conditional.block:
self.visit(conditional.block)
for next in conditional.next:
self.visitConditional(next)
if conditional.type in ['if','unless']:
self.buf.append('{% endif %}')
def visitVar(self, var, escape=False):
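# e.g. visitVar('user.name', escape=True) -> '{{user.name|escape}}' with the
# default delimiters.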
var = self.var_processor(var)
return ('%s%s%s%s' % (self.variable_start_string, var,
'|escape' if escape else '', self.variable_end_string))
def visitCode(self,code):
if code.buffer:
val = code.val.lstrip()
self.buf.append(self.visitVar(val, code.escape))
else:
self.buf.append('{%% %s %%}' % code.val)
if code.block:
# if not code.buffer: self.buf.append('{')
self.visit(code.block)
# if not code.buffer: self.buf.append('}')
if not code.buffer:
codeTag = code.val.strip().split(' ', 1)[0]
if codeTag in self.autocloseCode:
self.buf.append('{%% end%s %%}' % codeTag)
def visitEach(self,each):
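# e.g. "each item in items" compiles to
# '{% for item in items|__pyjade_iter:1 %} ... {% endfor %}'.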
self.buf.append('{%% for %s in %s|__pyjade_iter:%d %%}' % (','.join(each.keys), each.obj, len(each.keys)))
self.visit(each.block)
self.buf.append('{% endfor %}')
def attributes(self,attrs):
return "%s__pyjade_attrs(%s)%s" % (self.variable_start_string, attrs, self.variable_end_string)
def visitDynamicAttributes(self, attrs):
buf, classes, params = [], [], {}
terse='terse=True' if self.terse else ''
for attr in attrs:
if attr['name'] == 'class':
classes.append('(%s)' % attr['val'])
else:
pair = "('%s',(%s))" % (attr['name'], attr['val'])
buf.append(pair)
if classes:
classes = " , ".join(classes)
buf.append("('class', (%s))" % classes)
buf = ', '.join(buf)
if self.terse: params['terse'] = 'True'
if buf: params['attrs'] = '[%s]' % buf
param_string = ', '.join(['%s=%s' % (n, v) for n, v in six.iteritems(params)])
if buf or terse:
self.buf.append(self.attributes(param_string))
def visitAttributes(self, attrs):
temp_attrs = []
for attr in attrs:
if (not self.useRuntime and not attr['name']=='class') or attr['static']: #
if temp_attrs:
self.visitDynamicAttributes(temp_attrs)
temp_attrs = []
n, v = attr['name'], attr['val']
if isinstance(v, six.string_types):
if self.useRuntime or attr['static']:
self.buf.append(' %s=%s' % (n, v))
else:
self.buf.append(' %s="%s"' % (n, self.visitVar(v)))
elif v is True:
if self.terse:
self.buf.append(' %s' % (n,))
else:
self.buf.append(' %s="%s"' % (n, n))
else:
temp_attrs.append(attr)
if temp_attrs: self.visitDynamicAttributes(temp_attrs)
@classmethod
def register_filter(cls, name, f):
cls.filters[name] = f
@classmethod
def register_autoclosecode(cls, name):
cls.autocloseCode.append(name)
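# Usage sketch (assumes a node tree already produced by the pyjade parser;
# the variable names here are illustrative only):
#   compiler = Compiler(root_node, pretty=True, doctype='5')
#   jinja_source = compiler.compile()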
| jjz/pyjade | pyjade/compiler.py | Python | mit | 14,182 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals, division, print_function
import logging
import warnings
import zlib
from psd_tools.utils import (read_fmt, read_pascal_string,
read_be_array, trimmed_repr, pad, synchronize)
from psd_tools.exceptions import Error
from psd_tools.constants import (Compression, Clipping, BlendMode,
ChannelID, TaggedBlock)
from psd_tools import compression
from psd_tools.debug import pretty_namedtuple
logger = logging.getLogger(__name__)
_LayerRecord = pretty_namedtuple('LayerRecord', [
'top', 'left', 'bottom', 'right',
'num_channels', 'channels',
'blend_mode', 'opacity', 'clipping', 'flags',
'mask_data', 'blending_ranges', 'name',
'tagged_blocks'
])
class LayerRecord(_LayerRecord):
def width(self):
return self.right - self.left
def height(self):
return self.bottom - self.top
Layers = pretty_namedtuple('Layers', 'length, layer_count, layer_records, channel_image_data')
LayerFlags = pretty_namedtuple('LayerFlags', 'transparency_protected visible')
LayerAndMaskData = pretty_namedtuple('LayerAndMaskData', 'layers global_mask_info tagged_blocks')
ChannelInfo = pretty_namedtuple('ChannelInfo', 'id length')
_MaskData = pretty_namedtuple('MaskData', 'top left bottom right default_color flags real_flags real_background')
LayerBlendingRanges = pretty_namedtuple('LayerBlendingRanges', 'composite_ranges channel_ranges')
_ChannelData = pretty_namedtuple('ChannelData', 'compression data')
_Block = pretty_namedtuple('Block', 'key data')
GlobalMaskInfo = pretty_namedtuple('GlobalMaskInfo', 'overlay color_components opacity kind')
class MaskData(_MaskData):
def width(self):
return self.right - self.left
def height(self):
return self.bottom - self.top
class ChannelData(_ChannelData):
def __repr__(self):
return "ChannelData(compression=%r %s, len(data)=%r)" % (
self.compression, Compression.name_of(self.compression),
len(self.data) if self.data is not None else None
)
def _repr_pretty_(self, p, cycle):
if cycle:
p.text('ChannelData(...)')
else:
p.text(repr(self))
class Block(_Block):
"""
Layer tagged block with extra info.
"""
def __repr__(self):
return "Block(%s %s, %s)" % (self.key, TaggedBlock.name_of(self.key),
trimmed_repr(self.data))
def _repr_pretty_(self, p, cycle):
if cycle:
p.text('Block(...)')
else:
with p.group(1, 'Block(', ')'):
p.breakable()
p.text("%s %s," % (self.key, TaggedBlock.name_of(self.key)))
p.breakable()
if isinstance(self.data, bytes):
p.text(trimmed_repr(self.data))
else:
p.pretty(self.data)
def read(fp, encoding, depth):
"""
Reads layers and masks information.
"""
length = read_fmt("I", fp)[0]
start_position = fp.tell()
layers = _read_layers(fp, encoding, depth)
# XXX: are tagged blocks really after the layers?
# XXX: does global mask reading really work?
global_mask_info = _read_global_mask_info(fp)
consumed_bytes = fp.tell() - start_position
synchronize(fp) # hack hack hack
tagged_blocks = _read_layer_tagged_blocks(fp, length - consumed_bytes)
consumed_bytes = fp.tell() - start_position
fp.seek(length-consumed_bytes, 1)
return LayerAndMaskData(layers, global_mask_info, tagged_blocks)
def _read_layers(fp, encoding, depth, length=None):
"""
Reads info about layers.
"""
if length is None:
length = read_fmt("I", fp)[0]
layer_count = read_fmt("h", fp)[0]
layer_records = []
for idx in range(abs(layer_count)):
layer = _read_layer_record(fp, encoding)
layer_records.append(layer)
channel_image_data = []
for layer in layer_records:
data = _read_channel_image_data(fp, layer, depth)
channel_image_data.append(data)
return Layers(length, layer_count, layer_records, channel_image_data)
def _read_layer_record(fp, encoding):
"""
Reads single layer record.
"""
top, left, bottom, right, num_channels = read_fmt("4i H", fp)
channel_info = []
for channel_num in range(num_channels):
info = ChannelInfo(*read_fmt("hI", fp))
channel_info.append(info)
sig = fp.read(4)
if sig != b'8BIM':
raise Error("Error parsing layer: invalid signature (%r)" % sig)
blend_mode = fp.read(4).decode('ascii')
if not BlendMode.is_known(blend_mode):
warnings.warn("Unknown blend mode (%s)" % blend_mode)
opacity, clipping, flags, extra_length = read_fmt("BBBxI", fp)
flags = LayerFlags(bool(flags & 1), not bool(flags & 2)) # why not?
if not Clipping.is_known(clipping):
warnings.warn("Unknown clipping: %s" % clipping)
start = fp.tell()
mask_data = _read_layer_mask_data(fp)
blending_ranges = _read_layer_blending_ranges(fp)
name = read_pascal_string(fp, encoding, 4)
remaining_length = extra_length - (fp.tell()-start)
tagged_blocks = _read_layer_tagged_blocks(fp, remaining_length)
remaining_length = extra_length - (fp.tell()-start)
fp.seek(remaining_length, 1) # skip the remainder
return LayerRecord(
top, left, bottom, right,
num_channels, channel_info,
blend_mode, opacity, clipping, flags,
mask_data, blending_ranges, name,
tagged_blocks
)
def _read_layer_tagged_blocks(fp, remaining_length):
"""
Reads a section of tagged blocks with additional layer information.
"""
blocks = []
start_pos = fp.tell()
read_bytes = 0
while read_bytes < remaining_length:
block = _read_additional_layer_info_block(fp)
read_bytes = fp.tell() - start_pos
if block is None:
break
blocks.append(block)
return blocks
def _read_additional_layer_info_block(fp):
"""
Reads a tagged block with additional layer information.
"""
sig = fp.read(4)
if sig not in [b'8BIM', b'8B64']:
fp.seek(-4, 1)
#warnings.warn("not a block: %r" % sig)
return
key = fp.read(4)
length = pad(read_fmt("I", fp)[0], 4)
data = fp.read(length)
return Block(key, data)
def _read_layer_mask_data(fp):
""" Reads layer mask or adjustment layer data. """
size = read_fmt("I", fp)[0]
if size not in [0, 20, 36]:
warnings.warn("Invalid layer data size: %d" % size)
fp.seek(size, 1)
return
if not size:
return
top, left, bottom, right, default_color, flags = read_fmt("4i 2B", fp)
if size == 20:
fp.seek(2, 1)
real_flags, real_background = None, None
else:
real_flags, real_background = read_fmt("2B", fp)
# XXX: is it correct to prefer data at the end?
top, left, bottom, right = read_fmt("4i", fp)
return MaskData(top, left, bottom, right, default_color, flags, real_flags, real_background)
def _read_layer_blending_ranges(fp):
""" Reads layer blending data. """
def read_channel_range():
src_start, src_end, dest_start, dest_end = read_fmt("4H", fp)
return (src_start, src_end), (dest_start, dest_end)
composite_ranges = None
channel_ranges = []
length = read_fmt("I", fp)[0]
if length:
composite_ranges = read_channel_range()
for x in range(length//8 - 1):
channel_ranges.append(read_channel_range())
return LayerBlendingRanges(composite_ranges, channel_ranges)
def _read_channel_image_data(fp, layer, depth):
"""
Reads image data for all channels in a layer.
"""
channel_data = []
bytes_per_pixel = depth // 8
for channel in layer.channels:
if channel.id == ChannelID.USER_LAYER_MASK:
w, h = layer.mask_data.width(), layer.mask_data.height()
else:
w, h = layer.width(), layer.height()
start_pos = fp.tell()
compress_type = read_fmt("H", fp)[0]
data = None
if compress_type == Compression.RAW:
data_size = w * h * bytes_per_pixel
data = fp.read(data_size)
elif compress_type == Compression.PACK_BITS:
byte_counts = read_be_array("H", h, fp)
data_size = sum(byte_counts) * bytes_per_pixel
data = fp.read(data_size)
elif compress_type == Compression.ZIP:
data = zlib.decompress(fp.read(channel.length - 2))
elif compress_type == Compression.ZIP_WITH_PREDICTION:
decompressed = zlib.decompress(fp.read(channel.length - 2))
data = compression.decode_prediction(decompressed, w, h, bytes_per_pixel)
if data is None:
return []
channel_data.append(ChannelData(compress_type, data))
remaining_bytes = channel.length - (fp.tell() - start_pos) - 2
if remaining_bytes > 0:
fp.seek(remaining_bytes, 1)
return channel_data
def _read_global_mask_info(fp):
"""
Reads global layer mask info.
"""
# XXX: Does it really work properly? What is it for?
start_pos = fp.tell()
length = read_fmt("H", fp)[0]
if length:
overlay_color_space, c1, c2, c3, c4, opacity, kind = read_fmt("H 4H HB", fp)
filler_length = length - (fp.tell()-start_pos)
if filler_length > 0:
fp.seek(filler_length, 1)
return GlobalMaskInfo(overlay_color_space, (c1, c2, c3, c4), opacity, kind)
else:
return None
def read_image_data(fp, header):
"""
Reads merged image pixel data which is stored at the end of PSD file.
"""
w, h = header.width, header.height
compress_type = read_fmt("H", fp)[0]
bytes_per_pixel = header.depth // 8
channel_byte_counts = []
if compress_type == Compression.PACK_BITS:
for ch in range(header.number_of_channels):
channel_byte_counts.append(read_be_array("H", h, fp))
channel_data = []
for channel_id in range(header.number_of_channels):
data = None
if compress_type == Compression.RAW:
data_size = w * h * bytes_per_pixel
data = fp.read(data_size)
elif compress_type == Compression.PACK_BITS:
byte_counts = channel_byte_counts[channel_id]
data_size = sum(byte_counts) * bytes_per_pixel
data = fp.read(data_size)
# are there any ZIP-encoded composite images in the wild?
elif compress_type == Compression.ZIP:
warnings.warn("ZIP compression of composite image is not supported.")
elif compress_type == Compression.ZIP_WITH_PREDICTION:
warnings.warn("ZIP_WITH_PREDICTION compression of composite image is not supported.")
if data is None:
return []
channel_data.append(ChannelData(compress_type, data))
return channel_data
| a-e-m/psd-tools | src/psd_tools/reader/layers.py | Python | mit | 11,085 |
# coding: latin-1
"""
@brief test log(time=1s)
"""
import unittest
from pysqllike.generic.iter_rows import IterRow, IterException
class TestSelect (unittest.TestCase):
def test_iter_simple(self):
lr = [("nom", 10), ("jean", 40)]
schema = [("nom", str), ("age", int)]
tbl = IterRow(schema, lr)
lr = list(tbl)
assert len(lr) == 2
if lr != [{'nom': 'nom', 'age': 10}, {'nom': 'jean', 'age': 40}]:
raise ValueError(str(lr))
tbl = IterRow(schema, lr, as_dict=False)
lr = list(tbl)
assert len(lr) == 2
if lr != [('nom', 10), ('jean', 40)]:
raise ValueError(str(lr))
def test_iter_simple_dict(self):
l0 = [{"nom": "jean", "age": 10},
{"nom": "j", "age": 20}]
tbl = IterRow(None, l0)
lr = list(tbl)
assert len(lr) == 2
if lr != l0:
raise ValueError(str(lr))
def test_iter_simple_dict2(self):
l0 = [{"nom": "jean", "age": 10},
{"nom": "j", "age": 20}]
tbl = IterRow(None, l0)
tbl2 = tbl.select(tbl.nom)
lr = list(tbl2)
assert len(lr) == 2
if lr != [{"nom": "jean"}, {"nom": "j"}]:
raise ValueError(str(lr))
def test_select_simple(self):
lr = [("jake", 10), ("jean", 40)]
schema = [("nom", str), ("age", int)]
tbl = IterRow(schema, lr)
lr = list(tbl.select(tbl.nom, tbl.age))
if lr != [{'age': 10, 'nom': 'jake'}, {'age': 40, 'nom': 'jean'}]:
raise Exception(str(lr))
def test_select_simple2(self):
lr = [("nom", 10), ("jean", 40)]
schema = [("nom", str), ("age", int)]
tbl = IterRow(schema, lr)
iter = tbl.select(tbl.nom, age2=tbl.age * 2, age3=tbl.age * 3)
lr = list(iter)
assert len(lr) == 2
if lr != [{'nom': 'nom', 'age2': 20, 'age3': 30},
{'nom': 'jean', 'age2': 80, 'age3': 120}]:
raise Exception(str(lr))
iter = tbl.select(tbl.nom, age2=tbl.age * 2)
sch = iter.Schema
assert sch[0].Name == "nom"
assert sch[1].Name == "age2"
def test_select_simple3(self):
lr = [("nom", 10), ("jean", 40), ("jeanne", 2)]
schema = [("nom", str), ("age", int)]
tbl = IterRow(schema, lr)
iter = tbl.select(tbl.nom, age2=tbl.age * 2)
iter2 = iter.select(iter.nom, age4=iter.age2 * 2)
lr = list(iter2)
assert len(lr) == 3
if lr != [{'age4': 40, 'nom': 'nom'}, {
'age4': 160, 'nom': 'jean'}, {'age4': 8, 'nom': 'jeanne'}]:
raise Exception(str(lr))
sch = iter2.Schema
assert sch[0].Name == "nom"
assert sch[1].Name == "age4"
def test_select_simple_square(self):
lr = [("nom", 10), ("jean", 40), ("jeanne", 2)]
schema = [("nom", str), ("age", int)]
tbl = IterRow(schema, lr)
iter = tbl.select(tbl.nom, age2=tbl.age, age3=tbl.age * 0.5)
iter2 = iter.select(iter.nom, age4=iter.age2 * iter.age3)
lr = list(iter2)
assert len(lr) == 3
if lr != [{'age4': 50.0, 'nom': 'nom'}, {
'age4': 800.0, 'nom': 'jean'}, {'age4': 2.0, 'nom': 'jeanne'}]:
raise Exception(str(lr))
sch = iter2.Schema
assert sch[0].Name == "nom"
assert sch[1].Name == "age4"
def test_select_mismatch(self):
lr = [("nom", 10), ("jean", 40), ("jeanne", 2)]
schema = [("nom", str), ("age", int)]
tbl = IterRow(schema, lr)
iter = tbl.select(tbl.nom, age2=tbl.age, age3=tbl.age * 0.5)
try:
iter.select(iter.nom, tbl.age)
raise TypeError(
"we should not be able to reach this code due to confusion between iter and tbl")
except IterException as e:
assert "mismatch" in str(e)
# however we do not check formulas...
def test_select_operators(self):
lr = [("nom", 10), ("jean", 40), ("jeanne", 2)]
schema = [("nom", str), ("age", int)]
tbl = IterRow(schema, lr)
iter = tbl.select(tbl.nom, age0=tbl.age,
agem=tbl.age * 0.5,
agea=tbl.age + 0.5,
ages=tbl.age - 0.5,
agep=tbl.age ** 0.5, agedd=tbl.age // 3, agemod=tbl.age % 3
)
res = list(iter)
exp = [{'age0': 10, 'ages': 9.5, 'agemod': 1, 'agem': 5.0, 'agep': 3.1622776601683795, 'agea': 10.5, 'agedd': 3, 'nom': 'nom'},
{'age0': 40,
'ages': 39.5,
'agemod': 1,
'agem': 20.0,
'agep': 6.324555320336759,
'agea': 40.5,
'agedd': 13,
'nom': 'jean'},
{'age0': 2, 'ages': 1.5, 'agemod': 2, 'agem': 1.0, 'agep': 1.4142135623730951, 'agea': 2.5, 'agedd': 0, 'nom': 'jeanne'}]
if res != exp:
raise ValueError(str(res))
def test_select_bracket(self):
lr = [("nom", 10), ("jean", 40), ("jeanne", 2)]
schema = [("nom", str), ("age", int)]
tbl = IterRow(schema, lr)
iter = tbl.select(tbl.nom, formula=tbl.age + (tbl.age + 2) / 3)
res = list(iter)
exp = [{'formula': 14.0, 'nom': 'nom'},
{'formula': 54.0, 'nom': 'jean'},
{'formula': 3.333333333333333, 'nom': 'jeanne'}]
if res != exp:
raise ValueError(str(res))
def test_where(self):
lr = [("nom", 10), ("jean", 40), ("jeanne", 2)]
schema = [("nom", str), ("age", int)]
tbl = IterRow(schema, lr)
iter = tbl.where(tbl.age == 40)
res = list(iter)
exp = [{'nom': "jean", 'age': 40}]
if res != exp:
raise ValueError(str(res))
def test_where2(self):
lr = [("nom", 10), ("jean", 40), ("jeanne", 2)]
schema = [("nom", str), ("age", int)]
tbl = IterRow(schema, lr)
iter = tbl.where(tbl.age >= 40)
res = list(iter)
exp = [{'nom': "jean", 'age': 40}]
if res != exp:
raise ValueError(str(res))
iter = tbl.where(tbl.age <= 2)
res = list(iter)
exp = [{'nom': "jeanne", 'age': 2}]
if res != exp:
raise ValueError(str(res))
iter = tbl.where(tbl.age < 2)
res = list(iter)
exp = []
if res != exp:
raise ValueError(str(res))
iter = tbl.where(tbl.age > 20)
res = list(iter)
exp = [{'nom': "jean", 'age': 40}]
if res != exp:
raise ValueError(str(res))
iter = tbl.where(tbl.age != 10)
res = list(iter)
assert len(res) == 2
def test_where_or(self):
lr = [("nom", 10), ("jean", 40), ("jeanne", 2)]
schema = [("nom", str), ("age", int)]
tbl = IterRow(schema, lr)
iter = tbl.where(
(tbl.age == 2).Or(
tbl.age == 40),
append_condition=True)
res = list(iter)
exp = [{'nom': 'jean', 'age': 40, '__unk__': True},
{'nom': "jeanne", 'age': 2, '__unk__': True}, ]
if res != exp:
raise ValueError(str(res) + "\n\n" + iter.print_schema())
iter = tbl.where((tbl.age == 10).Not())
res = list(iter)
assert len(res) == 2
if __name__ == "__main__":
unittest.main()
| sdpython/pysqllike | _unittests/ut_sql/test_select.py | Python | mit | 7,475 |
# Copyright (C) 2010-2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest as ut
import unittest_decorators as utx
from numpy.random import random
import numpy as np
import espressomd
import espressomd.interactions
import espressomd.magnetostatics
import espressomd.analyze
@utx.skipIfMissingGPU()
@utx.skipIfMissingFeatures(["DIPOLES", "ROTATION"])
class DDSGPUTest(ut.TestCase):
# Handle for espresso system
es = espressomd.System(box_l=[1.0, 1.0, 1.0])
es.seed = es.cell_system.get_state()['n_nodes'] * [1234]
def stopAll(self):
for i in range(len(self.es.part)):
self.es.part[i].v = np.array([0.0, 0.0, 0.0])
self.es.part[i].omega_body = np.array([0.0, 0.0, 0.0])
@ut.skipIf(es.cell_system.get_state()["n_nodes"] > 1,
"Skipping test: only runs for n_nodes == 1")
def test(self):
pf_dds_gpu = 2.34
pf_dawaanr = 3.524
ratio_dawaanr_dds_gpu = pf_dawaanr / pf_dds_gpu
l = 15
self.es.box_l = [l, l, l]
self.es.periodicity = [0, 0, 0]
self.es.time_step = 1E-4
self.es.cell_system.skin = 0.1
part_dip = np.zeros((3))
for n in [128, 541]:
dipole_modulus = 1.3
for i in range(n):
part_pos = np.array(random(3)) * l
costheta = 2 * random() - 1
sintheta = np.sin(np.arcsin(costheta))
phi = 2 * np.pi * random()
part_dip[0] = sintheta * np.cos(phi) * dipole_modulus
part_dip[1] = sintheta * np.sin(phi) * dipole_modulus
part_dip[2] = costheta * dipole_modulus
self.es.part.add(id=i, type=0, pos=part_pos, dip=part_dip,
v=np.array([0, 0, 0]), omega_body=np.array([0, 0, 0]))
self.es.non_bonded_inter[0, 0].lennard_jones.set_params(
epsilon=10.0, sigma=0.5, cutoff=0.55, shift="auto")
self.es.thermostat.set_langevin(kT=0.0, gamma=10.0, seed=42)
self.es.integrator.set_steepest_descent(
f_max=0.0, gamma=0.1, max_displacement=0.1)
self.es.integrator.run(500)
self.stopAll()
self.es.integrator.set_vv()
self.es.non_bonded_inter[0, 0].lennard_jones.set_params(
epsilon=0.0, sigma=0.0, cutoff=0.0, shift=0.0)
self.es.cell_system.skin = 0.0
self.es.time_step = 0.01
self.es.thermostat.turn_off()
# gamma should be zero in order to avoid the noise term in force
# and torque
self.es.thermostat.set_langevin(kT=1.297, gamma=0.0, seed=42)
dds_cpu = espressomd.magnetostatics.DipolarDirectSumCpu(
prefactor=pf_dawaanr)
self.es.actors.add(dds_cpu)
self.es.integrator.run(steps=0, recalc_forces=True)
dawaanr_f = []
dawaanr_t = []
for i in range(n):
dawaanr_f.append(self.es.part[i].f)
dawaanr_t.append(self.es.part[i].torque_lab)
dawaanr_e = self.es.analysis.energy()["total"]
del dds_cpu
for i in range(len(self.es.actors.active_actors)):
self.es.actors.remove(self.es.actors.active_actors[i])
self.es.integrator.run(steps=0, recalc_forces=True)
dds_gpu = espressomd.magnetostatics.DipolarDirectSumGpu(
prefactor=pf_dds_gpu)
self.es.actors.add(dds_gpu)
self.es.integrator.run(steps=0, recalc_forces=True)
ddsgpu_f = []
ddsgpu_t = []
for i in range(n):
ddsgpu_f.append(self.es.part[i].f)
ddsgpu_t.append(self.es.part[i].torque_lab)
ddsgpu_e = self.es.analysis.energy()["total"]
# compare
for i in range(n):
np.testing.assert_allclose(
np.array(dawaanr_t[i]),
ratio_dawaanr_dds_gpu * np.array(ddsgpu_t[i]),
err_msg='Torques on particle do not match for particle {}'
.format(i), atol=3e-3)
np.testing.assert_allclose(
np.array(dawaanr_f[i]),
ratio_dawaanr_dds_gpu * np.array(ddsgpu_f[i]),
err_msg='Forces on particle do not match for particle i={}'
.format(i), atol=3e-3)
self.assertAlmostEqual(
dawaanr_e,
ddsgpu_e * ratio_dawaanr_dds_gpu,
places=2,
msg='Energies for dawaanr {0} and dds_gpu {1} do not match.'
.format(dawaanr_e, ratio_dawaanr_dds_gpu * ddsgpu_e))
self.es.integrator.run(steps=0, recalc_forces=True)
del dds_gpu
self.es.actors.clear()
self.es.part.clear()
if __name__ == '__main__':
ut.main()
| psci2195/espresso-ffans | testsuite/python/dawaanr-and-dds-gpu.py | Python | gpl-3.0 | 5,580 |
# -*- coding: utf-8 -*-
u"""\
FlexiNS/MME Cause code mapping Checking
- NS/MME software version is NS15 or later
- The feature flag PRFILE002:2244-MME_CC_MAPPING_ENABLED has been enabled
- New CAUSE CODE SETS have been created, e.g. EPCEMM and EPCESM (names may differ per project):
- EPCEMM,TYPE=EMM,PROC=ATTREJ,INTCAUSE=142 To EXTERNAL CAUSE=15
- EPCEMM,TYPE=EMM,PROC=ATTREJ,INTCAUSE=96 To EXTERNAL CAUSE=7
- EPCESM,TYPE=ESM,PROC=PDNCR,INTCAUSE=165 To EXTERNAL CAUSE=33
"""
import re
from libs.checker import ResultInfo,CheckStatus
## Mandatory variables
##-----------------------------------------------------
#check_id = '20160308.TN20151013'
module_id = 'NSTNCN_20151013'
tag = ['flexins','china']
priority = 'normal'
name = "TN-China-20151013-ns_mme_cause_code_mapping"
desc = __doc__
criteria = u"""(1) Check that the NS/MME software version is ['N5 1.19-3','N5 1.17-5'] or later.
(2) The feature flag PRFILE002:2244-MME_CC_MAPPING_ENABLED is enabled.
(3) The newly created CAUSE CODE SETS (e.g. EPCEMM and EPCESM) satisfy the following conditions:
- EPCEMM,TYPE=EMM,PROC=ATTREJ,INTCAUSE=142 To EXTERNAL CAUSE=15
- EPCEMM,TYPE=EMM,PROC=ATTREJ,INTCAUSE=96 To EXTERNAL CAUSE=7
- EPCESM,TYPE=ESM,PROC=PDNCR,INTCAUSE=165 To EXTERNAL CAUSE=33
"""
##-----------------------------------------------------
## Optional variables
##-----------------------------------------------------
# available target versions:
target_versions = ['N5 1.19-3','N5 1.17-5']
check_commands = [
("ZWOI:;","show the NS data of the parameters defined in the PRFILE"),
("ZWQO:CR;","show the NS packages information"),
("ZKAL:;","show the NS cause code set names"),
("ZKAL:NAME=EPCEMM,TYPE=EMM,PROC=ATTREJ,INTCAUSE=142:;","show the NS cause code set EPCEMM information,You can change the cause code set name for your project"),
("ZKAL:NAME=EPCEMM,TYPE=EMM,PROC=ATTREJ,INTCAUSE=96:;","show the NS cause code set EPCEMM information,You can change the cause code set name for your project"),
("ZKAL:NAME=EPCESM,TYPE=ESM,PROC=PDNCR,INTCAUSE=165:;","show the NS cause code set EPCESM information,You can change the cause code set name for your project")
]
know_version_identify_Patt = r"\s*(N\d+\s+\d+.\d+-\d+)\s*"
##-----------------------------------------------------
##version_up_NS15_id('N5 1.17-5')
def version_up_NS15_id(NsVersionId):
up_id = 0
version_id_Patt = r"\s*N(\d+)\s+\d+.\d+-\d+\s*"
m=re.search(version_id_Patt,NsVersionId)
if m:
big_version_id = m.group(1)
if int(big_version_id) >= 5:
up_id =1
return up_id
def is_NS15_version_id(NsVersionId,Ns15VersionList):
if NsVersionId in Ns15VersionList:
return 1
else:
return 0
def Find_NS_MME_Patt_Return_Info_List(LogFile,CommandStr,InfoPatt,ReturnInfoLen):
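# Scans LogFile for the output block of CommandStr (from the command echo up to
# the "COMMAND EXECUTED" line) and collects, for every line matching InfoPatt,
# its first ReturnInfoLen capture groups, e.g. the version string printed by
# 'WQO:CR;'.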
Command_start=False
Command_end=False
return_info_list=[]
Command_start_Patt=r"\s*[Z]?%s\s*" % (CommandStr)
Command_start_Patt=Command_start_Patt.replace(':;','[:]?;')
## print "Command_start_Patt =",Command_start_Patt
Command_end_Patt=r"\s*COMMAND\s+EXECUTED\s*$"
Find_Info_Patt=InfoPatt
## print "Find_Info_Patt =",Find_Info_Patt
return_Len = ReturnInfoLen+1
fp=open(LogFile,'r')
for line in fp.readlines():
if Command_start==False and Command_end==False:
m=re.search(Command_start_Patt,line)
if m:
Command_start=True
continue
elif Command_start==True and Command_end==False:
m0=re.search(Command_end_Patt,line)
m1=re.search(Find_Info_Patt,line)
if m0:
Command_end=True
continue
if m1:
## print "line =",line
for infoId in range(1,return_Len):
try:
return_info_list.append(m1.group(infoId))
except IndexError:
return_info_list.append('Not_Find_Infomation')
continue
else:
break
fp.close()
return return_info_list
def returnNotMatchItemInList(List_two,ItemName):
ItemName_New = ''
for item_name in List_two:
if item_name != ItemName:
ItemName_New = item_name
return ItemName_New
## Mandatory function: run
def run(logfile):
result = ResultInfo(name,priority=priority)
info = []
errmsg = ''
errid = 1
Ns_version_Patt=r"\s*\S+\s+BU\s+\S+\s+(\w+\d+\s*\S+)\s+Y\s+Y\s*$"
try :
version = Find_NS_MME_Patt_Return_Info_List(logfile,'WQO:CR;',Ns_version_Patt,1)[0]
except IndexError:
version = ''
## print "\n****Find version id is : ",version
if is_NS15_version_id(version,target_versions)>0 or version_up_NS15_id(version)>0:
result.status = CheckStatus.PASSED
info.append(u" - 检查到 NS/MME 软件版本为:'%s' ,它属于或者高于NS15版本." % version)
else:
m=re.search(know_version_identify_Patt,version)
if m:
result.status = CheckStatus.FAILED
info.append(u" - 检查到 NS/MME 软件版本为:'%s' ,它不属于或者低于NS15版本." % version)
else:
result.status = CheckStatus.UNKNOWN
info.append(u" - 检查到 NS/MME 软件版本为:'%s' ,它不属于NS/MME主流版本,请手动确认版本信息." % version)
if version == '':
errmsg = ' Could not find the NS version release identification!'
result.update(info=info,error=errmsg)
return result
InfoPatt_mapping=r"\s*02244\s+MME_CC_MAPPING_ENABLED\s+(\S+)\s+YES\s*$"
try:
MME_CC_MAPPING_ENABLED_Value = int(Find_NS_MME_Patt_Return_Info_List(logfile,'WOI:;',InfoPatt_mapping,1)[0],16)
except IndexError:
MME_CC_MAPPING_ENABLED_Value = None
## print "MME_CC_MAPPING_ENABLED_Value = ",MME_CC_MAPPING_ENABLED_Value
if MME_CC_MAPPING_ENABLED_Value == 1:
result.status = CheckStatus.PASSED
info.append(u" - 检查到 NS/MME 功能开关PRFILE002:2244-MME_CC_MAPPING_ENABLED已开启.")
else:
result.status = CheckStatus.FAILED
info.append(u" - 检查到 NS/MME 功能开关PRFILE002:2244-MME_CC_MAPPING_ENABLED未开启.")
result.update(info=info,error=errmsg)
return result
EPCEMM_Patt = r"\s*(\S*)\s+EMM\s*$"
EPCESM_Patt = r"\s*(\S*)\s+ESM\s*$"
Cause_code_set_EMM_Name_List = Find_NS_MME_Patt_Return_Info_List(logfile,'KAL:;',EPCEMM_Patt,1)
Cause_code_set_ESM_Name_List = Find_NS_MME_Patt_Return_Info_List(logfile,'KAL:;',EPCESM_Patt,1)
Cause_code_set_EPCEMM = returnNotMatchItemInList(Cause_code_set_EMM_Name_List,'EMMDEF')
Cause_code_set_EPCESM = returnNotMatchItemInList(Cause_code_set_ESM_Name_List,'ESMDEF')
## print "Cause_code_set_EPCEMM = ",Cause_code_set_EPCEMM
## print "Cause_code_set_EPCESM = ",Cause_code_set_EPCESM
EmmIntcause142_Patt = r"\s*EXTERNAL\s+CAUSE\s+:\s+(\d+)\s*$"
EmmIntcause96_Patt = r"\s*EXTERNAL\s+CAUSE\s+:\s+(\d+)\s*$"
EsmIntcause165_Patt = r"\s*EXTERNAL\s+CAUSE\s+:\s+(\d+)\s*$"
EmmIntcause142_Command = 'KAL:NAME=%s,TYPE=EMM,PROC=ATTREJ,INTCAUSE=142:;' % (Cause_code_set_EPCEMM)
EmmIntcause96_Command = 'KAL:NAME=%s,TYPE=EMM,PROC=ATTREJ,INTCAUSE=96:;' % (Cause_code_set_EPCEMM)
EsmIntcause165_Command = 'KAL:NAME=%s,TYPE=ESM,PROC=PDNCR,INTCAUSE=165:;' % (Cause_code_set_EPCESM)
try:
EmmExternal142 = int(Find_NS_MME_Patt_Return_Info_List(logfile,EmmIntcause142_Command,EmmIntcause142_Patt,1)[0])
except IndexError:
EmmExternal142 = None
errmsg = errmsg + u"%s. No output was found for command \" %s \"\n" % (errid,EmmIntcause142_Command)
errid+=1
try:
EmmExternal96= int(Find_NS_MME_Patt_Return_Info_List(logfile,EmmIntcause96_Command,EmmIntcause96_Patt,1)[0])
except IndexError:
EmmExternal96 = None
errmsg = errmsg + u"%s. No output was found for command \" %s \"\n" % (errid,EmmIntcause96_Command)
errid+=1
try:
EsmExternal165 = int(Find_NS_MME_Patt_Return_Info_List(logfile,EsmIntcause165_Command,EsmIntcause165_Patt,1)[0])
except IndexError:
EsmExternal165 = None
errmsg = errmsg + u"%s. No output was found for command \" %s \"\n" % (errid,EsmIntcause165_Command)
errid+=1
## print "EmmExternal142 = ",EmmExternal142
## print "EmmExternal96 = ",EmmExternal96
## print "EsmExternal165 = ",EsmExternal165
EmmExternal142_info_str = u" - Detected NS/MME CAUSE CODE SET: %s,TYPE=EMM,PROC=ATTREJ,INTCAUSE=142 To EXTERNAL CAUSE=%s" % (Cause_code_set_EPCEMM,EmmExternal142)
EmmExternal96_info_str = u" - Detected NS/MME CAUSE CODE SET: %s,TYPE=EMM,PROC=ATTREJ,INTCAUSE=96 To EXTERNAL CAUSE=%s" % (Cause_code_set_EPCEMM,EmmExternal96)
EsmExternal165_info_str = u" - Detected NS/MME CAUSE CODE SET: %s,TYPE=ESM,PROC=PDNCR,INTCAUSE=165 To EXTERNAL CAUSE=%s" % (Cause_code_set_EPCESM,EsmExternal165)
info_dic = {'EmmExternal142':[15,EmmExternal142,EmmExternal142_info_str],
'EmmExternal96':[7,EmmExternal96,EmmExternal96_info_str],
'EsmExternal165':[33,EsmExternal165,EsmExternal165_info_str]
}
for check_Items in info_dic:
#print check_Items,info_dic[check_Items][1]
if info_dic[check_Items][0] == info_dic[check_Items][1]:
info.append(info_dic[check_Items][2])
else:
info.append(info_dic[check_Items][2].replace('- ','- !'))
result.status = CheckStatus.FAILED
#info=[line+"\n" for line in info]
result.update(info=info,error=errmsg)
return result
| maxli99/SmartChecker | modules/flexins/TN_CN_20151013_ns_mme_cause_code_mapping.py | Python | mit | 8,823 |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "mentorlol.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| amaunder21/c4tkmentors | manage.py | Python | mit | 252 |
# Copyright (c) by it's authors.
# Some rights reserved. See LICENSE, AUTHORS.
import wallaby.FXUI as FXUI
from wallaby.qt_combat import *
from wallaby.frontends.qt.widgets.labels.webView import WebView
#============================================================================#
# The group name in designer widgetbox #
#----------------------------------------------------------------------------#
DESIGNER_GROUP_NAME = "wallaby@fX"
#============================================================================#
# Plugin for WebView #
#----------------------------------------------------------------------------#
class WebViewPlugin(QtDesigner.QPyDesignerCustomWidgetPlugin):
def __init__(self, parent=None):
super(WebViewPlugin, self).__init__(parent)
self.initialized = False
def initialize(self, formEditor):
if self.initialized:
return
self.initialized = True
def isInitialized(self):
return self.initialized
def isContainer(self):
return False
def icon(self):
return FXUI.icon()
def domXml(self):
return '<widget class="WebView" name="webView">\n</widget>\n'
def group(self):
return DESIGNER_GROUP_NAME
def includeFile(self):
return "wallaby.frontends.qt.widgets.labels.webView"
def name(self):
return "WebView"
def toolTip(self):
return ""
def whatsThis(self):
return ""
def createWidget(self, parent):
return WebView(parent)
| FreshXOpenSource/wallaby-frontend-qt | wallaby/frontends/qt/plugins/webviewplugin.py | Python | bsd-2-clause | 1,640 |
# Copyright: (c) 2012-2014, Michael DeHaan <[email protected]>
# Copyright: (c) 2016, Toshio Kuratomi <[email protected]>
# Copyright: (c) 2018, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import getpass
import os
import re
import subprocess
import sys
from abc import ABCMeta, abstractmethod
from ansible.cli.arguments import option_helpers as opt_help
from ansible import constants as C
from ansible import context
from ansible.errors import AnsibleError
from ansible.inventory.manager import InventoryManager
from ansible.module_utils.six import with_metaclass, string_types
from ansible.module_utils._text import to_bytes, to_text
from ansible.parsing.dataloader import DataLoader
from ansible.parsing.vault import PromptVaultSecret, get_file_vault_secret
from ansible.plugins.loader import add_all_plugin_dirs
from ansible.release import __version__
from ansible.utils.collection_loader import set_collection_playbook_paths
from ansible.utils.display import Display
from ansible.utils.path import unfrackpath
from ansible.vars.manager import VariableManager
try:
import argcomplete
HAS_ARGCOMPLETE = True
except ImportError:
HAS_ARGCOMPLETE = False
display = Display()
class CLI(with_metaclass(ABCMeta, object)):
''' code behind bin/ansible* programs '''
_ITALIC = re.compile(r"I\(([^)]+)\)")
_BOLD = re.compile(r"B\(([^)]+)\)")
_MODULE = re.compile(r"M\(([^)]+)\)")
_URL = re.compile(r"U\(([^)]+)\)")
_CONST = re.compile(r"C\(([^)]+)\)")
PAGER = 'less'
# -F (quit-if-one-screen) -R (allow raw ansi control chars)
# -S (chop long lines) -X (disable termcap init and de-init)
LESS_OPTS = 'FRSX'
SKIP_INVENTORY_DEFAULTS = False
def __init__(self, args, callback=None):
"""
Base init method for all command line programs
"""
if not args:
raise ValueError('A non-empty list for args is required')
self.args = args
self.parser = None
self.callback = callback
@abstractmethod
def run(self):
"""Run the ansible command
Subclasses must implement this method. It does the actual work of
running an Ansible command.
"""
self.parse()
display.vv(to_text(opt_help.version(self.parser.prog)))
if C.CONFIG_FILE:
display.v(u"Using %s as config file" % to_text(C.CONFIG_FILE))
else:
display.v(u"No config file found; using defaults")
# warn about deprecated config options
for deprecated in C.config.DEPRECATED:
name = deprecated[0]
why = deprecated[1]['why']
if 'alternatives' in deprecated[1]:
alt = ', use %s instead' % deprecated[1]['alternatives']
else:
alt = ''
ver = deprecated[1]['version']
display.deprecated("%s option, %s %s" % (name, why, alt), version=ver)
@staticmethod
def split_vault_id(vault_id):
# return (before_@, after_@)
# if no @, return whole string as after_
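# e.g. split_vault_id('dev@prompt') -> ('dev', 'prompt'),
# split_vault_id('prompt') -> (None, 'prompt')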
if '@' not in vault_id:
return (None, vault_id)
parts = vault_id.split('@', 1)
ret = tuple(parts)
return ret
@staticmethod
def build_vault_ids(vault_ids, vault_password_files=None,
ask_vault_pass=None, create_new_password=None,
auto_prompt=True):
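# e.g. build_vault_ids([], ['~/.vault_pass.txt'], ask_vault_pass=True) yields
# ['default@~/.vault_pass.txt', 'default@prompt_ask_vault_pass'], assuming the
# default vault identity is 'default'.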
vault_password_files = vault_password_files or []
vault_ids = vault_ids or []
# convert vault_password_files into vault_ids slugs
for password_file in vault_password_files:
id_slug = u'%s@%s' % (C.DEFAULT_VAULT_IDENTITY, password_file)
# note this makes --vault-id higher precedence than --vault-password-file
# if we want to intertwingle them in order probably need a cli callback to populate vault_ids
# used by --vault-id and --vault-password-file
vault_ids.append(id_slug)
# if an action needs an encrypt password (create_new_password=True) and we don't
# have other secrets set up, then automatically add a password prompt as well.
# prompts can't/shouldn't work without a tty, so don't add prompt secrets
if ask_vault_pass or (not vault_ids and auto_prompt):
id_slug = u'%s@%s' % (C.DEFAULT_VAULT_IDENTITY, u'prompt_ask_vault_pass')
vault_ids.append(id_slug)
return vault_ids
# TODO: remove the now unused args
@staticmethod
def setup_vault_secrets(loader, vault_ids, vault_password_files=None,
ask_vault_pass=None, create_new_password=False,
auto_prompt=True):
# list of tuples
vault_secrets = []
# Depending on the vault_id value (including how --ask-vault-pass / --vault-password-file create a vault_id)
# we need to show different prompts. This is for compat with older Towers that expect a
# certain vault password prompt format, so 'prompt_ask_vault_pass' vault_id gets the old format.
prompt_formats = {}
# If there are configured default vault identities, they are considered 'first'
# so we prepend them to vault_ids (from cli) here
vault_password_files = vault_password_files or []
if C.DEFAULT_VAULT_PASSWORD_FILE:
vault_password_files.append(C.DEFAULT_VAULT_PASSWORD_FILE)
if create_new_password:
prompt_formats['prompt'] = ['New vault password (%(vault_id)s): ',
'Confirm new vault password (%(vault_id)s): ']
# 2.3 format prompts for --ask-vault-pass
prompt_formats['prompt_ask_vault_pass'] = ['New Vault password: ',
'Confirm New Vault password: ']
else:
prompt_formats['prompt'] = ['Vault password (%(vault_id)s): ']
# The format when we use just --ask-vault-pass needs to match 'Vault password:\s*?$'
prompt_formats['prompt_ask_vault_pass'] = ['Vault password: ']
vault_ids = CLI.build_vault_ids(vault_ids,
vault_password_files,
ask_vault_pass,
create_new_password,
auto_prompt=auto_prompt)
for vault_id_slug in vault_ids:
vault_id_name, vault_id_value = CLI.split_vault_id(vault_id_slug)
if vault_id_value in ['prompt', 'prompt_ask_vault_pass']:
# --vault-id some_name@prompt_ask_vault_pass --vault-id other_name@prompt_ask_vault_pass will be a little
# confusing since it will use the old format without the vault id in the prompt
built_vault_id = vault_id_name or C.DEFAULT_VAULT_IDENTITY
# choose the prompt based on --vault-id=prompt or --ask-vault-pass. --ask-vault-pass
# always gets the old format for Tower compatibility.
# ie, we used --ask-vault-pass, so we need to use the old vault password prompt
# format since Tower needs to match on that format.
prompted_vault_secret = PromptVaultSecret(prompt_formats=prompt_formats[vault_id_value],
vault_id=built_vault_id)
# an empty or invalid password from the prompt will warn and continue to the next
# without erroring globally
try:
prompted_vault_secret.load()
except AnsibleError as exc:
display.warning('Error in vault password prompt (%s): %s' % (vault_id_name, exc))
raise
vault_secrets.append((built_vault_id, prompted_vault_secret))
# update loader with new secrets incrementally, so we can load a vault password
# that is encrypted with a vault secret provided earlier
loader.set_vault_secrets(vault_secrets)
continue
# assuming anything else is a password file
display.vvvvv('Reading vault password file: %s' % vault_id_value)
# read vault_pass from a file
file_vault_secret = get_file_vault_secret(filename=vault_id_value,
vault_id=vault_id_name,
loader=loader)
# an invalid password file will error globally
try:
file_vault_secret.load()
except AnsibleError as exc:
display.warning('Error in vault password file loading (%s): %s' % (vault_id_name, to_text(exc)))
raise
if vault_id_name:
vault_secrets.append((vault_id_name, file_vault_secret))
else:
vault_secrets.append((C.DEFAULT_VAULT_IDENTITY, file_vault_secret))
# update loader with as-yet-known vault secrets
loader.set_vault_secrets(vault_secrets)
return vault_secrets
@staticmethod
def ask_passwords():
''' prompt for connection and become passwords if needed '''
op = context.CLIARGS
sshpass = None
becomepass = None
become_prompt = ''
become_prompt_method = "BECOME" if C.AGNOSTIC_BECOME_PROMPT else op['become_method'].upper()
try:
if op['ask_pass']:
sshpass = getpass.getpass(prompt="SSH password: ")
become_prompt = "%s password[defaults to SSH password]: " % become_prompt_method
if sshpass:
sshpass = to_bytes(sshpass, errors='strict', nonstring='simplerepr')
else:
become_prompt = "%s password: " % become_prompt_method
if op['become_ask_pass']:
becomepass = getpass.getpass(prompt=become_prompt)
if op['ask_pass'] and becomepass == '':
becomepass = sshpass
if becomepass:
becomepass = to_bytes(becomepass)
except EOFError:
pass
return (sshpass, becomepass)
def validate_conflicts(self, op, runas_opts=False, fork_opts=False):
''' check for conflicting options '''
if fork_opts:
if op.forks < 1:
self.parser.error("The number of processes (--forks) must be >= 1")
return op
@abstractmethod
def init_parser(self, usage="", desc=None, epilog=None):
"""
Create an options parser for most ansible scripts
Subclasses need to implement this method. They will usually call the base class's
init_parser to create a basic version and then add their own options on top of that.
An implementation will look something like this::
def init_parser(self):
super(MyCLI, self).init_parser(usage="My Ansible CLI", inventory_opts=True)
ansible.arguments.option_helpers.add_runas_options(self.parser)
self.parser.add_option('--my-option', dest='my_option', action='store')
"""
self.parser = opt_help.create_base_parser(os.path.basename(self.args[0]), usage=usage, desc=desc, epilog=epilog, )
@abstractmethod
def post_process_args(self, options):
"""Process the command line args
Subclasses need to implement this method. This method validates and transforms the command
line arguments. It can be used to check whether conflicting values were given, whether filenames
exist, etc.
An implementation will look something like this::
def post_process_args(self, options):
options = super(MyCLI, self).post_process_args(options)
if options.addition and options.subtraction:
raise AnsibleOptionsError('Only one of --addition and --subtraction can be specified')
if isinstance(options.listofhosts, string_types):
options.listofhosts = string_types.split(',')
return options
"""
# process tags
if hasattr(options, 'tags') and not options.tags:
# optparse defaults does not do what's expected
options.tags = ['all']
if hasattr(options, 'tags') and options.tags:
tags = set()
for tag_set in options.tags:
for tag in tag_set.split(u','):
tags.add(tag.strip())
options.tags = list(tags)
# process skip_tags
if hasattr(options, 'skip_tags') and options.skip_tags:
skip_tags = set()
for tag_set in options.skip_tags:
for tag in tag_set.split(u','):
skip_tags.add(tag.strip())
options.skip_tags = list(skip_tags)
# process inventory options except for CLIs that require their own processing
if hasattr(options, 'inventory') and not self.SKIP_INVENTORY_DEFAULTS:
if options.inventory:
# should always be list
if isinstance(options.inventory, string_types):
options.inventory = [options.inventory]
# Ensure full paths when needed
options.inventory = [unfrackpath(opt, follow=False) if ',' not in opt else opt for opt in options.inventory]
else:
options.inventory = C.DEFAULT_HOST_LIST
return options
def parse(self):
"""Parse the command line args
This method parses the command line arguments. It uses the parser
stored in the self.parser attribute and saves the args and options in
context.CLIARGS.
Subclasses need to implement two helper methods, init_parser() and post_process_args() which
are called from this function before and after parsing the arguments.
"""
self.init_parser()
if HAS_ARGCOMPLETE:
argcomplete.autocomplete(self.parser)
options = self.parser.parse_args(self.args[1:])
options = self.post_process_args(options)
context._init_global_context(options)
@staticmethod
def version_info(gitinfo=False):
''' return full ansible version info '''
if gitinfo:
# expensive call, use with care
ansible_version_string = opt_help.version()
else:
ansible_version_string = __version__
ansible_version = ansible_version_string.split()[0]
ansible_versions = ansible_version.split('.')
for counter in range(len(ansible_versions)):
if ansible_versions[counter] == "":
ansible_versions[counter] = 0
try:
ansible_versions[counter] = int(ansible_versions[counter])
except Exception:
pass
if len(ansible_versions) < 3:
for counter in range(len(ansible_versions), 3):
ansible_versions.append(0)
return {'string': ansible_version_string.strip(),
'full': ansible_version,
'major': ansible_versions[0],
'minor': ansible_versions[1],
'revision': ansible_versions[2]}
@staticmethod
def pager(text):
''' find reasonable way to display text '''
# this is a much simpler form of what is in pydoc.py
if not sys.stdout.isatty():
display.display(text, screen_only=True)
elif 'PAGER' in os.environ:
if sys.platform == 'win32':
display.display(text, screen_only=True)
else:
CLI.pager_pipe(text, os.environ['PAGER'])
else:
p = subprocess.Popen('less --version', shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
p.communicate()
if p.returncode == 0:
CLI.pager_pipe(text, 'less')
else:
display.display(text, screen_only=True)
@staticmethod
def pager_pipe(text, cmd):
''' pipe text through a pager '''
if 'LESS' not in os.environ:
os.environ['LESS'] = CLI.LESS_OPTS
try:
cmd = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE, stdout=sys.stdout)
cmd.communicate(input=to_bytes(text))
except IOError:
pass
except KeyboardInterrupt:
pass
@classmethod
def tty_ify(cls, text):
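# e.g. tty_ify("use M(copy) with C(mode=0644)") -> "use [copy] with `mode=0644'"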
t = cls._ITALIC.sub("`" + r"\1" + "'", text) # I(word) => `word'
t = cls._BOLD.sub("*" + r"\1" + "*", t) # B(word) => *word*
t = cls._MODULE.sub("[" + r"\1" + "]", t) # M(word) => [word]
t = cls._URL.sub(r"\1", t) # U(word) => word
t = cls._CONST.sub("`" + r"\1" + "'", t) # C(word) => `word'
return t
@staticmethod
def _play_prereqs():
options = context.CLIARGS
# all needs loader
loader = DataLoader()
basedir = options.get('basedir', False)
if basedir:
loader.set_basedir(basedir)
add_all_plugin_dirs(basedir)
set_collection_playbook_paths(basedir)
vault_ids = list(options['vault_ids'])
default_vault_ids = C.DEFAULT_VAULT_IDENTITY_LIST
vault_ids = default_vault_ids + vault_ids
vault_secrets = CLI.setup_vault_secrets(loader,
vault_ids=vault_ids,
vault_password_files=list(options['vault_password_files']),
ask_vault_pass=options['ask_vault_pass'],
auto_prompt=False)
loader.set_vault_secrets(vault_secrets)
# create the inventory, and filter it based on the subset specified (if any)
inventory = InventoryManager(loader=loader, sources=options['inventory'])
# create the variable manager, which will be shared throughout
# the code, ensuring a consistent view of global variables
variable_manager = VariableManager(loader=loader, inventory=inventory, version_info=CLI.version_info(gitinfo=False))
return loader, inventory, variable_manager
@staticmethod
def get_host_list(inventory, subset, pattern='all'):
no_hosts = False
if len(inventory.list_hosts()) == 0:
# Empty inventory
if C.LOCALHOST_WARNING and pattern not in C.LOCALHOST:
display.warning("provided hosts list is empty, only localhost is available. Note that the implicit localhost does not match 'all'")
no_hosts = True
inventory.subset(subset)
hosts = inventory.list_hosts(pattern)
if not hosts and no_hosts is False:
raise AnsibleError("Specified hosts and/or --limit does not match any hosts")
return hosts
| t794104/ansible | lib/ansible/cli/__init__.py | Python | gpl-3.0 | 19,256 |
#!/usr/bin/env python
"""Tests for the directory module"""
import os
from translate.storage import directory
class TestDirectory(object):
"""a test class to run tests on a test Pootle Server"""
def setup_method(self, method):
"""sets up a test directory"""
print("setup_method called on", self.__class__.__name__)
self.testdir = "%s_testdir" % (self.__class__.__name__)
self.cleardir(self.testdir)
os.mkdir(self.testdir)
def teardown_method(self, method):
"""removes the attributes set up by setup_method"""
self.cleardir(self.testdir)
def cleardir(self, dirname):
"""removes the given directory"""
if os.path.exists(dirname):
for dirpath, subdirs, filenames in os.walk(dirname, topdown=False):
for name in filenames:
os.remove(os.path.join(dirpath, name))
for name in subdirs:
os.rmdir(os.path.join(dirpath, name))
if os.path.exists(dirname):
os.rmdir(dirname)
assert not os.path.exists(dirname)
def touchfiles(self, dir, filenames, content=None):
for filename in filenames:
with open(os.path.join(dir, filename), "w") as fh:
if content:
fh.write(content)
def mkdir(self, dir):
"""Makes a directory inside self.testdir."""
os.mkdir(os.path.join(self.testdir, dir))
def test_created(self):
"""test that the directory actually exists"""
print(self.testdir)
assert os.path.isdir(self.testdir)
def test_basic(self):
"""Tests basic functionality."""
files = ["a.po", "b.po", "c.po"]
files.sort()
self.touchfiles(self.testdir, files)
d = directory.Directory(self.testdir)
filenames = [name for dir, name in d.getfiles()]
filenames.sort()
assert filenames == files
def test_structure(self):
"""Tests a small directory structure."""
files = ["a.po", "b.po", "c.po"]
self.touchfiles(self.testdir, files)
self.mkdir("bla")
self.touchfiles(os.path.join(self.testdir, "bla"), files)
d = directory.Directory(self.testdir)
filenames = [name for dirname, name in d.getfiles()]
filenames.sort()
files = files * 2
files.sort()
assert filenames == files
def test_getunits(self):
"""Tests basic functionality."""
files = ["a.po", "b.po", "c.po"]
posource = '''msgid "bla"\nmsgstr "blabla"\n'''
self.touchfiles(self.testdir, files, posource)
d = directory.Directory(self.testdir)
for unit in d.getunits():
assert unit.target == "blabla"
assert len(d.getunits()) == 3
| claudep/translate | translate/storage/test_directory.py | Python | gpl-2.0 | 2,805 |
# This file is part of OtfBot.
#
# OtfBot is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# OtfBot is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with OtfBot; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
# (c) 2008 - 2010 by Thomas Wiegart
#
"""
Rejoin, if kicked. (note: this is often a bad idea!)
"""
from otfbot.lib import chatMod
class Plugin(chatMod.chatMod):
def __init__(self, bot):
self.bot = bot
def kickedFrom(self, channel, kicker, message):
if int(self.bot.config.get("enabled", False, "autorejoin", self.bot.network, channel)):
self.bot.join(channel)
| Farthen/OTFBot | otfbot/plugins/ircClient/autorejoin.py | Python | gpl-2.0 | 1,120 |
from distutils.core import setup
setup(
name='LSJuicer',
version='0.1dev',
author='Ardo Illaste',
author_email='[email protected]',
packages=['lsjuicer',
'lsjuicer.data',
'lsjuicer.data.analysis',
'lsjuicer.data.models',
'lsjuicer.data.pipes',
'lsjuicer.inout',
'lsjuicer.inout.converters',
'lsjuicer.inout.readers',
'lsjuicer.inout.db',
'lsjuicer.resources',
'lsjuicer.static',
'lsjuicer.ui.items',
'lsjuicer.ui.scenes',
'lsjuicer.ui.plot',
'lsjuicer.ui.tabs',
'lsjuicer.ui.views',
'lsjuicer.ui.widgets',
'lsjuicer.ui.windows',
'lsjuicer.util',
],
scripts=['bin/run_qt.py'],
url='https://code.google.com/p/lsjuicer/',
license='LICENSE.txt',
description='',
long_description=open('README.txt').read(),
#install_requires=[
# "SQLAlchemy>=0.8.1",
# "alembic>=0.5.0",
# "numpy>=1.7.1",
# "scikit-learn>=0.13.1",
# "scikits-image>=0.7.1",
# "scipy>=0.12.0",
# "webcolors>=1.4",
#],
)
| ardoi/datajuicer | setup.py | Python | gpl-3.0 | 1,212 |
#!/usr/bin/env python
"""
BeginDate:20050824
CurrentRevisionDate:20091021
Development Version : xayastats 001
Release Version: pre-release
Filename: xayastats.py
Filedescription: xayastats.py contains basic statistics and exploratory data analysis
primitives.
Author(s): Mishtu Banerjee
Contact: [email protected]
Copyright: The Authors
License: Distributed under MIT License
[http://opensource.org/licenses/mit-license.html]
Environment: Programmed and tested under Python 2.3.3 on Windows 2000.
Dependencies:
Python Interpreter and base libraries.
xayacore
==============================================
XAYAstats_001 A Toolkit for Model Driven Data Analysis
==============================================
XAYAstats adds basic statistics capabilities to the data processing
capabilities of XAYAcore.
XAYAstats adds control functions that call on the lower level genStats functions
that operate on lists, so they can work with XAYA format data sets.
This module does not use Numeric -- to limit dependencies It simply
uses the Python built in fns. It uses a functional style, starting with
simple stat functions, and building more complex stats from them like
in the language R or in MatLab.
Send bugs, fixes, suggestions to [email protected] (Thanks)
Description of Functionality of XAYAstats
The basic unit in XAYAstats is a 'Dataset'.
While primitives in XAYAstats operate on lists, the "control" functions
in XAYAstats operate on Datasets (from which they extract and compose lists
as needed. genStats assumes its input is a "dataset". A dataset has
3 core components: 'VARIABLES', 'DATA', 'VARTYPES'
There are several optional components: GROUPS, SORTS, VARDICTIONARY.
Each of these components is represented as a XAYAcore format graph.
A Dataset is a collection of these graphs, held in a Python dictionary.
a dataset is a dictionary that contains several XAYA format graphs as
its values:('DATA' and 'VARIABLES' are required, others are optional)
'DATA': keys are "autonumbers" representing a line of data, and
values are the tuple of data
'VARIABLES': keys are the variable names, and values are the
index position of that variable in the tuples in 'DATA'.
'VARTYPES'. keys are the variable names and values are the data type.
There are five data types:
'INTEGER' -- a whole number
'NUMERICAL' -- any number with a decimal
'CNUMBER' -- a complex number -- not currently supported
'CATEGORICAL' -- a series of labels
'TEXT' -- free text.
'VARTYPES' in this case are a set of data-analysis centric categories.
Additional types would exist in a database centric,
or programming language centric characterization of data types.
'DATA' and 'VARIABLES' are mandatory keys, 'VARTYPES' is optional.
In the abscence of explicit type information, data
is cast based on a series of tests (see the function testTypes)
Example of the structure of a XAYAstats dataset
dataset = {'VARIABLES' : {'Var1' : [0],
'Var2': [1],
'Var3': [ 2] }
'DATA' : {1 : [1,2,3],
2 : [4, 5, 6]}
'TYPES' : {'Var1' : 'CATEGORICAL',
'Var2' : 'NUMERICAL',
'Var3' : 'INTEGER'}
}
Description of Functionality of XAYAstats data analysis primitives:
These 'primitives' for data analysis generate
basic statistics. They can be called individually.
They can be combined. If you have a module to access
dbs and files and represent the data as lists, then
this package can calculate all basic stats on those
lists.
Probably 80 % of the stats people calculate are these
very simple stats.
UNIVARIATE STATISTICAL FUNCTIONS:
getStats -- Calculates univariate statistics for a dataset based on
a user-supplied list of selected statistics. Returns a XAYAstats
format dataset.
getSummaryStats -- Calculates a pre-defined list of summary statistics
for a dataset. Returns results in a dictionary format that is easy to
display with the pretty-print function.
callStats -- Calls the individual statistics functions and returns a
XAYAstats format dataset. Is itself called by getStats. Can call
any of the stats primitives in the list below from minstat to cvstat.
Can not call summarystat function.
getHistograph -- returns a histograph from a dataset. See the
histograph function for more details.
minstat -- minimum
maxstat -- maximum
rangestat -- absolute range
sumstat -- sum of values
sumsqstat -- sum of squares of values
meanstat -- mean
medianstat -- median
varstat -- variance of a sample
sdstat -- standard deviation of a sample
cvstat -- coefficient of variation -- biologists like
it, for reasons that were never clear to me
summarystat -- provides dictionary of basic summary
statistics for a sample (not in XAYAstats format,
but easy to view with pretty-print function)
STATS VARIABLE TRANSFORMATIONS:
dlist -- list of deviations from mean value
zlist -- list of z scores -- deviation from mean value
divided by standard deviation
rlist -- dev from min divided by range -- list between 0 and 1
DISTRIBUTIONS:
histograph -- Counts occurances of each unique item in a list and
represents the results as a XAYAcore format graph.
Description of functionality of XAYAstats dataset handling functions:
Functions that operate on XAYAstats format Datasets
getDataset : Creates a XAYAstats format dataset (calling readFile and getData)
viewData : Allows one to view the Data components of a Dataset
viewVariables: Allows one to view the Variables component of a Dataset
saveData: Saves the Data component of a Dataset to a XAYAcore style text file.
saveVAriable: Saves the Variable component of a Dataset to a XAYAcore
style text file.
getDatalist: Extracts a Variable from a Dataset -- does not cast to
a particular variable type.
(can be treated as either CATEGORICAL or TEXT)
getNumberslist: Extracts a Variable as a list of numbers from a
Dataset via castDatalistToNumerical
getIntegerslist: Extracts a Variable as a list of integers.
Functions that operate on a File
readFile : Reads csv file into dictionary keyed to file line numbers.
Headings at line 0.
getData: Cleans up dictionary, strips off extra spaces,
turns the dictionary value into a list of numbers.
getVariables: From csv file header, creates a graph where the
column name is the key, and the column index is the value.
This dictionary extracts lists of values by column name.
Functions that operate on Lists
castDatalistToNumerical: Converts a Datalist to floating point numbers.
castDatalistToInteger: Converts a Datalist to integers.
getNumberslist: Extracts a Variable as a list of numbers from a
Dataset via castDatalistToNumerical
Functions that operate on individual data items
testTypes -- Tests whether an item is INTEGER, NUMERICAL, CATEGORICAL,
or TEXT
Description of functionality of Utility Functions:
These functions are designed to "round out" XAYAstats by
providing it with additional data manipulation ('data munging' aka
'data crunching') abilities to further organize, clean-up, and transform
data. Additionally there is support for basic sampling procedures, and
basic probability calculations will be added soon.
Sampling and audit Functions:
auditDataset -- Produces an audit sample for a dataset, using a
probability for inclusion based on the number of audit samples
requested. The function will return approximately the number
of samples requested.
auditData -- Called by auditDataset. Handles the sampling process.
Can be used to sample any XAYAcore format graph.
sampleDataset -- Samples a dataset with or without replacement.
This function can be used for various monte-carlo like simulations
based on repeated sampling from a dataset.
samplePopulationR -- Called by sampleDataset if replacement = 'Y'.
samplePopulationNoR -- Called by sampleDataset if replacement = 'N'
Probability Calculators
diceroll -- approximates rolling a number of dice, each of which can
        have an arbitrary number of sides. For example, diceroll(5, 6)
would be 5 rolls of a 6 sided dice
Data munging Functions (aka data crunching ....)
sortlist -- a Python 2.3 version of the "sorted" feature in Python 2.4.
Sorts a copy of a list, and leaves the original list unchanged.
SOURCES:
Sources for specific statistics are cited in the docstring for the statistic.
The rest of the statistics, just follow from their standard definitions.
USAGE EXAMPLEs:
(see examples under individual functions -- not implemented currently)
REFERENCES:
# KNOWN BUGS
# None Known at this point
# UNITTESTS
# Unittests are in the file test_xayastats
# DESIGN CONTRACTS
# Design Contract for each fn/obj are currently described in comments/docstring
#DOCTESTS
# Used sparingly where they illustrate code above and beyond unittest
"""
import types
from math import sqrt
import xayacore
import pprint
import random
# To allow the code to use built-in set fns in Python 2.4 or the sets module in Python 2.3
try :
set
except NameError:
from sets import Set as set
def xayastatsVersion():
return "Current version is xayastats_001 (development branch), updated September 6, 2005"
# ---------- Utility Functions ----------
# These functions handle basic 'data munging' tasks, calculate probablities,
# allow for sampling and audit of data
# Basic math tricks for sums and series
def listDatasets(objects = dir()):
"""
Utility function to identify currently loaded datasets.
Function should be called with default parameters,
ie as 'listDatasets()'
"""
datasetlist = []
for item in objects:
try:
if eval(item + '.' + 'has_key("DATA")') == True:
datasetlist.append(item)
except AttributeError:
pass
return datasetlist
def auditDataset (auditsamples=1, dataset ={}, randseed=12345213):
"""
Wraps auditData function to operate on a XAYAstats format dataset.
Type help(auditdata) for more information on the underlying function
"""
data = dataset['DATA']
auditsample = {}
auditsample['DATA'] = auditData(auditsamples, data, randseed)
auditsample['VARIABLES'] = dataset['VARIABLES']
return auditsample
def auditData(auditsamples = 1, data = {}, randseed = 12345213 ):
"""
Samples a DATA graph based on a probability of inclusion
which is the ratio auditsamples/len(data). So, one recieves
approximately the number of samples selected. From Data Crunching
pg 170. This function uses a fixed seed, so it's results
for a particular input are repeatable. The default seed is
taken from the example in Data Crunching.
"""
auditdata = {}
random.seed(randseed)
if (not data) or (len(data) <= auditsamples):
auditdata = data
return auditdata
else:
prob = float(auditsamples)/len(data)
keys = data.keys()
keys.remove(0)
for key in keys:
if random.random() < prob:
auditdata[key] = data[key]
auditdata[0] = data[0]
return auditdata
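# Editor's illustration (not in the original module): a hedged usage sketch for
# auditData. 'somedataset' is a hypothetical XAYAstats dataset; with roughly 1000
# data rows, asking for 10 audit samples keeps each row with probability 10/1000,
# so the result holds approximately 10 sampled rows plus the headings row (key 0).
# audit = auditData(auditsamples = 10, data = somedataset['DATA'])
# len(audit) # -> about 11 (roughly 10 sampled rows + the headings row)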
def samplePopulationR(samples, population):
"""
Samples a population with replacement
The same population member can be sampled
more than one time
In this function we are imagining s rolls of a
single dice whose number of sides is equal
to the population size. As population approaches
infinity, the dice becomes round ... but you'll
be dead before it happens.
"""
samplesList = [ ]
for sample in range(samples):
randomsample = diceroll(1,population)
samplesList.append(randomsample)
return samplesList
def samplePopulationsNoR(samples, population):
"""
Samples a population without replacement.
If a population member is drawn once, it can not
be drawn again.
"""
maxsamples = samples
initialsamples = 0
samplesList = [ ]
while (len(samplesList) < maxsamples) :
if len(samplesList) == population: break
thissample = diceroll(1,population)
if thissample not in samplesList:
samplesList.append(thissample)
return samplesList
def diceroll (dice, sides):
"""
Simulate rolling d dice of s sides each
From Python Cookbook1st Edn pg 531
See the Python Cookbook entry on this
function for the non-standard use of 'reduce'
for example diceroll(5, 6) would be 5 rolls
of a 6 sided dice
"""
def accumulate (x, y, s=sides):
return x + random.randrange(s)
return reduce(accumulate, range(dice+1) )+ dice
def dicebool (dice, sides, hinge):
''' Booleanizes the resuls of a diceroll, based on
a hinge value. For example, if we wanted to have an
item selected with probability 0.5, we could use a
10 sided dice, and assign 1 to any case where the diceroll
value is <= 5. If we wanted probability 0.9 we could again
use a 10 sided dice, and set the hinge at 9. If we wanted to
select something with probability 0.63, we could roll a 100 sided
dice, and use a hinge of 0.63. So this is a way of selecting items
with an arbitary probability.
'''
roll = diceroll(dice, sides)
# print roll
if roll <= hinge:
return 1
else: return 0
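# Editor's illustration (not in the original module): selecting items with an
# arbitrary probability via dicebool, as the docstring above describes. A 10-sided
# dice with hinge 3 keeps an item with probability ~0.3, so over many trials the
# hit rate should come out close to 0.3.
# hits = 0
# for trial in range(1000):
#     hits = hits + dicebool(1, 10, 3)
# print float(hits) / 1000 # -> roughly 0.3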
def sortlist(iterable = [ ]):
"""
    A utility function that returns a sorted list, but leaves the
    original list unchanged: from The Python Cookbook 2nd Edn pg 192
"""
alist = list(iterable)
alist.sort()
return alist
def sumIntegerSeries(start = 1, stop = 100, difference = 1):
"""
This formula is taken from pg 78/79 of "The Art of the Infinite" by Robert and
Ellen Kaplan. It's based on an ancient trick for summing a series of numbers
without having to go through all the middle-work
For example if start = 1, stop = 5, and difference = 3
    sumint = 1 + 4 + 7 + 10 + 13 = 35
There's also the famous "schoolboy in class exercise": Sum 1 to 100
start = 1, stop = 100, difference = 1
sumint = 1 + 2 + 3 + 4 + .... + 100 = 5050
"""
sumint = (stop * (2 * start + (stop - 1) * difference)/2)
return sumint
# ---------- readDataSetsdsv Functions ----------
# These functions are read a csv file, and put it in a xayastats dataset form
# May need further modification to put in XAYA format.
def getDataset(filepath = ""):
"""
Reads a comma separated variables (csv) file and creates a dataset.
A dataset has the following components: Variables, Data, Types, Groups, Sorts
To call each componenet of a dataset, do the following.
Say you have a dataset named: somedata
somedata['VARIABLES'] accesses the variables in this dataset
somedata['DATA'] accesses the rows of data in this dataet.
"""
data = {}
data['VARIABLES'] = getVariables(filepath)
data['DATA'] = getData(filepath)
return data
# Notes for version with type support added
# def getDataset(filepath = "", varGraph = {}, typesGraph = {}):
# """ Given a filepath, create a data table and a variables table """
#
# data = {}
# if varGraph == {}:
# data['VARIABLES'] = getVariables(filepath)
# else:
# data['VARIABLES'] = varGraph
# if typesGraph == {}:
# data['VARTYPES'] = typesGraph
# else: data['VARTYPES'] = guessTypes
# data['DATA'] = getData(filepath)
# return data
def saveDataset(dataset = {}, filepath = "", append=0):
"""
    Writes a XAYA format dataset to a csv file where the first row contains
the file headings and all other rows contain the data. Inverse of getDataset.
Algorithm:
1. 'DATA' component of dataset translated into a list of strings -- transList
2. Write transList to a file object
'append' keyword: 0 includes headings, not 0 is append mode and headings are not written
"""
# 'DATA' component of dataset translated into a list of strings -- transList (see xayacore transGraphToList)
transList = []
# If append mode -- then skip line 0 -- the headings
if append != 0:
for key in dataset['DATA']:
if key != 0:
valueString = " ".join([str(x) + ',' for x in dataset['DATA'][key]]).rstrip(',')
newline = '\n'
finalString = valueString + newline
transList.append(finalString)
#If not in append mode, include all lines
else:
for key in dataset['DATA']:
valueString = " ".join([str(x) + ',' for x in dataset['DATA'][key]]).rstrip(',')
newline = '\n'
finalString = valueString + newline
transList.append(finalString)
# Write transList to a file object (see xayacore writeGraph)
fileObject = open(filepath, 'a+')
fileObject.writelines(transList)
fileObject.flush()
return fileObject
def htmlDataset(dataset = {}, title=""):
""" Utility function to generate HTML Table from a Dataset"""
content = "<TABLE cellpadding=5> <caption align=top>" + title + " </caption><TR></TR><TR></TR>"
for row in dataset['DATA']:
content += "<TR>"
for col in dataset['DATA'][row]:
if row==0:
content += "<TH align=left bgcolor=#BBDB88>"
else:
content += "<TD align=left bgcolor=#FFFAB2>"
content += col
if row==0:
content += "</TH>"
else:
content += "</TD>"
content += "</TR>"
content += "</TABLE>"
return content
def saveHtmlDataset(dataset = {},title="", filepath = ""):
""" Saves a dataset in HTML format to a file,
for easy viewing in a web browser
"""
html = htmlDataset(dataset, title)
fileObject = open(filepath, 'a+')
fileObject.write(html)
fileObject.flush()
return fileObject
def viewData(dataset):
"""
Given a dataset, print it out in reader friendly format
"""
return pprint.pprint(dataset['DATA'])
def viewVariables(dataset):
"""
Given a dataset, print out the heading fields in reader friendly format
"""
for entry in dataset['DATA'][0]:
pprint.pprint(str(entry) + " : " + str(dataset['VARIABLES'][str(entry)]))
def saveData(dataset={}, filepath= 'defaultFilePath'):
"""
Saves the 'DATA' component of a dataset as an ascii file
in XAYAcore graph format.
"""
return xayacore.writeGraph(dataset['DATA'], filepath)
def saveVariables(dataset={}, filepath= 'defaultFilePath'):
"""
Saves the 'VARIABLES' component of a dataset in an ascci file
in XAYAcore graph format
"""
return xayacore.writeGraph(dataset['VARIABLES'], filepath)
def readFile(filepath =""):
""" Reads file object into a dictionary keyed to line number """
try:
fileobj = open(filepath, 'r')
except IOError:
print "File not opened, could not find", filepath
else:
# print "-------------------------------"
# print
# print "Opening file", filepath -- Disabled as adds to much "gibberish" to interpreter session when opening lost of files at once
datatable = {}
autonumberkey = -1
for line in fileobj:
autonumberkey = autonumberkey + 1
datatable[autonumberkey] = line
fileobj.close()
return datatable
# def writeFile(filepath=" "):
def getData(filepath =" "):
""" Cleans up readFile dictionary so it can function as a relation tbl like dataset """
data = readFile(filepath)
dataGraph = {}
for key in data.keys():
dataGraph[key] = [column.strip() for column in data[key][:-1].split(",")] # strip newline at end
return dataGraph
def getVariables(filepath =""):
""" Extracts a dictionary of VariableName: Index pairs from getData """
data = getData(filepath)
variables = {}
for index in range (len(data[0])):
variables[data[0][index]] = [index]
return variables
# ---------- Sorting and Grouping Functions ----------
def getSorts(dataset = {}, sortVars = [], sortOrder = 'ASC'):
"""
Adds a SORTS component to a XAYA format dataset.
Given a XAYA format dataset and a list of variables to sort on,
returns the same dataset, with the addition of the SORTS component.
The SORTS component is a list of keys in dataset['DATA']
in sort order. The default sort order is ascending ('ASC').
For descending sort order, enter 'DESC'
Algorithm:
# Extract the 'DATA' component of dataset to sort
    # Extract the index positions of variables we wish to sort DATA by
# Create a dictionary whose values include only the data we wish to sort by
# Create a list of tuples where:
# The first tuple element (0) is a list of the data to sort by
# The second tuple element (1) is the dictionary key.
# Data headings from this list of tuples (tuple 0) have been removed
# Sort the tuples based on the selected variables:
# If sort order is 'ASC' -- sort ascending
# if sort order is 'DESC' -- sort descending
# Create a new dataset
# Copy the original dataset to the new dataset
# Add a new 'SORTS' component to the new dataset
# Return the new dataset with 'SORTS' added.
Note: One can later reverse the order in which data is displayed
by reversing the 'SORTS' component. So consider using the default
sort-order 'ASC', unless there is a compelling reason why the natural sort
for this data would be in descending order, 'DESC'
"""
sortdata = {} # Set up a dictionary for data that will be the basis of the sort
newdataset = {} # set up a new dataset that will include the sort order data
# Extract the 'DATA' component of dataset to sort
data = dataset['DATA']
variables = dataset['VARIABLES']
# Extract the index positions of variables we wish to sort DATA by
varindexlist = []
for vars in sortVars:
varindexlist.append(variables[vars][0])
keylist = [] # list that will hold keys in the final sort order
#Create a dictionary whose values include only the data we wish to sort by
for row in data:
rowlist = []
for var in varindexlist:
rowlist.append(data[row][var])
sortdata[row] = rowlist
# Create a list of tuples where:
# The first tuple element (0) is a list of the data to sort by
# The second tuple element (1) is the dictionary key.
# Data headings from this list of tuples (tuple 0) have been removed
sortdatalist = [(values, row) for row, values in sortdata.items()]
sortdatalist.pop(0)
# Sort the tuples based on the selected variables:
sortedlist = sortlist(sortdatalist)
for tuple in sortedlist:
keylist.append(tuple[1]) # Extract keys in ascending sort order
if not sortOrder == 'ASC':
keylist.reverse() # Extract keys in descending sort order
# Create a new dataset that contains the original data and the new SORTS component.
for key in dataset.keys():
newdataset[key] = dataset[key]
newdataset['SORTS'] = keylist
return newdataset
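# Editor's illustration (not in the original module): using the SORTS component
# produced by getSorts. 'books' is the same hypothetical dataset referred to in the
# filterDataset examples below, with 'Author' assumed to be one of its variables.
# sortedbooks = getSorts(books, ['Author'])
# for row in sortedbooks['SORTS']: # DATA keys in ascending 'Author' order
#     print sortedbooks['DATA'][row]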
def sortedDataset(dataset={}, sortOrder='ASC'):
"""Sorts a Dataset in Variable Order """
# varitems = dataset['VARIABLES'].items()# Tuples with variable key, and variable index no
varlist = [ (x, y) for y, x in dataset['VARIABLES'].items()]
varlist.sort()
variables = []
for tuple in varlist:
variables.append(tuple[1])
sortlist = getSorts(dataset, variables,sortOrder)['SORTS']
sorteddataset = {}
sorteddataset['DATA'] ={}
sorteddataset['DATA'][0] = dataset['DATA'][0]
autonumberkey = 0
for item in sortlist:
autonumberkey = autonumberkey + 1
sorteddataset['DATA'][autonumberkey] = dataset['DATA'][item]
for key in dataset.keys():
if key != 'DATA':
sorteddataset[key] = dataset[key]
return sorteddataset
def getGroups( dataset = {}, groupVars = []):
"""
Adds a GROUPS component to a XAYA format dataset.
Given a XAYA format dataset and a list of variables to group on,
returns the same dataset, with the addition of the GROUPS component.
    The GROUPS component contains a dictionary where keys represent unique groups,
and values are the list of rows (keys) in dataset['DATA'] that are members of that group
Algorithm:
# Extract the 'DATA' component of dataset to group
# Extract the index positions of variables we wish to group DATA by
# Create a dictionary whose values include only the data we wish to group by
# Get a list of all values to aggregate into groups
# Create the set of unique groups
# Assign rows of data to a particular group
"""
groupdata = {} # Set up a dictionary for data that will be the basis of the grouping
newdataset = {} # set up a new dataset that will include the sort order data
# Extract the 'DATA' component of dataset to group
data = dataset['DATA']
copydata = data.copy() # Make a shallow copy data -- so 'del' in next statement leaves original data untouched
del copydata[0] # Get rid of the headings row in copydata -- leaves data untouched
variables = dataset['VARIABLES']
# Extract the index positions of variables we wish to group DATA by
varindexlist = []
for vars in groupVars:
varindexlist.append(variables[vars][0])
# Create a dictionary whose values include only the data we wish to group by
for row in copydata:
rowlist = []
for var in varindexlist:
rowlist.append(copydata[row][var])
groupdata[row] = rowlist
# Get a list of all values to aggregate into groups
groupslist = groupdata.values() # Result is a list of lists -- we want a list of tuples, so can use the groups as keys in a dictionary
# Convert groups from lists to tuples
grouptuples = [tuple(group) for group in groupslist]
# Create the set of unique groups
groupset = set(grouptuples)
# Initialize a dictionary to hold the groups
groupsdict = {}
for group in groupset:
groupsdict[group] = [ ]
# Assign rows of data to a particular group
for row in groupdata:
for member in groupset:
if tuple(groupdata[row]) == member:
groupsdict[member].append(row)
# Create a new dataset that contains the original data and the new GROUPS component.
for key in dataset.keys():
newdataset[key] = dataset[key]
newdataset['GROUPS'] = groupsdict
return newdataset
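# Editor's illustration (not in the original module): the GROUPS component maps each
# unique combination of grouping values (as a tuple) to the list of DATA row keys in
# that group. With the hypothetical 'books' dataset:
# groupedbooks = getGroups(books, ['Author'])
# groupedbooks['GROUPS'] # -> e.g. {('Mishtu',): [1, 3], ('Ian',): [2]}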
def getTypes(dataset = {}, typesGraph = {}):
"""
Adds a TYPES component to a dataset, and casts the data based
on the typesGraph (a XAYA format graph) whose nodes are the variable names
and whose edges are the data type for that variable.
Default data type is string.
Data of types 'CATEGORICAL' and 'TEXT', remains as string. Data of
type 'INTEGER' is cast to via the python fn int(). Data of type 'NUMERICAL' is
cast via the python fn float()
Algorithm:
# Check that variables in dataset match variables in typesGraph
# Check that there are no unknown types in typesGraph
# Extract a sorted list of the index positions and names of each variable
# Create a dictionary with data values cast to appropriate types
# 'TEXT' and 'CATEGORICAL' are left as is
# 'INTEGER' is cast via int() fn
# 'NUMERICAL' is cast via float() fn
# Create a new dataset that contains the original data and the new TYPES component.
"""
# Set up of variables needed later in function
data = dataset['DATA']
copydata = data.copy() # Make a shallow copy data -- so 'del' in next statement leaves original data untouched
del copydata[0] # Get rid of the headings row in copydata -- leaves data untouched
variables = dataset['VARIABLES']
vars = variables.keys()
typevars = typesGraph.keys()
typedata = {} # Set up a dictionary for data that is cast to type
newdataset = {}# set up a new dataset that will include the Types component
tocast = ['INTEGER', 'NUMERICAL']
tonotcast = ['TEXT', 'CATEGORICAL']
alltypes = ['INTEGER', 'NUMERICAL','TEXT', 'CATEGORICAL']
# Check that variables in dataset match variables in typesGraph
for variable in vars:
if variable not in typevars:
print 'There is a mismatch between Dataset Variables and the typesGraph'
return newdataset
    # Check that there are no unknown types in the typesGraph
typevalues = [value[0] for value in typesGraph.values()]
for type in typevalues:
if type not in alltypes:
print 'There is an unknown data type in typesGraph'
return newdataset
# Extract a sorted list of the index positions and names of each variable
varitemslist = [(value[0], key) for key, value in variables.items()]
varitemslist.sort()
# Create a dictionary with data values cast to appropriate types
typedata[0] = data[0]
for row in copydata:
rowlist = []
for varitem in varitemslist:
castitem = data[row][varitem[0]]
typeitem = typesGraph[varitem[1]][0]
if typeitem in tonotcast: # Leave 'TEXT' and 'CATEGORICAL' as is
rowlist.append(castitem)
elif typeitem == 'INTEGER':
rowlist.append(int(float(castitem))) # Need to cast to float first for things like '1839.00'
else:
rowlist.append(float(castitem))
typedata[row] = rowlist
# Create a new dataset that contains the original data and the new TYPES component.
for key in dataset.keys():
if key != 'DATA':
newdataset[key] = dataset[key]
newdataset['TYPES'] = typesGraph
newdataset['DATA'] = typedata
return newdataset
def filterDataset(dataset = {}, filterVar = '', filterValues = []):
"""
Filters a XAYA format dataset on a single variable for a list of filter values.
Given a XAYA format dataset, a filterVar and a list of filterValues,
this function returns a dataset containing only those rows that contain the filter values.
If you wish to filter on multiple variables -- you can run this function several times.
Examples:
>>> filteredbooks1 = xayastats.filterDataset(books, 'PositiveReviews', [250])
# returns only those rows from books dataset which have 250 positive reviews
>>> filteredbooks2 = xayastats.filterDataset(books, 'Author', ['Mishtu', 'Ian'])
# returns only those rows from books dataset whose authors are either Ian or Mishtu
    # Note -- beware: if the dataset is cast to datatypes, you must have the correct datatype in your list.
For example, 250 is not the same as '250' -- one is an integer and the other is a string
Algorithm:
# Extract the 'DATA' component of dataset to filter
# Extract the index positions of variable we wish to filter by
# Build a filtered dataset"
By looping through data rows and checking the variable (at index position)
And adding only those rows whose value is in the filterValues list
"""
# Set up of variables needed later in function
data = dataset['DATA']
datarows = data.keys()[1:]
variables = dataset['VARIABLES']
varIndex = variables[filterVar][0]
vars = variables.keys()
filterdataset = {}# set up a filter dataset
filterdataset['DATA'] = {}
filterdataset['DATA'][0] = data[0]
for row in datarows:
if data[row][varIndex] in filterValues:
filterdataset['DATA'][row] = data[row]
# Create a new dataset that contains the original data and the new TYPES component.
for key in dataset.keys():
if key != 'DATA':
filterdataset[key] = dataset[key]
return filterdataset
def joinDatasets(parentDataset={}, childDataset={}, joinKeys=[]):
"""
This function approximates a relational join on two datasets with a common key.
It assumes the common keys have identical names. A one-to-many relationship is
assumed between the parentDataset and childDataset. A special case being a one-to-one
relationship between the two datasets.
Algorithm:
For each dataset
Create a Graph with joinKeys as nodes, and rows for a joinKey as arcs
For each JoinKey in Parent Dataset (should be a single row)
For each JoinKey in Child Dataset matching parent Joinkey
"""
# ---------- Data Extraction and Type-Casting Functions ----------
def getDatalist (dataGraph = {}, varGraph = {}, variable = "" ):
""" Produces a data list for the via dataset and variables dictionary for a named variable"""
# List needs to be converted if data is to be treated as Integer or Numerical (floats)
# Default is to treat data as Categorical
data = dataGraph
vars = varGraph
var = variable
datalist = []
""" Extracts a Variable Column as a datalist, using dataset and column dictionaries"""
keys = data.keys()
keys.remove(0)
for key in keys:
datalist.append(data[key][varGraph[var][0]])
return datalist
def castDatalistToNumerical(list = []):
""" Converts a list to float number values """
datalist = list
numberlist = []
for item in datalist:
numberlist.append(float(item))
return numberlist
def getNumberslist(dataGraph = {}, varGraph = {}, variable = ""):
return castDatalistToNumerical(getDatalist(dataGraph, varGraph, variable))
def castDatalistToInteger(list = []):
""" Converts a list to float number values """
datalist = list
numberlist = []
for item in datalist:
numberlist.append(int(item))
return numberlist
def getIntegerslist(dataGraph = {}, varGraph = {}, variable = ""):
return castDatalistToInteger(getDatalist(dataGraph, varGraph, variable))
def testTypes(item = None):
"""
A set of tests to distinguish between the various XAYAstats data types:
TEXT, CATEGORICAL, INTEGER, NUMERICAL
Definition:
Item is None or empty string, there is no data
Items must be either strings or numbers according to Python
letters are not numbers
text has white space
integers do not straggle over the decimal line
"""
# Default case -- item is None, or empty string returns no data
if item == None or item == "":
return 'NODATA'
else:
# Items must be either strings or numbers according to Python
try:
float(item)
except TypeError:
print "Item is not one of the following: TEXT, CATEGORICAL, INTEGER, NUMERICAL"
return 'UNDEFINED'
# Letters are not numbers
except ValueError:
# text has white space
if list(item).count(' ') == 0:
return 'CATEGORICAL'
else:
return 'TEXT'
# integers do not straggle over the decimal line
if int(float(item)) == float(item): # int(float(item) prevents ValueError for items like '24.0'
return 'INTEGER'
else:
return 'NUMERICAL'
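# Editor's illustration (not in the original module): classifications that follow
# from the rules in testTypes above.
# testTypes('') # -> 'NODATA'
# testTypes('24.0') # -> 'INTEGER' (does not straggle over the decimal line)
# testTypes('3.14') # -> 'NUMERICAL'
# testTypes('red') # -> 'CATEGORICAL' (no white space)
# testTypes('red blue') # -> 'TEXT' (contains white space)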
# ---------- genStats Functions ----------
def getSummarystats(dataset = {}, variable = ""):
"""
Calculates a set of basic statistics on variable in a dataset
and returns results as a dictionary of summary statistics.
Note results are for viewing ease, but are not in XAYAstats
dataset format.
"""
data = getNumberslist(dataGraph = dataset['DATA'], varGraph = dataset['VARIABLES'], variable = variable)
return summarystat(data)
def summarizedDataset(dataset={}):
"""Summarizes a dataset.
Stage 1: Summarizes numerical variables
Stage 2: Summarizes numerical variables, grouped by categorical variables.
"""
# Get Variables into a list, sorted by variable position
varlist = [ (x, y) for y, x in dataset['VARIABLES'].items()]
varlist.sort()
variables = []
for tuple in varlist:
variables.append(tuple[1])
# Loop through variables
summarydata = {}
summarydata['DATA'] = {}
summarydata['DATA'][0] = ["Variable", "Min", "Max", "Mean", "Median", "Range", "SD", "CV"]
summarydata['VARIABLES'] = {"Variable" : [0],
"Min": [1],
"Max": [2],
"Mean": [3],
"Median": [4],
"Range": [5],
"SD": [6],
"CV": [7]}
autonumberkey = 0
for var in variables:
# Test that variables are numerical
datalist = getDatalist(dataset['DATA'],dataset['VARIABLES'], var)
datalist.sort()
try: float(datalist[0])
except ValueError: continue
try: float(datalist[-1])
except ValueError: continue
# If numerical, calculate summary stats
autonumberkey = autonumberkey + 1
summarygraph = getSummarystats(dataset,var)
# Move data from individual graphs to dataset format
summarydatalist = []
summarydatalist.append(var)
summarydatalist.append(summarygraph['min'][0])
summarydatalist.append(summarygraph['max'][0])
summarydatalist.append(summarygraph['mean'][0])
summarydatalist.append(summarygraph['median'][0])
summarydatalist.append(summarygraph['range'][0])
summarydatalist.append(summarygraph['sd'][0])
summarydatalist.append(summarygraph['cv'][0])
summarydata['DATA'][autonumberkey] = summarydatalist
return summarydata
def getHistograph(dataset = {}, variable = ""):
"""
Calculates a histogram-like summary on a variable in a dataset
and returns a dictionary. The keys in the dictionary are unique items
for the selected variable. The values of each dictionary key, is the number
of times the unique item occured in the data set
"""
data = getDatalist(dataGraph = dataset['DATA'], varGraph = dataset['VARIABLES'], variable = variable)
return histograph(data)
def sortHistograph(histograph = {}, sortOrder = 'DESC'):
"""
Sorts an histograph to make it easier to view. The default is
descending (DESC) order (most to least frequent). Enter 'ASC' to produce
an histogram in ascending order (least to most frequent).
"""
sortedHist = []
sortedHist = sortlist([(value,key)for key,value in histograph.items()])
    if not sortOrder == 'ASC':
sortedHist.reverse()
return sortedHist
def minstat(list = [ ]):
""" Returns minimum value in a list or tuple"""
return min(list)
def maxstat(list = [ ]):
""" Returns minimum value in a list or tuple"""
return max(list)
def rangestat(list = [ ]):
""" Returns the absolute range of values in a list or tuple"""
return abs(maxstat(list) - minstat(list))
def sumstat(*L):
"""
Sums a list or a tuple L
Modified from pg 80 of Web Programming in Python
"""
if len(L) == 1 and \
( isinstance(L[0],types.ListType) or \
isinstance (L[0], types.TupleType) ) :
L = L[0]
s = 0.0
for k in L:
s = s + k
return s
# Consider changing so uses the much simpler built-in sum() fn.
def sumsqstat(*L):
""" Sum of squares for a list or a tuple L"""
if len(L) == 1 and \
( isinstance(L[0],types.ListType) or \
isinstance (L[0], types.TupleType) ) :
L = L[0]
s = 0.0
for k in L:
sq = k * k # Calculates the Square
s = s + sq # Sums the Square
return s
# Consider changing so uses the much simpler built-in sum() fn
def meanstat(n):
""" Calculates mean for a list or tuple """
mean = sumstat(n)/float(len(n))
return mean
def medianstat(n):
"""
Calculates median for a list or tuple
Modified from pg 347 Zelle's CS intro book,
Python Programming, an Introduction to Computer Science
"""
s = n[:]
s.sort()
size = len (n)
midPos = size/2
if size % 2 == 0:
median = (s[midPos] + s[midPos - 1]) /2.0
else:
median = s[midPos]
return median
def dlist(list = [ ]):
""" Calculates a list of Deviations from Mean"""
diffs = [ ]
mean = meanstat(list)
for k in list:
diff = k - mean
diffs.append(diff)
return diffs
def varstat(list = [ ]):
""" Calculates the Variance for a list or tuple L"""
devs = dlist(list)
sumdevs = sumsqstat(devs)
var = sumdevs/(len(list)-1.0)
return var
def sdstat(list = [ ]):
""" Calculates the Variance for a list or tuple """
sd = sqrt(varstat(list))
return sd
def cvstat(list = [ ]):
""" Calculates the Coefficient of Variation"""
cv = sdstat(list)/meanstat(list) * 100
return cv
def summarystat(list = [ ]):
"""Summary Statistics for a List or Tuple"""
statsGraph = {}
min = minstat(list)
max = maxstat(list)
range = rangestat(list)
mean = meanstat(list)
median = medianstat(list)
sd = sdstat(list)
cv = cvstat(list)
statsGraph['min'] = [min]
statsGraph['max'] = [max]
statsGraph['range'] = [range]
statsGraph['mean'] = [mean]
statsGraph['median'] = [median]
statsGraph['sd'] = [sd]
statsGraph['cv'] = [cv]
return statsGraph
#Alternate summarystat implementation (as Xaya format graph)
# valuelist = [min,max,mean,median,sd,cv]
# keytuple = ('min','max','mean','median','sd','cv')
# for key in range(len(keytuple)):
# statsGraph[key] = valuelist[key]
# return statsGraph
# screwing up on indices and tuple list dict diffs
def zlist(list = [ ]):
"""Calculates z scores for a list or tuple"""
zeds = []
mean = meanstat(list)
sd = sdstat(list)
for k in list:
zed = (k - mean)/sd
zeds.append(zed)
return zeds
def rlist( list = [ ]):
"""Calculates Range scores with values between 0 and 1"""
rrs = []
min = minstat(list)
range = rangestat(list)
for k in list:
r = (k - min)/float(range)
rrs.append(r)
return rrs
def histograph(list = [ ]):
"""
A histograph is a histogram where the unique items
in a list are keys, and the counts of each item is the value
for that key. The format is in XAYA graph format (values are lists)
Definition:
Get the set of distinct items in a list
Count the number of times each item appears
"""
histoGraph = {}
# Get the set of distinct items in a list
distinctItems = set(list)
# Count the number of times each item appears
for item in distinctItems:
histoGraph[item] = [list.count(item)]
return histoGraph
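# Editor's illustration (not in the original module): histograph counts each distinct
# item and wraps the count in a list (XAYA graph format).
# histograph(['a', 'b', 'a', 'c', 'a']) # -> {'a': [3], 'c': [1], 'b': [1]}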
# Add assertions
# assert meanstat(zscores) == 0
# assert sdstat(zscores) == 1.0
# assert minstat(rscores) == 0.0
# assert maxstat(rscores) == 1.0
| MishtuBanerjee/xaya | xaya/xayastats.py | Python | mit | 45,688 |
"""
Support for Speedtest.net.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.speedtest/
"""
import asyncio
import logging
import re
import sys
from subprocess import check_output, CalledProcessError
import voluptuous as vol
import homeassistant.util.dt as dt_util
import homeassistant.helpers.config_validation as cv
from homeassistant.components.sensor import (DOMAIN, PLATFORM_SCHEMA)
from homeassistant.const import CONF_MONITORED_CONDITIONS
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.event import track_time_change
from homeassistant.helpers.restore_state import async_get_last_state
REQUIREMENTS = ['speedtest-cli==1.0.6']
_LOGGER = logging.getLogger(__name__)
_SPEEDTEST_REGEX = re.compile(r'Ping:\s(\d+\.\d+)\sms[\r\n]+'
r'Download:\s(\d+\.\d+)\sMbit/s[\r\n]+'
r'Upload:\s(\d+\.\d+)\sMbit/s[\r\n]+')
CONF_SECOND = 'second'
CONF_MINUTE = 'minute'
CONF_HOUR = 'hour'
CONF_DAY = 'day'
CONF_SERVER_ID = 'server_id'
CONF_MANUAL = 'manual'
ICON = 'mdi:speedometer'
SENSOR_TYPES = {
'ping': ['Ping', 'ms'],
'download': ['Download', 'Mbit/s'],
'upload': ['Upload', 'Mbit/s'],
}
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_MONITORED_CONDITIONS):
vol.All(cv.ensure_list, [vol.In(list(SENSOR_TYPES))]),
vol.Optional(CONF_SERVER_ID): cv.positive_int,
vol.Optional(CONF_SECOND, default=[0]):
vol.All(cv.ensure_list, [vol.All(vol.Coerce(int), vol.Range(0, 59))]),
vol.Optional(CONF_MINUTE, default=[0]):
vol.All(cv.ensure_list, [vol.All(vol.Coerce(int), vol.Range(0, 59))]),
vol.Optional(CONF_HOUR):
vol.All(cv.ensure_list, [vol.All(vol.Coerce(int), vol.Range(0, 23))]),
vol.Optional(CONF_DAY):
vol.All(cv.ensure_list, [vol.All(vol.Coerce(int), vol.Range(1, 31))]),
vol.Optional(CONF_MANUAL, default=False): cv.boolean,
})
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Set up the Speedtest sensor."""
data = SpeedtestData(hass, config)
dev = []
for sensor in config[CONF_MONITORED_CONDITIONS]:
if sensor not in SENSOR_TYPES:
_LOGGER.error("Sensor type: %s does not exist", sensor)
else:
dev.append(SpeedtestSensor(data, sensor))
add_devices(dev)
def update(call=None):
"""Update service for manual updates."""
data.update(dt_util.now())
for sensor in dev:
sensor.update()
hass.services.register(DOMAIN, 'update_speedtest', update)
class SpeedtestSensor(Entity):
"""Implementation of a speedtest.net sensor."""
def __init__(self, speedtest_data, sensor_type):
"""Initialize the sensor."""
self._name = SENSOR_TYPES[sensor_type][0]
self.speedtest_client = speedtest_data
self.type = sensor_type
self._state = None
self._unit_of_measurement = SENSOR_TYPES[self.type][1]
@property
def name(self):
"""Return the name of the sensor."""
return '{} {}'.format('Speedtest', self._name)
@property
def state(self):
"""Return the state of the device."""
return self._state
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this entity, if any."""
return self._unit_of_measurement
@property
def icon(self):
"""Return icon."""
return ICON
def update(self):
"""Get the latest data and update the states."""
data = self.speedtest_client.data
if data is None:
return
if self.type == 'ping':
self._state = data['ping']
elif self.type == 'download':
self._state = data['download']
elif self.type == 'upload':
self._state = data['upload']
@asyncio.coroutine
def async_added_to_hass(self):
"""Handle all entity which are about to be added."""
state = yield from async_get_last_state(self.hass, self.entity_id)
if not state:
return
self._state = state.state
class SpeedtestData(object):
"""Get the latest data from speedtest.net."""
def __init__(self, hass, config):
"""Initialize the data object."""
self.data = None
self._server_id = config.get(CONF_SERVER_ID)
if not config.get(CONF_MANUAL):
track_time_change(
hass, self.update, second=config.get(CONF_SECOND),
minute=config.get(CONF_MINUTE), hour=config.get(CONF_HOUR),
day=config.get(CONF_DAY))
def update(self, now):
"""Get the latest data from speedtest.net."""
import speedtest
_LOGGER.info("Executing speedtest...")
try:
args = [sys.executable, speedtest.__file__, '--simple']
if self._server_id:
args = args + ['--server', str(self._server_id)]
re_output = _SPEEDTEST_REGEX.split(
check_output(args).decode('utf-8'))
except CalledProcessError as process_error:
_LOGGER.error("Error executing speedtest: %s", process_error)
return
self.data = {
'ping': round(float(re_output[1]), 2),
'download': round(float(re_output[2]), 2),
'upload': round(float(re_output[3]), 2),
}
| MungoRae/home-assistant | homeassistant/components/sensor/speedtest.py | Python | apache-2.0 | 5,442 |
import unittest
import numpy as np
from bltest import attr
import vg
from lace.cache import sc, vc
from lace.mesh import Mesh
class TestGeometryMixin(unittest.TestCase):
debug = False
@attr('missing_assets')
def test_cut_across_axis(self):
original_mesh = Mesh(filename=sc('s3://bodylabs-assets/example_meshes/average_female.obj'))
# Set up
mesh = original_mesh.copy()
# Sanity check
np.testing.assert_almost_equal(mesh.v[:, 0].min(), -0.3668)
np.testing.assert_almost_equal(mesh.v[:, 0].max(), 0.673871)
# Act
mesh.cut_across_axis(0, minval=-0.1)
# Test
np.testing.assert_almost_equal(mesh.v[:, 0].min(), -0.1, decimal=1)
np.testing.assert_almost_equal(mesh.v[:, 0].max(), 0.673871)
# Visualize
if self.debug:
mesh.show()
# Set up
mesh = original_mesh.copy()
# Sanity check
np.testing.assert_almost_equal(mesh.v[:, 0].min(), -0.3668)
np.testing.assert_almost_equal(mesh.v[:, 0].max(), 0.673871)
# Act
mesh.cut_across_axis(0, maxval=0.1)
# Test
np.testing.assert_almost_equal(mesh.v[:, 0].min(), -0.3668)
np.testing.assert_almost_equal(mesh.v[:, 0].max(), 0.1, decimal=1)
# Visualize
if self.debug:
mesh.show()
# Set up
mesh = original_mesh.copy()
# Sanity check
np.testing.assert_almost_equal(mesh.v[:, 0].min(), -0.3668)
np.testing.assert_almost_equal(mesh.v[:, 0].max(), 0.673871)
# Act
mesh.cut_across_axis(0, minval=-0.1, maxval=0.1)
# Test
np.testing.assert_almost_equal(mesh.v[:, 0].min(), -0.1, decimal=1)
np.testing.assert_almost_equal(mesh.v[:, 0].max(), 0.1, decimal=1)
# Visualize
if self.debug:
mesh.show()
@attr('missing_assets')
def test_cut_across_axis_by_percentile(self):
original_mesh = Mesh(filename=sc('s3://bodylabs-assets/example_meshes/average_female.obj'))
# Set up
mesh = original_mesh.copy()
# Sanity check
np.testing.assert_almost_equal(mesh.v[:, 0].min(), -0.3668)
np.testing.assert_almost_equal(mesh.v[:, 0].max(), 0.673871)
# Act
mesh.cut_across_axis_by_percentile(0, 25, 40)
# Test
np.testing.assert_almost_equal(mesh.v[:, 0].min(), 0.03, decimal=1)
np.testing.assert_almost_equal(mesh.v[:, 0].max(), 0.10, decimal=1)
# Visualize
if self.debug:
mesh.show()
def test_first_blip(self):
'''
        Create a triangular prism, the base of which is an equilateral
triangle with one side along the x axis and its third point on the
+y axis.
Take three origins and vectors whose first_blip should be the same
vertex.
'''
import math
from lace import shapes
points = np.array([
[1, 0, 0],
[0, math.sqrt(1.25), 0],
[-1, 0, 0],
])
prism = shapes.create_triangular_prism(*points, height=4)
test_cases = [
{'origin': [0, -0.5, 0], 'initial_direction': [-1, 0.3, 0]},
{'origin': [-1.1, -0.5, 0], 'initial_direction': [0, 1, 0]},
{'origin': [-1, 1, 0], 'initial_direction': [-1, -1, 0]},
]
# TODO This is a little sloppy. Because flatten_dim=2,
# [-1, 0, 0] and [-1, 0, 4] are equally valid results.
expected_v = [-1, 0, 0]
for test_case in test_cases:
np.testing.assert_array_equal(
prism.first_blip(2, test_case['origin'], test_case['initial_direction']),
expected_v
)
def test_first_blip_ignores_squash_axis(self):
'''
The above test is nice, but since the object is a prism, it's the same along
the z-axis -- and hence isn't really testing squash_axis. Here's another test
with a different setup which tries to get at it.
'''
import math
from lace import shapes
points = np.array([
[1, 0, 0],
[0, math.sqrt(1.25), 0],
[-1, 0, 0],
])
prism = shapes.create_triangular_prism(*points, height=4)
test_cases = [
{'origin': [-1, 0, -1], 'initial_direction': [0, 1, 0]},
]
expected_v = [0, math.sqrt(1.25), 0]
for test_case in test_cases:
np.testing.assert_array_equal(
prism.first_blip(0, test_case['origin'], test_case['initial_direction']),
expected_v
)
def test_reorient_is_noop_on_empty_mesh(self):
mesh = Mesh()
mesh.reorient(up=[0, 1, 0], look=[0, 0, 1])
def test_scale_is_noop_on_empty_mesh(self):
mesh = Mesh()
mesh.scale(7)
def test_translate_is_noop_on_empty_mesh(self):
mesh = Mesh()
mesh.translate(1)
def test_centroid_is_undefined_on_empty_mesh(self):
mesh = Mesh()
with self.assertRaises(ValueError) as ctx:
mesh.centroid # pylint: disable=pointless-statement
self.assertEqual(
str(ctx.exception),
'Mesh has no vertices; centroid is not defined'
)
def test_bounding_box_is_undefined_on_empty_mesh(self):
mesh = Mesh()
with self.assertRaises(ValueError) as ctx:
mesh.bounding_box # pylint: disable=pointless-statement
self.assertEqual(
str(ctx.exception),
'Mesh has no vertices; bounding box is not defined'
)
def test_recenter_over_floor_raises_expected_on_empty_mesh(self):
mesh = Mesh()
with self.assertRaises(ValueError) as ctx:
mesh.recenter_over_floor()
self.assertEqual(
str(ctx.exception),
'Mesh has no vertices; centroid is not defined'
)
def test_predict_body_units_m(self):
mesh = Mesh()
mesh.v = np.array([[1.5, 1.0, 0.5], [-1.5, 0.5, 1.5]])
self.assertEqual(mesh.predict_body_units(), 'm')
def test_predict_body_units_cm(self):
mesh = Mesh()
mesh.v = np.array([[150, 100, 50], [-150, 50, 150]])
self.assertEqual(mesh.predict_body_units(), 'cm')
@attr('missing_assets')
def test_flip(self):
raw_box = Mesh(vc('/unittest/serialization/obj/test_box_simple.obj'))
box = Mesh(v=raw_box.v, f=raw_box.f)
box.reset_normals()
original_v = box.v.copy()
original_vn = box.vn.copy()
original_f = box.f.copy()
box.flip(axis=0)
box.reset_normals()
self.assertEqual(box.f.shape, original_f.shape)
self.assertEqual(box.v.shape, original_v.shape)
for face, orig_face in zip(box.f, original_f):
self.assertNotEqual(list(face), list(orig_face))
self.assertEqual(set(face), set(orig_face))
np.testing.assert_array_almost_equal(box.v[:, 0], np.negative(original_v[:, 0]))
np.testing.assert_array_almost_equal(box.v[:, 1], original_v[:, 1])
np.testing.assert_array_almost_equal(box.v[:, 2], original_v[:, 2])
np.testing.assert_array_almost_equal(box.vn[:, 0], np.negative(original_vn[:, 0]))
np.testing.assert_array_almost_equal(box.vn[:, 1], original_vn[:, 1])
np.testing.assert_array_almost_equal(box.vn[:, 2], original_vn[:, 2])
def test_reorient_faces_using_normals(self):
import math
from lace import shapes
from polliwog.tri import surface_normals
points = np.array([
[1, 0, 0],
[0, math.sqrt(1.25), 0],
[-1, 0, 0],
])
prism = shapes.create_triangular_prism(*points, height=4)
correct_faces = prism.f.copy()
# Generate normals that are slightly off in random directions.
prism.fn = vg.normalize(
surface_normals(prism.v[prism.f]) + \
0.05 * np.random.random(len(prism.f)*3).reshape(len(prism.f), 3))
# Flip a few of the faces.
n_to_flip = 4
to_flip = np.random.permutation(len(prism.f))[:n_to_flip]
prism.flip_faces(to_flip)
# Confidence check.
np.testing.assert_raises(AssertionError, np.testing.assert_array_equal, prism.f, correct_faces)
prism.reorient_faces_using_normals()
# Confidence check.
np.testing.assert_array_equal(prism.f, correct_faces)
| bodylabs/lace | lace/test_geometry.py | Python | bsd-2-clause | 8,485 |
"""
This file demonstrates writing tests using the unittest module. These will pass
when you run "manage.py test".
Replace this with more appropriate tests for your application.
"""
from django.db.models import Q
from django.test import TestCase
class PhonenumerFieldAppTest(TestCase):
def test_save_field_to_database(self):
"""Basic Field Test"""
from testapp.models import TestModel
from phonenumber_field.phonenumber import PhoneNumber
tm = TestModel()
tm.phone = '+41 52 424 2424'
tm.full_clean()
tm.save()
pk = tm.id
tm = TestModel.objects.get(pk=pk)
self.assertTrue(isinstance(tm.phone, PhoneNumber))
self.assertEqual(str(tm.phone), '+41524242424')
self.assertEqual(
1,
TestModel.objects
.exclude(Q(phone__isnull=True) | Q(phone=''))
.filter(pk=pk)
.count())
self.assertEqual(
0,
TestModel.objects
.filter(Q(phone__isnull=True) | Q(phone=''))
.filter(pk=pk)
.count())
def test_save_blank_phone_to_database(self):
"""Field Test for when Blank"""
from testapp.models import TestModelPhoneB as TestModel
tm = TestModel()
tm.save()
pk = tm.id
tm = TestModel.objects.get(pk=pk)
self.assertEqual(tm.phone, '')
self.assertEqual(
0,
TestModel.objects
.exclude(Q(phone__isnull=True) | Q(phone=''))
.filter(pk=pk)
.count())
self.assertEqual(
1,
TestModel.objects
.filter(Q(phone__isnull=True) | Q(phone=''))
.filter(pk=pk)
.count())
def __test_nullable_field_helper(self, TestModel):
"""Helper method for the next four tests."""
tm = TestModel()
tm.save()
pk = tm.id
tm = TestModel.objects.get(pk=pk)
self.assertIsNone(tm.phone)
self.assertEqual(
0,
TestModel.objects
.exclude(Q(phone__isnull=True) | Q(phone=''))
.filter(pk=pk)
.count())
self.assertEqual(
1,
TestModel.objects
.filter(Q(phone__isnull=True) | Q(phone=''))
.filter(pk=pk)
.count())
# ensure that null values do not cause uniqueness conflicts
self.assertEqual(TestModel.objects.count(), 1)
TestModel.objects.create()
self.assertEqual(TestModel.objects.count(), 2)
def test_save_unique_null_phone_to_database(self):
"""Field Test for when Null & Unique"""
from testapp.models import TestModelPhoneNU as TestModel
self.__test_nullable_field_helper(TestModel)
def test_save_unique_null_blank_phone_to_database(self):
"""Field Test for when Blank, Null & Unique"""
from testapp.models import TestModelPhoneBNU as TestModel
self.__test_nullable_field_helper(TestModel)
def test_save_unique_null_none_phone_to_database(self):
"""Field Test for when No Default, Null & Unique"""
from testapp.models import TestModelPhoneNDNU as TestModel
self.__test_nullable_field_helper(TestModel)
def test_save_unique_null_blank_none_phone_to_database(self):
"""Field Test for when Blank, No Default, Null & Unique"""
from testapp.models import TestModelPhoneBNDNU as TestModel
self.__test_nullable_field_helper(TestModel)
def __test_nullable_default_field_helper(self, TestModel):
"""Helper method for the next two tests."""
tm = TestModel()
tm.save()
pk = tm.id
tm = TestModel.objects.get(pk=pk)
self.assertEqual(str(tm.phone), '+41524242424')
self.assertEqual(
1,
TestModel.objects
.exclude(Q(phone__isnull=True) | Q(phone=''))
.filter(pk=pk)
.count())
self.assertEqual(
0,
TestModel.objects
.filter(Q(phone__isnull=True) | Q(phone=''))
.filter(pk=pk)
.count())
def test_save_unique_null_default_phone_to_database(self):
"""Field Test for when Default, Null & Unique"""
from testapp.models import TestModelPhoneDNU as TestModel
self.__test_nullable_default_field_helper(TestModel)
def test_save_unique_null_blank_default_phone_to_database(self):
"""Field Test for when Blank, Default, Null & Unique"""
from testapp.models import TestModelPhoneBDNU as TestModel
self.__test_nullable_default_field_helper(TestModel)
def __test_nullable_empty_default_field_helper(self, TestModel):
"""Helper method for the next two tests."""
tm = TestModel()
tm.save()
pk = tm.id
tm = TestModel.objects.get(pk=pk)
self.assertEqual(tm.phone, '')
self.assertEqual(
0,
TestModel.objects
.exclude(Q(phone__isnull=True) | Q(phone=''))
.filter(pk=pk)
.count())
self.assertEqual(
1,
TestModel.objects
.filter(Q(phone__isnull=True) | Q(phone=''))
.filter(pk=pk)
.count())
def test_save_unique_null_default_empty_phone_to_database(self):
"""Field Test for when Empty Default, Null & Unique"""
from testapp.models import TestModelPhoneEDNU as TestModel
self.__test_nullable_empty_default_field_helper(TestModel)
def test_save_unique_null_blank_default_empty_phone_to_database(self):
"""Field Test for when Blank, Empty Default, Null & Unique"""
from testapp.models import TestModelPhoneBEDNU as TestModel
self.__test_nullable_empty_default_field_helper(TestModel)
| hovel/django-phonenumber-field | testproject/testapp/tests.py | Python | mit | 5,964 |
# GNU Mailutils -- a suite of utilities for electronic mail
# Copyright (C) 2009-2012 Free Software Foundation, Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 3 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General
# Public License along with this library. If not, see
# <http://www.gnu.org/licenses/>.
from mailutils.c_api import body
from mailutils import stream
from mailutils.error import BodyError
class Body:
def __init__ (self, bd):
self.bd = bd
def __del__ (self):
del self.bd
def __getattr__ (self, name):
if name == 'size':
return self.get_size ()
elif name == 'lines':
return self.get_lines ()
else:
raise AttributeError, name
def __len__ (self):
return self.get_size ()
def get_size (self):
status, size = body.size (self.bd)
if status:
raise BodyError (status)
return size
def get_lines (self):
status, lines = body.lines (self.bd)
if status:
raise BodyError (status)
return lines
def get_stream (self):
status, stm = body.get_stream (self.bd)
if status:
raise BodyError (status)
return stream.Stream (stm)
| Distrotech/mailutils | python/mailutils/body.py | Python | gpl-3.0 | 1,736 |
class DfaError(Exception):
pass
tokens = []
spaces = 0
def token(tok, lexeme):
tokens.append((tok,lexeme))
def space():
global spaces
spaces += 1
def error(c):
raise DfaError("unknown char \'%s\'" % c)
actions = {
-1: error,
0: lambda _: space(),
3: lambda lexeme: token("FOR", lexeme),
6: lambda lexeme: token("NEW", lexeme)
}
# space is a separator, the start state has a transition to itself on " ", and all accepting states have transitions on " " to the start state.
example = {
(0, " "): 0,
(3, " "): 0,
(6, " "): 0,
(0, "f"): 1, (1, "o"): 2, (2, "r"): 3,
(0, "n"): 4, (4, "e"): 5, (5, "w"): 6
}
F = [3, 6]
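# NOTE: this example file calls simulate2() below without defining or
# importing it; the real implementation lives elsewhere in this repository.
# The sketch that follows is only a guess at the driver's behaviour, inferred
# from the call site (input string, transition table, start state, accepting
# states, action table) and from the comment above about spaces acting as
# separators.  It is included so the example is self-contained, not as the
# original code.
def simulate2(inp, dfa, start, accepting, acts):
    state = start
    lexeme = ""
    for c in inp:
        if (state, c) not in dfa:
            acts[-1](c)            # unknown char -> error action (expected to raise)
            return
        nxt = dfa[(state, c)]
        if nxt == start and state != start:
            acts[state](lexeme)    # separator closes a lexeme: fire the accepting-state action
            lexeme = ""
        elif nxt == start:
            acts[start](c)         # separator while in the start state counts as a space
        else:
            lexeme += c
        state = nxt
    if state in accepting:
        acts[state](lexeme)        # flush a trailing lexeme at end of input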
while True:
simulate2(raw_input(), example, 0, F, actions)
print tokens
print spaces
| dxhj/stuff-about-parsing | lexical_analysis/examples/simulate2_example.py | Python | gpl-2.0 | 729 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-10-13 17:15
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('passwordless', '0003_apppassword_name'),
]
operations = [
migrations.AlterModelOptions(
name='apppassword',
options={'ordering': ['created_on']},
),
migrations.AddField(
model_name='user',
name='jid_node',
field=models.CharField(max_length=30, null=True),
),
]
| Kromey/fbxnano | passwordless/migrations/0004_auto_20161013_0915.py | Python | mit | 594 |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numbers
import os
import signal
import select
import socket
import sys
import traceback
import time
import gc
from errno import EINTR, EAGAIN
from socket import AF_INET, SOCK_STREAM, SOMAXCONN
from signal import SIGHUP, SIGTERM, SIGCHLD, SIG_DFL, SIG_IGN, SIGINT
from pyspark.worker import main as worker_main
from pyspark.serializers import read_int, write_int
def compute_real_exit_code(exit_code):
# SystemExit's code can be integer or string, but os._exit only accepts integers
if isinstance(exit_code, numbers.Integral):
return exit_code
else:
return 1
def worker(sock):
"""
Called by a worker process after the fork().
"""
signal.signal(SIGHUP, SIG_DFL)
signal.signal(SIGCHLD, SIG_DFL)
signal.signal(SIGTERM, SIG_DFL)
# restore the handler for SIGINT,
# it's useful for debugging (show the stacktrace before exit)
signal.signal(SIGINT, signal.default_int_handler)
# Read the socket using fdopen instead of socket.makefile() because the latter
# seems to be very slow; note that we need to dup() the file descriptor because
# otherwise writes also cause a seek that makes us miss data on the read side.
infile = os.fdopen(os.dup(sock.fileno()), "rb", 65536)
outfile = os.fdopen(os.dup(sock.fileno()), "wb", 65536)
exit_code = 0
try:
worker_main(infile, outfile)
except SystemExit as exc:
exit_code = compute_real_exit_code(exc.code)
finally:
try:
outfile.flush()
except Exception:
pass
return exit_code
def manager():
# Create a new process group to corral our children
os.setpgid(0, 0)
# Create a listening socket on the AF_INET loopback interface
listen_sock = socket.socket(AF_INET, SOCK_STREAM)
listen_sock.bind(('127.0.0.1', 0))
listen_sock.listen(max(1024, SOMAXCONN))
listen_host, listen_port = listen_sock.getsockname()
# re-open stdin/stdout in 'wb' mode
stdin_bin = os.fdopen(sys.stdin.fileno(), 'rb', 4)
stdout_bin = os.fdopen(sys.stdout.fileno(), 'wb', 4)
write_int(listen_port, stdout_bin)
stdout_bin.flush()
def shutdown(code):
signal.signal(SIGTERM, SIG_DFL)
# Send SIGHUP to notify workers of shutdown
os.kill(0, SIGHUP)
exit(code)
def handle_sigterm(*args):
shutdown(1)
signal.signal(SIGTERM, handle_sigterm) # Gracefully exit on SIGTERM
signal.signal(SIGHUP, SIG_IGN) # Don't die on SIGHUP
signal.signal(SIGCHLD, SIG_IGN)
reuse = os.environ.get("SPARK_REUSE_WORKER")
# Initialization complete
try:
while True:
try:
ready_fds = select.select([0, listen_sock], [], [], 1)[0]
except select.error as ex:
if ex[0] == EINTR:
continue
else:
raise
if 0 in ready_fds:
try:
worker_pid = read_int(stdin_bin)
except EOFError:
# Spark told us to exit by closing stdin
shutdown(0)
try:
os.kill(worker_pid, signal.SIGKILL)
except OSError:
pass # process already died
if listen_sock in ready_fds:
try:
sock, _ = listen_sock.accept()
except OSError as e:
if e.errno == EINTR:
continue
raise
# Launch a worker process
try:
pid = os.fork()
except OSError as e:
if e.errno in (EAGAIN, EINTR):
time.sleep(1)
pid = os.fork() # error here will shutdown daemon
else:
outfile = sock.makefile(mode='wb')
write_int(e.errno, outfile) # Signal that the fork failed
outfile.flush()
outfile.close()
sock.close()
continue
if pid == 0:
# in child process
listen_sock.close()
try:
# Acknowledge that the fork was successful
outfile = sock.makefile(mode="wb")
write_int(os.getpid(), outfile)
outfile.flush()
outfile.close()
while True:
code = worker(sock)
if not reuse or code:
# wait for closing
try:
while sock.recv(1024):
pass
except Exception:
pass
break
gc.collect()
except:
traceback.print_exc()
os._exit(1)
else:
os._exit(0)
else:
sock.close()
finally:
shutdown(1)
if __name__ == '__main__':
manager()
| alec-heif/MIT-Thesis | spark-bin/python/pyspark/daemon.py | Python | mit | 6,127 |
import unittest
from exporters.readers.base_reader import BaseReader
from exporters.readers.random_reader import RandomReader
from .utils import meta
class BaseReaderTest(unittest.TestCase):
def setUp(self):
self.reader = BaseReader({}, meta())
def test_get_next_batch_not_implemented(self):
with self.assertRaises(NotImplementedError):
self.reader.get_next_batch()
def test_set_last_position(self):
self.reader.set_last_position(dict(position=5))
self.assertEqual(self.reader.last_position, dict(position=5))
class RandomReaderTest(unittest.TestCase):
def setUp(self):
self.options = {
'exporter_options': {
'log_level': 'DEBUG',
'logger_name': 'export-pipeline'
},
'reader': {
'name': 'exporters.readers.random_reader.RandomReader',
'options': {
'number_of_items': 1000,
'batch_size': 100
}
},
}
self.reader = RandomReader(self.options, meta())
self.reader.set_last_position(None)
def test_get_next_batch(self):
batch = list(self.reader.get_next_batch())
self.assertEqual(len(batch), self.options['reader']['options']['batch_size'])
def test_get_second_batch(self):
self.reader.get_next_batch()
batch = list(self.reader.get_next_batch())
self.assertEqual(len(batch), self.options['reader']['options']['batch_size'])
self.assertEqual(self.reader.get_metadata('read_items'),
self.options['reader']['options']['batch_size'])
def test_get_all(self):
total_items = 0
while not self.reader.finished:
batch = list(self.reader.get_next_batch())
total_items += len(batch)
self.assertEqual(total_items, self.options['reader']['options']['number_of_items'])
def test_set_last_position_none(self):
self.reader.set_last_position({'last_read': 123})
self.assertEqual({'last_read': 123}, self.reader.last_position)
| scrapinghub/exporters | tests/test_readers.py | Python | bsd-3-clause | 2,129 |
from django.conf.urls import patterns, url
from links import views
urlpatterns = patterns('links.views',
url(r'^link/settings/$', views.settings, name = 'settings'),
url(r'^link/stats/$', views.stats, name = 'stats'),
)
| ethanperez/t4k-rms | templates/dashboard/urls.py | Python | mit | 229 |
"""
matrixoff.py
This script drives a 16-pin 8x8 LED matrix using WiringPi and Python.
by Evan Thompson
January 3, 2017
https://github.com/ethom7/ledmatrix
WiringPi Rpi PIN LED PIN
0 BCM 17 11 1
1 BCM 18 12 2
2 BCM 27 13 3
3 BCM 22 15 4
4 BCM 23 16 5
5 BCM 24 18 6
6 BCM 25 22 7
7 BCM 4 7 8
8 BCM 2 3 9
9 BCM 3 5 10
10 BCM 8 24 11
11 BCM 7 26 12
21 BCM 5 29 13
13 BCM 9 21 14
MATRIX
1 2 3 4 5 6 7 8
A X X X X X X X X
B X X X X X X X X
C X X X X X X X X
D X X X X X X X X
E X X X X X X X X
F X X X X X X X X
G X X X X X X X X
H X X X X X X X X
"""
##---Imports------------------------------------------------------------------#
import sys
import wiringpi as wpi
##---Variables----------------------------------------------------------------#
# pin to wiringPi for rows and columns
ROW1 = 8
ROW2 = 13
ROW3 = 7
ROW4 = 11
ROW5 = 0
ROW6 = 6
ROW7 = 1
ROW8 = 4
COL1 = 21
COL2 = 2
COL3 = 3
COL4 = 9
COL5 = 5
COL6 = 10
COL7 = 14
COL8 = 15
HIGH = 1
LOW = 0
#list containing the pin reference for the row pins
rows = [ROW1, ROW2, ROW3, ROW4, ROW5, ROW6, ROW7, ROW8]
#list containing the pin reference for the column pins
cols = [COL1, COL2, COL3, COL4, COL5, COL6, COL7, COL8]
## to reduce possibility of incorrect setup set each pin individually
wpipinslist = [
0, 1, 2, 3, 4,
5, 6, 7, 8, 9,
10, 11, 21, 13,
14, 15
]
##---Functions----------------------------------------------------------------#
''' setup function '''
def setup():
''' http://raspi.tv/2013/how-to-use-wiringpi2-for-python-on-the-raspberry-pi-in-raspbian '''
wpi.wiringPiSetup() # use wiringPi pin numbers
#wpi.wiringPiSetupGpio() # use gpio port numbers
## wpi.pinMode(pin_number, usage_code)
## usage_code: 0 for input, 1 for output, 2 for alt. function
useage = 1
##wpipinslist is defined and set in variables
## set all pins
for pin in wpipinslist:
wpi.pinMode(pin, useage)
## turn off all pins
alloff()
##---Helper Functions---------------------------------------------------------#
''' function to set all pins off in the matrix '''
def alloff():
for pin in wpipinslist:
wpi.digitalWrite(pin, 0) # turn all pins off
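''' illustrative helper (not part of the original script): light or clear a
    single LED at row r, column c using the pin lists above.  Which side must
    be driven HIGH depends on how the matrix is wired; a row-anode /
    column-cathode matrix is assumed here, so adjust for your hardware. '''
def setled(r, c, on=True):
    wpi.digitalWrite(rows[r], HIGH if on else LOW)
    wpi.digitalWrite(cols[c], LOW if on else HIGH)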
##---Main---------------------------------------------------------------------#
def main():
setup()
# success
if __name__ == '__main__':
status = main()
sys.exit(status)
| ethom7/ledmatrix | matrixoff.py | Python | mit | 2,439 |
# mode: run
# tag: generators, lambda
try:
from builtins import next # Py3k
except ImportError:
def next(it):
return it.next()
def test_inside_lambda():
"""
>>> obj = test_inside_lambda()()
>>> next(obj)
1
>>> next(obj)
2
>>> next(obj)
Traceback (most recent call last):
StopIteration
"""
return lambda:((yield 1), (yield 2))
| larsmans/cython | tests/run/yield_inside_lambda.py | Python | apache-2.0 | 388 |
from datetime import datetime, timedelta
from django import http
from django.conf import settings
from django.core.exceptions import PermissionDenied
import mock
import pytest
from olympia.amo.tests import BaseTestCase, TestCase
from olympia.amo import decorators, get_user, set_user
from olympia.amo.urlresolvers import reverse
from olympia.users.models import UserProfile
pytestmark = pytest.mark.django_db
def test_post_required():
def func(request):
return mock.sentinel.response
g = decorators.post_required(func)
request = mock.Mock()
request.method = 'GET'
assert isinstance(g(request), http.HttpResponseNotAllowed)
request.method = 'POST'
assert g(request) == mock.sentinel.response
def test_json_view():
"""Turns a Python object into a response."""
def func(request):
return {'x': 1}
response = decorators.json_view(func)(mock.Mock())
assert isinstance(response, http.HttpResponse)
assert response.content == '{"x": 1}'
assert response['Content-Type'] == 'application/json'
assert response.status_code == 200
def test_json_view_normal_response():
"""Normal responses get passed through."""
expected = http.HttpResponseForbidden()
def func(request):
return expected
response = decorators.json_view(func)(mock.Mock())
assert expected is response
assert response['Content-Type'] == 'text/html; charset=utf-8'
def test_json_view_error():
"""json_view.error returns 400 responses."""
response = decorators.json_view.error({'msg': 'error'})
assert isinstance(response, http.HttpResponseBadRequest)
assert response.content == '{"msg": "error"}'
assert response['Content-Type'] == 'application/json'
def test_json_view_status():
def func(request):
return {'x': 1}
response = decorators.json_view(func, status_code=202)(mock.Mock())
assert response.status_code == 202
def test_json_view_response_status():
response = decorators.json_response({'msg': 'error'}, status_code=202)
assert response.content == '{"msg": "error"}'
assert response['Content-Type'] == 'application/json'
assert response.status_code == 202
class TestTaskUser(TestCase):
fixtures = ['base/users']
def test_set_task_user(self):
@decorators.set_task_user
def some_func():
return get_user()
set_user(UserProfile.objects.get(username='regularuser'))
assert get_user().pk == 999
assert some_func().pk == int(settings.TASK_USER_ID)
assert get_user().pk == 999
class TestLoginRequired(BaseTestCase):
def setUp(self):
super(TestLoginRequired, self).setUp()
self.f = mock.Mock()
self.f.__name__ = 'function'
self.request = mock.Mock()
self.request.user.is_authenticated.return_value = False
self.request.get_full_path.return_value = 'path'
def test_normal(self):
func = decorators.login_required(self.f)
response = func(self.request)
assert not self.f.called
assert response.status_code == 302
assert response['Location'] == (
'%s?to=%s' % (reverse('users.login'), 'path'))
def test_no_redirect(self):
func = decorators.login_required(self.f, redirect=False)
response = func(self.request)
assert not self.f.called
assert response.status_code == 401
def test_decorator_syntax(self):
# @login_required(redirect=False)
func = decorators.login_required(redirect=False)(self.f)
response = func(self.request)
assert not self.f.called
assert response.status_code == 401
def test_no_redirect_success(self):
func = decorators.login_required(redirect=False)(self.f)
self.request.user.is_authenticated.return_value = True
func(self.request)
assert self.f.called
class TestSetModifiedOn(TestCase):
fixtures = ['base/users']
@decorators.set_modified_on
def some_method(self, worked):
return worked
def test_set_modified_on(self):
users = list(UserProfile.objects.all()[:3])
self.some_method(True, set_modified_on=users)
for user in users:
assert UserProfile.objects.get(pk=user.pk).modified.date() == (
datetime.today().date())
def test_not_set_modified_on(self):
yesterday = datetime.today() - timedelta(days=1)
qs = UserProfile.objects.all()
qs.update(modified=yesterday)
users = list(qs[:3])
self.some_method(False, set_modified_on=users)
for user in users:
date = UserProfile.objects.get(pk=user.pk).modified.date()
assert date < datetime.today().date()
class TestPermissionRequired(TestCase):
def setUp(self):
super(TestPermissionRequired, self).setUp()
self.f = mock.Mock()
self.f.__name__ = 'function'
self.request = mock.Mock()
@mock.patch('olympia.access.acl.action_allowed')
def test_permission_not_allowed(self, action_allowed):
action_allowed.return_value = False
func = decorators.permission_required('', '')(self.f)
with self.assertRaises(PermissionDenied):
func(self.request)
@mock.patch('olympia.access.acl.action_allowed')
def test_permission_allowed(self, action_allowed):
action_allowed.return_value = True
func = decorators.permission_required('', '')(self.f)
func(self.request)
assert self.f.called
@mock.patch('olympia.access.acl.action_allowed')
def test_permission_allowed_correctly(self, action_allowed):
func = decorators.permission_required('Admin', '%')(self.f)
func(self.request)
action_allowed.assert_called_with(self.request, 'Admin', '%')
| andymckay/addons-server | src/olympia/amo/tests/test_decorators.py | Python | bsd-3-clause | 5,822 |
from __future__ import absolute_import
import numpy as np
from autograd.core import (Node, FloatNode, primitive, cast,
differentiable_ops, nondifferentiable_ops, getval)
from . import numpy_wrapper as anp
from .use_gpu_numpy import use_gpu_numpy
@primitive
def take(A, idx):
return A[idx]
def make_grad_take(ans, A, idx):
return lambda g : untake(g, idx, A)
take.defgrad(make_grad_take)
@primitive
def untake(x, idx, template):
return array_dtype_mappings[template.dtype].new_sparse_array(template, idx, x)
untake.defgrad(lambda ans, x, idx, template : lambda g : take(g, idx))
untake.defgrad_is_zero(argnums=(1, 2))
Node.__array_priority__ = 90.0
class ArrayNode(Node):
__slots__ = []
__getitem__ = take
__array_priority__ = 100.0
# Constants w.r.t float data just pass though
shape = property(lambda self: self.value.shape)
ndim = property(lambda self: self.value.ndim)
size = property(lambda self: self.value.size)
dtype = property(lambda self: self.value.dtype)
T = property(lambda self: anp.transpose(self))
def __len__(self):
return len(self.value)
@staticmethod
def zeros_like(value):
return anp.zeros(value.shape)
@staticmethod
def sum_outgrads(outgrads):
        if len(outgrads) == 1 and not isinstance(getval(outgrads[0]), SparseArray):
return outgrads[0]
else:
return primitive_sum_arrays(*outgrads)
@staticmethod
def cast(value, example):
result = arraycast(value)
if result.shape != example.shape:
result = result.reshape(example.shape)
return result
@staticmethod
def new_sparse_array(template, idx, x):
return SparseArray(template, idx, x)
def __neg__(self): return anp.negative(self)
def __add__(self, other): return anp.add( self, other)
def __sub__(self, other): return anp.subtract(self, other)
def __mul__(self, other): return anp.multiply(self, other)
def __pow__(self, other): return anp.power (self, other)
def __div__(self, other): return anp.divide( self, other)
def __mod__(self, other): return anp.mod( self, other)
def __truediv__(self, other): return anp.true_divide(self, other)
def __radd__(self, other): return anp.add( other, self)
def __rsub__(self, other): return anp.subtract(other, self)
def __rmul__(self, other): return anp.multiply(other, self)
def __rpow__(self, other): return anp.power( other, self)
def __rdiv__(self, other): return anp.divide( other, self)
def __rmod__(self, other): return anp.mod( other, self)
def __rtruediv__(self, other): return anp.true_divide(other, self)
def __eq__(self, other): return anp.equal(self, other)
def __ne__(self, other): return anp.not_equal(self, other)
def __gt__(self, other): return anp.greater(self, other)
def __ge__(self, other): return anp.greater_equal(self, other)
def __lt__(self, other): return anp.less(self, other)
def __le__(self, other): return anp.less_equal(self, other)
def new_array_node(value, tapes):
try:
return array_dtype_mappings[value.dtype](value, tapes)
except KeyError:
raise TypeError("Can't differentiate wrt numpy arrays of dtype {0}".format(value.dtype))
Node.type_mappings[anp.ndarray] = new_array_node
array_dtype_mappings = {}
for float_type in [anp.float64, anp.float32, anp.float16]:
array_dtype_mappings[anp.dtype(float_type)] = ArrayNode
Node.type_mappings[float_type] = FloatNode
if use_gpu_numpy():
@primitive
def arraycast(val):
if isinstance(val, float):
return anp.array(val)
elif anp.is_garray(val):
return anp.array(val, dtype=anp.float64)
elif anp.iscomplexobj(val):
return anp.array(anp.real(val))
else:
raise TypeError("Can't cast type {0} to array".format(type(val)))
else:
@primitive
def arraycast(val):
if isinstance(val, float):
return anp.array(val)
elif anp.iscomplexobj(val):
return anp.array(anp.real(val))
else:
raise TypeError("Can't cast type {0} to array".format(type(val)))
arraycast.defgrad(lambda ans, val: lambda g : g)
@primitive
def primitive_sum_arrays(*arrays):
new_array = type(new_array_node(arrays[0], [])).zeros_like(arrays[0]) # TODO: simplify this
for array in arrays:
if isinstance(array, SparseArray):
np.add.at(new_array, array.idx, array.val)
else:
new_array += array
return new_array
primitive_sum_arrays.gradmaker = lambda *args : lambda g : g
# These numpy.ndarray methods are just refs to an equivalent numpy function
nondiff_methods = ['all', 'any', 'argmax', 'argmin', 'argpartition',
'argsort', 'nonzero', 'searchsorted', 'round']
diff_methods = ['clip', 'compress', 'cumprod', 'cumsum', 'diagonal',
'max', 'mean', 'min', 'prod', 'ptp', 'ravel', 'repeat',
'reshape', 'squeeze', 'std', 'sum', 'swapaxes', 'take',
'trace', 'transpose', 'var']
for method_name in nondiff_methods + diff_methods:
setattr(ArrayNode, method_name, anp.__dict__[method_name])
# Flatten has no function, only a method.
setattr(ArrayNode, 'flatten', anp.__dict__['ravel'])
# Replace FloatNode operators with broadcastable versions
for method_name in differentiable_ops + nondifferentiable_ops + ['__truediv__', '__rtruediv__']:
setattr(FloatNode, method_name, ArrayNode.__dict__[method_name])
# ----- Special type for efficient grads through indexing -----
class SparseArray(object):
__array_priority__ = 150.0
def __init__(self, template, idx, val):
self.shape = template.shape
self.idx = idx
self.val = val
self.dtype = template.dtype
Node.type_mappings[SparseArray] = ArrayNode
| kcarnold/autograd | autograd/numpy/numpy_extra.py | Python | mit | 5,915 |
from rest_framework import serializers
from .models import Athlete_Finance
class FinanceSerializer(serializers.ModelSerializer):
class Meta:
model = Athlete_Finance
fields = ("__all__") | asmltd/gosports | webapp/athlete/finance/serializers.py | Python | gpl-3.0 | 208 |
import numpy as np
class PointBrowser:
"""
Click on a point to select and highlight it -- the data that
generated the point will be shown in the lower axes. Use the 'n'
and 'p' keys to browse through the next and previous points
"""
def __init__(self):
self.lastind = 0
self.text = ax.text(0.05, 0.95, 'selected: none',
transform=ax.transAxes, va='top')
self.selected, = ax.plot([xs[0]], [ys[0]], 'o', ms=12, alpha=0.4,
color='yellow', visible=False)
def onpress(self, event):
if self.lastind is None: return
if event.key not in ('n', 'p'): return
if event.key=='n': inc = 1
else: inc = -1
self.lastind += inc
self.lastind = np.clip(self.lastind, 0, len(xs)-1)
self.update()
def onpick(self, event):
if event.artist!=line: return True
N = len(event.ind)
if not N: return True
# the click locations
x = event.mouseevent.xdata
y = event.mouseevent.ydata
distances = np.hypot(x-xs[event.ind], y-ys[event.ind])
indmin = distances.argmin()
dataind = event.ind[indmin]
self.lastind = dataind
self.update()
def update(self):
if self.lastind is None: return
dataind = self.lastind
ax2.cla()
ax2.plot(X[dataind])
ax2.text(0.05, 0.9, 'mu=%1.3f\nsigma=%1.3f'%(xs[dataind], ys[dataind]),
transform=ax2.transAxes, va='top')
ax2.set_ylim(-0.5, 1.5)
self.selected.set_visible(True)
self.selected.set_data(xs[dataind], ys[dataind])
self.text.set_text('selected: %d'%dataind)
fig.canvas.draw()
if __name__ == '__main__':
import matplotlib.pyplot as plt
X = np.random.rand(100, 200)
xs = np.mean(X, axis=1)
ys = np.std(X, axis=1)
fig, (ax, ax2) = plt.subplots(2, 1)
ax.set_title('click on point to plot time series')
line, = ax.plot(xs, ys, 'o', picker=5) # 5 points tolerance
browser = PointBrowser()
fig.canvas.mpl_connect('pick_event', browser.onpick)
fig.canvas.mpl_connect('key_press_event', browser.onpress)
plt.show()
| cactusbin/nyt | matplotlib/examples/event_handling/data_browser.py | Python | unlicense | 2,233 |
import decimal
import sys
import steel
from steel import chunks
COMPRESSION_CHOICES = (
(0, 'zlib/deflate'),
)
RENDERING_INTENT_CHOICES = (
(0, 'Perceptual'),
(1, 'Relative Colorimetric'),
(2, 'Saturation'),
(3, 'Absolute Colorimetric'),
)
PHYSICAL_UNIT_CHOICES = (
(0, '<Unknown Unit>'),
(1, 'Meters'),
)
FILTER_CHOICES = (
(0, 'Adaptive Filtering'),
)
INTERLACE_CHOICES = (
(0, '<No Interlacing>'),
(1, 'Adam7'),
)
class Chunk(chunks.Chunk, encoding='ascii'):
"""
A special chunk for PNG, which puts the size before the type
and includes a CRC field for verifying data integrity.
"""
size = steel.Integer(size=4)
id = steel.String(size=4)
payload = chunks.Payload(size=size)
crc = steel.CRC32(first=id)
@property
def is_critical(self):
# Critical chunks will always have an uppercase letter for the
# first character in the type. Ancillary will always be lower.
        return self.id[0].upper() == self.id[0]
@property
def is_public(self):
# Public chunks will always have an uppercase letter for the
# second character in the type. Private will always be lower.
        return self.id[1].upper() == self.id[1]
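# A steel-independent sketch of the byte layout the Chunk structure above
# describes, using the fixed 12-byte IEND chunk that terminates every PNG
# file: a 4-byte big-endian size, the 4-byte id, the (empty) payload, then a
# CRC-32 computed from the id onward.  This helper is illustrative only and
# is not used by the structures below.
def _example_iend_chunk_bytes():
    import struct
    import zlib
    chunk_id = b'IEND'
    payload = b''
    crc = zlib.crc32(chunk_id + payload) & 0xffffffff  # 0xAE426082
    return struct.pack('>I', len(payload)) + chunk_id + struct.pack('>I', crc)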
@Chunk('IHDR')
class Header(steel.Structure):
width = steel.Integer(size=4)
height = steel.Integer(size=4)
bit_depth = steel.Integer(size=1, choices=(1, 2, 4, 8, 16))
color_type = steel.Integer(size=1, choices=(0, 2, 3, 4, 6))
compression_method = steel.Integer(size=1, choices=COMPRESSION_CHOICES)
filter_method = steel.Integer(size=1, choices=FILTER_CHOICES)
interlace_method = steel.Integer(size=1, choices=INTERLACE_CHOICES)
class HundredThousand(steel.Integer):
"""
Value is usable as a Decimal in Python, but stored
as an integer after multiplying the value by 100,000
"""
def __init__(self):
super(HundredThousand, self).__init__(size=4)
def decode(self, value):
value = super(HundredThousand, self).decode(value)
        return decimal.Decimal('0.%05d' % value)
def encode(self, obj, value):
return super(HundredThousand, self).encode(obj, int(value * 100000))
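# Arithmetic sketch of the x100,000 scaling described in HundredThousand's
# docstring (the 0.31270 figure is only an illustrative chromaticity value,
# not read from any particular file); it is not used by the chunks below.
def _example_hundred_thousand_roundtrip():
    stored = int(decimal.Decimal('0.31270') * 100000)  # written to disk as 31270
    recovered = decimal.Decimal('0.%05d' % stored)     # read back as Decimal('0.31270')
    return stored, recovered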
@Chunk('cHRM')
class Chromaticity(steel.Structure):
white_x = HundredThousand()
white_y = HundredThousand()
red_x = HundredThousand()
red_y = HundredThousand()
green_x = HundredThousand()
green_y = HundredThousand()
blue_x = HundredThousand()
blue_y = HundredThousand()
@Chunk('gAMA')
class Gamma(steel.Structure):
value = HundredThousand()
@Chunk('iCCP')
class ICCProfile(steel.Structure):
name = steel.String(encoding='latin-1')
compression = steel.Integer(size=1, choices=COMPRESSION_CHOICES)
profile = steel.Bytes(size=steel.Remainder) # TODO: decompress
@Chunk('sBIT')
class SignificantBits(steel.Structure):
data = steel.Bytes(size=steel.Remainder)
# TODO: decode based on parent Header.color_type
@Chunk('sRGB')
class sRGB(steel.Structure):
rendering_intent = steel.Integer(size=1, choices=RENDERING_INTENT_CHOICES)
class PaletteColor(steel.Structure):
red = steel.Integer(size=1)
green = steel.Integer(size=1)
blue = steel.Integer(size=1)
@Chunk('PLTE')
class Palette(steel.Structure):
colors = steel.List(steel.SubStructure(PaletteColor), size=steel.Remainder)
def __iter__(self):
return iter(self.colors)
@Chunk('bKGD')
class Background(steel.Structure):
data = steel.Bytes(size=steel.Remainder)
# TODO: decode based on parent Header.color_type
@Chunk('hIST')
class Histogram(steel.Structure):
frequencies = steel.List(steel.Integer(size=2), size=steel.Remainder)
@Chunk('tRNS')
class Transparency(steel.Structure):
data = steel.Bytes(size=steel.Remainder)
# TODO: decode based on parent Header.color_type
@Chunk('IDAT', multiple=True)
class Data(steel.Structure):
data = steel.Bytes(size=steel.Remainder)
@Chunk('pHYs')
class PhysicalDimentions(steel.Structure):
x = steel.Integer(size=4)
y = steel.Integer(size=4)
unit = steel.Integer(size=1, choices=PHYSICAL_UNIT_CHOICES)
class SuggestedPaletteEntry(steel.Structure):
red = steel.Integer(size=2)
green = steel.Integer(size=2)
blue = steel.Integer(size=2)
alpha = steel.Integer(size=2)
frequency = steel.Integer(size=2)
# TODO: figure out a good way to handle size based on sample_depth below
@Chunk('sPLT')
class SuggestedPalette(steel.Structure):
name = steel.String(encoding='latin-1')
sample_depth = steel.Integer(size=1)
colors = steel.List(steel.SubStructure(SuggestedPaletteEntry), size=steel.Remainder)
@Chunk('tIME')
class Timestamp(steel.Structure):
year = steel.Integer(size=2)
month = steel.Integer(size=1)
day = steel.Integer(size=1)
hour = steel.Integer(size=1)
minute = steel.Integer(size=1)
second = steel.Integer(size=1)
# TODO: convert this into a datetime object
@Chunk('tEXt', multiple=True)
class Text(steel.Structure, encoding='latin-1'):
keyword = steel.String()
content = steel.String(size=steel.Remainder)
@Chunk('zTXt', multiple=True)
class CompressedText(steel.Structure, encoding='latin-1'):
keyword = steel.String()
compression = steel.Integer(size=1, choices=COMPRESSION_CHOICES)
content = steel.Bytes(size=steel.Remainder) # TODO: decompress
@Chunk('iTXt', multiple=True)
class InternationalText(steel.Structure, encoding='utf8'):
keyword = steel.String()
is_compressed = steel.Integer(size=1)
compression = steel.Integer(size=1, choices=COMPRESSION_CHOICES)
language = steel.String()
translated_keyword = steel.String()
content = steel.Bytes(size=steel.Remainder) # TODO: decompress
@Chunk('IEND')
class End(steel.Structure):
pass
class PNG(steel.Structure):
signature = steel.FixedString(b'\x89PNG\x0d\x0a\x1a\x0a')
header = steel.SubStructure(Header)
chunks = chunks.ChunkList(Chunk, (Header, Chromaticity, Gamma, ICCProfile,
SignificantBits, sRGB, Palette, Background,
Histogram, Transparency, PhysicalDimentions,
SuggestedPalette, Data, Timestamp, Text,
CompressedText, InternationalText), terminator=End)
@property
def data_chunks(self):
for chunk in self.chunks:
if isinstance(chunk, Data):
yield chunk
if __name__ == '__main__':
png = PNG(open(sys.argv[1], 'rb'))
print('%s x %s' % (png.header.width, png.header.height))
print(list(png.data_chunks))
| gulopine/steel | examples/images/png.py | Python | bsd-3-clause | 6,898 |
#!/usr/bin/env python
''' file: custom/optical.py '''
from mininet.topo import Topo
from mininet.net import Mininet
from mininet.cli import CLI
from mininet.log import setLogLevel, info
from mininet.link import Intf, Link
from mininet.node import RemoteController
class NullIntf( Intf ):
"A dummy interface with a blank name that doesn't do any configuration"
def __init__( self, name, **params ):
self.name = ''
class NullLink( Link ):
"A dummy link that doesn't touch either interface"
def makeIntfPair( cls, intf1, intf2, *args, **kwargs ):
pass
def delete( self ):
pass
class OpticalTopo(Topo):
def addIntf( self, switch, intfName ):
"Add intf intfName to switch"
self.addLink( switch, switch, cls=NullLink,
intfName1=intfName, cls2=NullIntf )
def __init__(self):
# Initialize topology
Topo.__init__(self)
# Add hosts and switches
h1 = self.addHost('h1')
h2 = self.addHost('h2')
h3 = self.addHost('h3')
h4 = self.addHost('h4')
h5 = self.addHost('h5')
h6 = self.addHost('h6')
s1 = self.addSwitch('s1', dpid="0000ffffffff0001")
s2 = self.addSwitch('s2', dpid="0000ffffffff0002")
s3 = self.addSwitch('s3', dpid="0000ffffffff0003")
s4 = self.addSwitch('s4', dpid="0000ffffffff0004")
s5 = self.addSwitch('s5', dpid="0000ffffffff0005")
s6 = self.addSwitch('s6', dpid="0000ffffffff0006")
# Add links from hosts to OVS
self.addLink(s1, h1)
self.addLink(s2, h2)
self.addLink(s3, h3)
self.addLink(s4, h4)
self.addLink(s5, h5)
self.addLink(s6, h6)
# add links from ovs to linc-oe
# sorry about the syntax :(
self.addIntf(s1,'tap29')
self.addIntf(s2,'tap30')
self.addIntf(s3,'tap31')
self.addIntf(s4,'tap32')
self.addIntf(s5,'tap33')
self.addIntf(s6,'tap34')
# if you use, sudo mn --custom custom/optical.py, then register the topo:
topos = {'optical': ( lambda: OpticalTopo() )}
def run():
c = RemoteController('c','127.0.0.1',6653)
net = Mininet( topo=OpticalTopo(),controller=None,autoSetMacs=True)
net.addController(c)
net.start()
#installStaticFlows( net )
CLI( net )
net.stop()
# if the script is run directly (sudo custom/optical.py):
if __name__ == '__main__':
setLogLevel('info')
run()
| mengmoya/onos | tools/test/topos/optical2.py | Python | apache-2.0 | 2,477 |
#!/usr/bin/env python
"""
--------------------------------------------------------------------------------
Created: Jackson Lee 11/4/14
This script reads in a tab delimited file of annotations and queries
asynchronously the MGRAST REST API to parse back the original ncbi tax_id entry.
This script then uses the ncbi tax_id to query the taxonomic lineage.
Input file format:
query sequence id hit m5nr id (md5sum) percentage identity alignment length, number of mismatches number of gap openings query start query end hit start hit end e-value bit score semicolon separated list of annotations
mgm4581121.3|contig-1350000035_45_1_2592_+ 0000679ceb3fc9c950779468e06329a7 61.03 136 53 654 789 366 501 6.80E-44 175 hyalin repeat protein
mgm4581121.3|contig-18000183_1_1_2226_+ 0000679ceb3fc9c950779468e06329a7 64.44 45 16 525 569 457 501 1.70E-08 57 hyalin repeat protein
['Download complete. 78538 rows retrieved']
MGRAST REST API:
http://api.metagenomics.anl.gov/m5nr/md5/<M5nr MD5 hash>?source=GenBank
e.g. http://api.metagenomics.anl.gov/m5nr/md5/000821a2e2f63df1a3873e4b280002a8?source=GenBank
resources:
http://api.metagenomics.anl.gov/api.html#m5nr
http://angus.readthedocs.org/en/2014/howe-mgrast.html
Returns:
{"next":null,"prev":null,"version":"10","url":"http://api.metagenomics.anl.gov//m5nr/md5/000821a2e2f63df1a3873e4b280002a8?source=GenBank&offset=0","data":[{"source":"GenBank","function":"sulfatase","ncbi_tax_id":399741,"accession":"ABV39241.1","type":"protein","organism":"Serratia proteamaculans 568","md5":"000821a2e2f63df1a3873e4b280002a8","alias":["GI:157320144"]}],"limit":10,"total_count":1,"offset":0}
This output is then stored in a buffer, parsed for the ncbi tax_id, and used to fetch the XML-based lineage:
http://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi?db=taxonomy&id=399741
<TaxaSet><Taxon><TaxId>399741</TaxId><ScientificName>Serratia proteamaculans 568</ScientificName><OtherNames><EquivalentName>Serratia proteamaculans str. 568</EquivalentName><EquivalentName>Serratia proteamaculans strain 568</EquivalentName></OtherNames><ParentTaxId>28151</ParentTaxId><Rank>no rank</Rank><Division>Bacteria</Division><GeneticCode><GCId>11</GCId><GCName>Bacterial, Archaeal and Plant Plastid</GCName></GeneticCode><MitoGeneticCode><MGCId>0</MGCId><MGCName>Unspecified</MGCName></MitoGeneticCode><Lineage>cellular organisms; Bacteria; Proteobacteria; Gammaproteobacteria; Enterobacteriales; Enterobacteriaceae; Serratia; Serratia proteamaculans</Lineage>
Output file format:
A tab delimited file of
contig-faa-name\tlineage
an error.log of mismatches from both MGRAST and NCBI is also generated
--------------------------------------------------------------------------------
usage: query_ncbi_lineage_from_mgrast_md5.py -i mgrast_organism.txt -o output.file
"""
#-------------------------------------------------------------------------------
#
##http thread pool from: http://stackoverflow.com/questions/2632520/what-is-the-fastest-way-to-send-100-000-http-requests-in-python
#-------------------------------------------------------------------------------
#-------------------------------------------------------------------------------
#Header - Linkers, Libs, Constants
from string import strip
from argparse import ArgumentParser, RawDescriptionHelpFormatter
from threading import Thread
import sys
import time
import requests
import json
from Queue import Queue
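# Illustrative single-threaded lookup that makes the two-step REST flow from
# the docstring explicit (MG-RAST md5 -> ncbi_tax_id -> EUtils lineage).  It
# is not called by the threaded pipeline below; the helper name is ours, and
# the URLs and field names simply mirror the worker code further down.
def lookup_lineage_once(md5_hash):
    mgrast_url = 'http://api.metagenomics.anl.gov/m5nr/md5/' + md5_hash + '?source=RefSeq'
    mgrast_data = json.loads(requests.get(url=mgrast_url, timeout=10).text)
    if not mgrast_data['data'] or 'ncbi_tax_id' not in mgrast_data['data'][0]:
        return None
    tax_id = mgrast_data['data'][0]['ncbi_tax_id']
    eutils_url = ('http://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi'
                  '?db=taxonomy&id=' + str(tax_id))
    eutils_text = requests.get(url=eutils_url, timeout=10).text
    if '<Lineage>' not in eutils_text:
        return None
    return eutils_text.split('Lineage>')[1][0:-2]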
#-------------------------------------------------------------------------------
#function declarations
def doWork():
while not exitapp:
id, name, mgrast_urlstring = q.get()
if id % 100 == 0:
print 'Query: HTTP Thread: ' + str(id) + ' started.'
try:
mgrast_response = requests.get(url=mgrast_urlstring, timeout=10)
if mgrast_response.status_code == 200:
json_data = json.loads(mgrast_response.text)
if json_data['data']!= []:
if 'ncbi_tax_id' in json_data['data'][0]:
eutils_urlstring = 'http://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi?db=taxonomy&id=' + str(json_data['data'][0]['ncbi_tax_id'])
eutils_response = requests.get(url=eutils_urlstring, timeout=10)
if eutils_response.status_code == 200:
if '<Lineage>' in eutils_response.text:
output_dict[name] = eutils_response.text.split('Lineage>')[1][0:-2]
else:
output_dict[name] = 'No NCBI'
else:
print 'HTTP error, Thread: ' + str(id) + ' in eutils worker with error: ' + eutils_response.reason
                            logfile.write(str(id) + '\t' + eutils_urlstring + '\t' + eutils_response.reason + '\n')
raise
else:
output_dict[name] = 'No MGRAST tax ID'
else:
output_dict[name] = 'No MGRAST source data'
else:
print 'HTTP error, Thread: ' + str(id) + ' in MG-RAST worker with error: ' + mgrast_response.reason
                logfile.write(str(id) + '\t' + mgrast_urlstring + '\t' + mgrast_response.reason + '\n')
raise
except:
print 'Thread: ' + str(id) + '. Error. '
print sys.exc_info()[0]
q.task_done()
#-------------------------------------------------------------------------------
#Body
print "Running..."
if __name__ == '__main__':
parser = ArgumentParser(usage = "query_ncbi_lineage_from_mgrast_md5.py -i \
mgrast_organism.txt -o output.file",
description=__doc__,
formatter_class=RawDescriptionHelpFormatter)
parser.add_argument("-i", "--input_file", action="store",
dest="inputfilename",
help="tab-delimited MGRAST organism file")
parser.add_argument("-o", "--output_filename", action="store",
dest="outputfilename",
help="tab-delimited output file")
options = parser.parse_args()
mandatories = ["outputfilename","inputfilename"]
for m in mandatories:
if not options.__dict__[m]:
print "\nError: Missing Arguments\n"
parser.print_help()
exit(-1)
inputfilename = options.inputfilename
outputfilename = options.outputfilename
infile_list = []
with open(inputfilename,'U') as infile:
infile_list = [line.strip().split('\t') for line in infile]
infile.close()
urlpool = []
name_list = []
for entry in infile_list[1:-1]:
contig_name = entry[0]
md5_hash = entry[1]
urlpool.append([contig_name, 'http://api.metagenomics.anl.gov/m5nr/md5/' + md5_hash + '?source=RefSeq'])
name_list.append(contig_name)
concurrent = 10
exitapp = False
output_dict = {}
print "Querying MGRAST REST API Service..."
with open('./' + outputfilename + '.errorlog.txt','w') as logfile:
q = Queue(concurrent * 2)
for i in range(concurrent):
t = Thread(target=doWork)
t.daemon = True
time.sleep(1)
t.start()
try:
for id, url_load in enumerate(urlpool):
q.put([id] + url_load)
q.join()
except KeyboardInterrupt:
exitapp = True
sys.exit(1)
    logfile.close()
print "Matching taxonomies and writing..."
with open(outputfilename, 'w') as outfile:
for name in name_list:
if name in output_dict:
outfile.write(name + '\t' + output_dict[name] + '\n')
else:
outfile.write(name + '\t' + 'None\n')
outfile.close()
print "Done!"
| leejz/meta-omics-scripts | query_ncbi_lineage_from_mgrast_md5.py | Python | mit | 8,172 |
#!/usr/bin/env python3
# Copyright 2015-2016 Samsung Electronics Co., Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from litmus.device.device import device
class devicexu3(device):
"""docstring for device"""
pass
| dhs-shine/litmus | litmus/device/devicexu3.py | Python | apache-2.0 | 728 |
#!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import cPickle
# SparseMatrix is a versatile class that offers a wide range of functionality.
# This tutorial will introduce you to the main features of SparseMatrix.
# SparseMatrix is located in nupic.bindings.math, and here is the import you need:
from nupic.bindings.math import *
# 1. Types of sparse matrices:
# ===========================
# There are three types of SparseMatrix, depending on the precision you need
# in your application: 32 and 32 bits. To create a SparseMatrix holding
# floating point values of the desired precision, simply specify it as the
# 'dtype' parameter in the constructor:
s = SparseMatrix(dtype='Float32')
# 2. Global Epsilon:
# =================
# By default, NuPIC is compiled to handle only 32 bits of precision at max,
# and sparse matrices consider a floating point value to be zero if it's less than
# 1e-6 (the best precision possible with 32 bits floats). This value of 1e-6 is
# called "epsilon", and it is a global value used throughout NuPIC to deal with
# near-zero floating point numbers.
# If this is not enough, NuPIC can be recompiled to access more precision.
# With NTA_DOUBLE_PRECISION or NTA_QUAD_PRECISION set at compile time, NuPIC can
# use 64 or 128 bits, respectively, to represent floating point values. The global epsilon can
# then be set to smaller values via the variable nupic::Epsilon in nupic/math/math.hpp
print '\nGlobal epsilon :', getGlobalEpsilon()
# 3. Creation of sparse matrices:
# ==============================
# There are several convenient ways to create sparse matrices.
# You can create a SparseMatrix by passing it a 2D array:
s = SparseMatrix([[1,2],[3,4]], dtype='Float32')
print '\nFrom array 32\n', s
# ... or by passing it a numpy.array:
s = SparseMatrix(numpy.array([[1,2],[3,4]]),dtype='Float32')
print '\nFrom numpy array 32\n', s
# ... or by using the SM32 shortcut:
s = SM32([[1,2],[3,4]])
print '\nWith shortcut 32\n', s
# It is also possible to create an empty SparseMatrix, or a copy of another
# SparseMatrix, or a SparseMatrix from a string in CSR format:
s_empty = SM32()
print '\nEmpty sparse matrix\n', s_empty
s_string = SM32('sm_csr_1.5 26 2 2 4 2 0 1 1 2 2 0 3 1 4')
print '\nSparse matrix from string\n', s_string
# A sparse matrix can be converted to a dense one via toDense:
a = numpy.array(s_string.toDense())
print '\ntoDense\n', a
# To set a sparse matrix from a dense one, one can use fromDense:
s = SM32()
s.fromDense(numpy.random.random((4,4)))
print '\nfromDense\n', s
# A sparse matrix can be pickled:
cPickle.dump(s, open('sm.txt', 'wb'))
s2 = cPickle.load(open('sm.txt', 'rb'))
print '\nPickling\n', s2
# 4. Simple queries:
# =================
# You can print a SparseMatrix, and query it for its number of rows, columns,
# non-zeros per row or column... There are many query methods available.
# All row operations are mirrored by the equivalent column operations
# Most operations are available either for a given row, or a given col, or
# all rows or all cols simultaneously. All col operations can be pretty efficient,
# even if the internal storage is CSR.
s = SM32(numpy.random.random((4,4)))
s.threshold(.5)
print '\nPrint\n', s
print '\nNumber of rows ', s.nRows()
print 'Number of columns ', s.nCols()
print 'Is matrix zero? ', s.isZero()
print 'Total number of non zeros ', s.nNonZeros()
print 'Sum of all values ', s.sum()
print 'Prod of non-zeros ', s.prod()
print 'Maximum value and its location ', s.max()
print 'Minimum value and its location ', s.min()
print 'Number of non-zeros on row 0 ', s.nNonZerosOnRow(0)
print 'If first row zero? ', s.isRowZero(0)
print 'Number of non-zeros on each row ', s.nNonZerosPerRow()
print 'Minimum on row 0 ', s.rowMin(0)
print 'Minimum values and locations for all rows', s.rowMin()
print 'Maximum on row 0 ', s.rowMax(0)
print 'Maximum values and locations for all rows', s.rowMax()
print 'Sum of values on row 0 ', s.rowSum(0)
print 'Sum of each row ', s.rowSums()
print 'Product of non-zeros on row 1', s.rowProd(1)
print 'Product of each row ', s.rowProds()
print 'Number of non-zeros on col 0 ', s.nNonZerosOnCol(0)
print 'If first col zero? ', s.isColZero(0)
print 'Number of non-zeros on each col ', s.nNonZerosPerCol()
print 'Minimum on col 0 ', s.colMin(0)
print 'Minimum values and locations for all cols', s.colMin()
print 'Maximum on col 0 ', s.colMax(0)
print 'Maximum values and locations for all cols', s.colMax()
print 'Sum of values on col 0 ', s.colSum(0)
print 'Sum of each col ', s.colSums()
print 'Product of non-zeros on col 1', s.colProd(1)
print 'Product of each col ', s.colProds()
# 5. Element access and slicing:
# =============================
# It is very easy to access individual elements:
print '\n', s
print '\ns[0,0] = ', s[0,0], 's[1,1] = ', s[1,1]
s[0,0] = 3.5
print 'Set [0,0] to 3.5 ', s[0,0]
# There are powerful slicing operations:
print '\ngetOuter\n', s.getOuter([0,2],[0,2])
s.setOuter([0,2],[0,2],[[1,2],[3,4]])
print '\nsetOuter\n', s
s.setElements([0,1,2],[0,1,2],[1,1,1])
print '\nsetElements\n', s
print '\ngetElements\n', s.getElements([0,1,2],[0,1,2])
s2 = s.getSlice(0,2,0,3)
print '\ngetSlice\n', s2
s.setSlice(1,1, s2)
print '\nsetSlice\n', s
# A whole row or col can be set to zero with one call:
s.setRowToZero(1)
print '\nsetRowToZero\n', s
s.setColToZero(1)
print '\nsetColToZero\n', s
# Individual rows and cols can be retrieved as sparse or dense vectors:
print '\nrowNonZeros ', s.rowNonZeros(0)
print 'colNonZeros ', s.colNonZeros(0)
print 'getRow ', s.getRow(0)
print 'getCol ', s.getCol(0)
# 6. Dynamic features:
# ===================
# SparseMatrix is very dynamic. Rows and columns can be added and deleted.
# A sparse matrix can also be resized and reshaped.
print '\n', s
s.reshape(2,8)
print '\nreshape 2 8\n', s
s.reshape(8,2)
print '\nreshape 8 2\n', s
s.reshape(1,16)
print '\nreshape 1 16\n', s
s.reshape(4,4)
print '\nreshape 4 4\n', s
s.resize(5,5)
print '\nresize 5 5\n', s
s.resize(3,3)
print '\nresize 3 3\n', s
s.resize(4,4)
print '\nresize 4 4\n', s
s.deleteRows([3])
print '\ndelete row 3\n', s
s.deleteCols([1])
print '\ndelete col 1\n', s
s.addRow([1,2,3])
print '\nadd row 1 2 3\n', s
s.addCol([1,2,3,4])
print '\nadd col 1 2 3 4\n', s
s.deleteRows([0,3])
print '\ndelete rows 0 and 3\n', s
s.deleteCols([1,2])
print '\ndelete cols 1 and 2\n', s
# It is also possible to threshold a row, column or whole sparse matrix.
# This operation usually introduces zeros.
s.normalize()
print '\n', s
s.thresholdRow(0, .1)
print '\nthreshold row 0 .1\n', s
s.thresholdCol(1, .1)
print '\nthreshold col 1 .1\n', s
s.threshold(.1)
print '\nthreshold .1\n', s
# 7. Element wise operations:
# ==========================
# Element wise operations are prefixed with 'element'. There are row-oriented
# column-oriented and whole matrix element-wise operations.
s = SM32(numpy.random.random((4,4)))
print '\n', s
s.elementNZInverse()
print '\nelementNZInverse\n', s
s.elementNZLog()
print '\nelementNZLog\n', s
s = abs(s)
print '\nabs\n', s
s.elementSqrt()
print '\nelementSqrt\n', s
s.add(4)
print '\nadd 4\n', s
s.normalizeRow(1, 10)
print '\nnormalizeRow 1 10\n', s
print 'sum row 1 = ', s.rowSum(1)
s.normalizeCol(0, 3)
print '\nnormalizeCol 0 3\n', s
print 'sum col 0 = ', s.colSum(0)
s.normalize(5)
print '\nnormalize to 5\n', s
print 'sum = ', s.sum()
s.normalize()
print '\nnormalize\n', s
print 'sum = ', s.sum()
s.transpose()
print '\ntranspose\n', s
s2 = SM32(numpy.random.random((3,4)))
print '\n', s2
s2.transpose()
print '\ntranspose rectangular\n', s2
s2.transpose()
print '\ntranspose rectangular again\n', s2
# 8. Matrix vector and matrix matrix operations:
# =============================================
# SparseMatrix provides matrix vector multiplication on the right and left,
# as well as specialized operations between the a vector and the rows
# of the SparseMatrix.
x = numpy.array([1,2,3,4])
print '\nx = ', x
print 'Product on the right:\n', s.rightVecProd(x)
print 'Product on the left:\n', s.leftVecProd(x)
print 'Product of x elements corresponding to nz on each row:\n', s.rightVecProdAtNZ(x)
print 'Product of x elements and nz:\n', s.rowVecProd(x)
print 'Max of x elements corresponding to nz:\n', s.vecMaxAtNZ(x)
print 'Max of products of x elements and nz:\n', s.vecMaxProd(x)
print 'Max of elements of x corresponding to nz:\n', s.vecMaxAtNZ(x)
# axby computes linear combinations of rows and vectors
s.axby(0, 1.5, 1.5, x)
print '\naxby 0 1.5 1.5\n', s
s.axby(1.5, 1.5, x)
print '\naxby 1.5 1.5\n', s
# The multiplication operator can be used both for inner and outer product,
# depending on the shape of its operands, when using SparseMatrix instances:
s_row = SM32([[1,2,3,4]])
s_col = SM32([[1],[2],[3],[4]])
print '\nInner product: ', s_row * s_col
print '\nOuter product:\n', s_col * s_row
# SparseMatrix supports matrix matrix multiplication:
s1 = SM32(numpy.random.random((4,4)))
s2 = SM32(numpy.random.random((4,4)))
print '\nmatrix matrix multiplication\n', s1 * s2
# The block matrix vector multiplication treats the matrix as if it were
# a collection of narrower matrices. The following multiplies a1 by x and then a2 by x,
# where a1 is the sub-matrix of size (4,2) obtained by considering
# only the first two columns of a, and a2 the sub-matrix obtained by considering only
# the last two columns of x.
a = SM32([[1,2,3,4],[5,6,7,8],[9,10,11,12],[13,14,15,16]])
x = [1,2,3,4]
print a.blockRightVecProd(2, x)
# To do an element multiplication of two matrices, do:
print a
b = SM32(numpy.random.randint(0,2,(4,4)))
print b
a.elementNZMultiply(b)
print a
# In general, the "element..." operations implement element by element operations.
# 9. Arithmetic operators:
# =======================
# It is possible to use all 4 arithmetic operators, with scalars or matrices:
print '\ns + 3\n', s + 3
print '\n3 + s\n', 3 + s
print '\ns - 1\n', s - 1
print '\n1 - s\n', 1 - s
print '\ns + s\n', s + s
print '\ns * 3\n', s * 3
print '\n3 * s\n', 3 * s
print '\ns * s\n', s * s
print '\ns / 3.1\n', s / 3.1
# ... and to write arbitrarily linear combinations of sparse matrices:
print '\ns1 + 2 * s - s2 / 3.1\n', s1 + 2 * s - s2 / 3.1
# In place operators are supported:
s += 3.5
print '\n+= 3.5\n', s
s -= 3.2
print '\n-= 3.2\n', s
s *= 3.1
print '\n*= 3.1\n', s
s /= -1.5
print '\n/= -1.5\n', s
# 10. Count/find:
# ==============
# Use countWhereEqual and whereEqual to count or find the elements that have
# a specific value. The first four parameters define a box in which to look:
# [begin_row, end_row) X [begin_col, end _col). The indices returned by whereEqual
# are relative to the orignal matrix. countWhereEqual is faster than using len()
# on the list returned by whereEqual.
s = SM32(numpy.random.randint(0,3,(5,5)))
print '\nThe matrix is now:\n', s
print '\nNumber of elements equal to 0=', s.countWhereEqual(0,5,0,5,0)
print 'Number of elements equal to 1=', s.countWhereEqual(0,5,0,5,1)
print 'Number of elements equal to 2=', s.countWhereEqual(0,5,0,5,2)
print '\nIndices of the elements == 0:', s.whereEqual(0,5,0,5,0)
print '\nIndices of the elements == 1:', s.whereEqual(0,5,0,5,1)
print '\nIndices of the elements == 2:', s.whereEqual(0,5,0,5,2)
# ... and there is even more:
print '\nAll ' + str(len(dir(s))) + ' methods:\n', dir(s)
| blueburningcoder/nupic | examples/bindings/sparse_matrix_how_to.py | Python | agpl-3.0 | 12,347 |
from typing import List
class Solution:
def longestCommonPrefix(self, strs: List[str]) -> str:
if not strs:
return ""
length = len(strs[0])
for each in strs[1:]:
            compare_len = min(length, len(each))
            for index in range(compare_len):
if strs[0][index] != each[index]:
length = index
break
else:
length = compare_len
return strs[0][:length]
| BedivereZero/LeeCode | algorithms/0014-longest-common-prefix.py | Python | gpl-3.0 | 470 |
# -*- coding: utf-8 -*-
# Author: Steven J. Bethard <[email protected]>.
# flake8: noqa
"""Command-line parsing library
This module is an optparse-inspired command-line parsing library that:
- handles both optional and positional arguments
- produces highly informative usage messages
- supports parsers that dispatch to sub-parsers
The following is a simple usage example that sums integers from the
command-line and writes the result to a file::
parser = argparse.ArgumentParser(
description='sum the integers at the command line')
parser.add_argument(
'integers', metavar='int', nargs='+', type=int,
help='an integer to be summed')
parser.add_argument(
'--log', default=sys.stdout, type=argparse.FileType('w'),
help='the file where the sum should be written')
args = parser.parse_args()
args.log.write('%s' % sum(args.integers))
args.log.close()
The module contains the following public classes:
- ArgumentParser -- The main entry point for command-line parsing. As the
example above shows, the add_argument() method is used to populate
the parser with actions for optional and positional arguments. Then
the parse_args() method is invoked to convert the args at the
command-line into an object with attributes.
- ArgumentError -- The exception raised by ArgumentParser objects when
there are errors with the parser's actions. Errors raised while
parsing the command-line are caught by ArgumentParser and emitted
as command-line messages.
- FileType -- A factory for defining types of files to be created. As the
example above shows, instances of FileType are typically passed as
the type= argument of add_argument() calls.
- Action -- The base class for parser actions. Typically actions are
selected by passing strings like 'store_true' or 'append_const' to
the action= argument of add_argument(). However, for greater
customization of ArgumentParser actions, subclasses of Action may
be defined and passed as the action= argument.
- HelpFormatter, RawDescriptionHelpFormatter, RawTextHelpFormatter,
ArgumentDefaultsHelpFormatter -- Formatter classes which
may be passed as the formatter_class= argument to the
ArgumentParser constructor. HelpFormatter is the default,
RawDescriptionHelpFormatter and RawTextHelpFormatter tell the parser
not to change the formatting for help text, and
ArgumentDefaultsHelpFormatter adds information about argument defaults
to the help.
All other classes in this module are considered implementation details.
(Also note that HelpFormatter and RawDescriptionHelpFormatter are only
considered public as object names -- the API of the formatter objects is
still considered an implementation detail.)
"""
__version__ = '1.2.1'
__all__ = [
'ArgumentParser',
'ArgumentError',
'ArgumentTypeError',
'FileType',
'HelpFormatter',
'ArgumentDefaultsHelpFormatter',
'RawDescriptionHelpFormatter',
'RawTextHelpFormatter',
'Namespace',
'Action',
'ONE_OR_MORE',
'OPTIONAL',
'PARSER',
'REMAINDER',
'SUPPRESS',
'ZERO_OR_MORE',
]
import copy as _copy
import os as _os
import re as _re
import sys as _sys
import textwrap as _textwrap
from gettext import gettext as _
try:
set
except NameError:
# for python < 2.4 compatibility (sets module is there since 2.3):
from sets import Set as set
try:
basestring
except NameError:
basestring = str
try:
sorted
except NameError:
# for python < 2.4 compatibility:
def sorted(iterable, reverse=False):
        result = list(iterable)
        result.sort()
if reverse:
result.reverse()
return result
def _callable(obj):
return hasattr(obj, '__call__') or hasattr(obj, '__bases__')
SUPPRESS = '==SUPPRESS=='
OPTIONAL = '?'
ZERO_OR_MORE = '*'
ONE_OR_MORE = '+'
PARSER = 'A...'
REMAINDER = '...'
_UNRECOGNIZED_ARGS_ATTR = '_unrecognized_args'
# =============================
# Utility functions and classes
# =============================
class _AttributeHolder(object):
"""Abstract base class that provides __repr__.
The __repr__ method returns a string in the format::
ClassName(attr=name, attr=name, ...)
The attributes are determined either by a class-level attribute,
'_kwarg_names', or by inspecting the instance __dict__.
"""
def __repr__(self):
type_name = type(self).__name__
arg_strings = []
for arg in self._get_args():
arg_strings.append(repr(arg))
for name, value in self._get_kwargs():
arg_strings.append('%s=%r' % (name, value))
return '%s(%s)' % (type_name, ', '.join(arg_strings))
def _get_kwargs(self):
return sorted(self.__dict__.items())
def _get_args(self):
return []
def _ensure_value(namespace, name, value):
if getattr(namespace, name, None) is None:
setattr(namespace, name, value)
return getattr(namespace, name)
# ===============
# Formatting Help
# ===============
class HelpFormatter(object):
"""Formatter for generating usage messages and argument help strings.
Only the name of this class is considered a public API. All the methods
provided by the class are considered an implementation detail.
"""
def __init__(
self,
prog,
indent_increment=2,
max_help_position=24,
width=None,
):
# default setting for width
if width is None:
try:
width = int(_os.environ['COLUMNS'])
except (KeyError, ValueError):
width = 80
width -= 2
self._prog = prog
self._indent_increment = indent_increment
self._max_help_position = max_help_position
self._width = width
self._current_indent = 0
self._level = 0
self._action_max_length = 0
self._root_section = self._Section(self, None)
self._current_section = self._root_section
self._whitespace_matcher = _re.compile(r'\s+')
self._long_break_matcher = _re.compile(r'\n\n\n+')
# ===============================
# Section and indentation methods
# ===============================
def _indent(self):
self._current_indent += self._indent_increment
self._level += 1
def _dedent(self):
self._current_indent -= self._indent_increment
assert self._current_indent >= 0, 'Indent decreased below 0.'
self._level -= 1
class _Section(object):
def __init__(self, formatter, parent, heading=None):
self.formatter = formatter
self.parent = parent
self.heading = heading
self.items = []
def format_help(self):
# format the indented section
if self.parent is not None:
self.formatter._indent()
join = self.formatter._join_parts
for func, args in self.items:
func(*args)
item_help = join([func(*args) for func, args in self.items])
if self.parent is not None:
self.formatter._dedent()
# return nothing if the section was empty
if not item_help:
return ''
# add the heading if the section was non-empty
if self.heading is not SUPPRESS and self.heading is not None:
current_indent = self.formatter._current_indent
heading = '%*s%s:\n' % (current_indent, '', self.heading)
else:
heading = ''
# join the section-initial newline, the heading and the help
return join(['\n', heading, item_help, '\n'])
def _add_item(self, func, args):
self._current_section.items.append((func, args))
# ========================
# Message building methods
# ========================
def start_section(self, heading):
self._indent()
section = self._Section(self, self._current_section, heading)
self._add_item(section.format_help, [])
self._current_section = section
def end_section(self):
self._current_section = self._current_section.parent
self._dedent()
def add_text(self, text):
if text is not SUPPRESS and text is not None:
self._add_item(self._format_text, [text])
def add_usage(self, usage, actions, groups, prefix=None):
if usage is not SUPPRESS:
args = usage, actions, groups, prefix
self._add_item(self._format_usage, args)
def add_argument(self, action):
if action.help is not SUPPRESS:
# find all invocations
get_invocation = self._format_action_invocation
invocations = [get_invocation(action)]
for subaction in self._iter_indented_subactions(action):
invocations.append(get_invocation(subaction))
# update the maximum item length
invocation_length = max([len(s) for s in invocations])
action_length = invocation_length + self._current_indent
self._action_max_length = max(
self._action_max_length,
action_length,
)
# add the item to the list
self._add_item(self._format_action, [action])
def add_arguments(self, actions):
for action in actions:
self.add_argument(action)
# =======================
# Help-formatting methods
# =======================
def format_help(self):
help = self._root_section.format_help()
if help:
help = self._long_break_matcher.sub('\n\n', help)
help = help.strip('\n') + '\n'
return help
def _join_parts(self, part_strings):
return ''.join([
part
for part in part_strings
if part and part is not SUPPRESS
])
def _format_usage(self, usage, actions, groups, prefix):
if prefix is None:
prefix = _('usage: ')
# if usage is specified, use that
if usage is not None:
usage = usage % dict(prog=self._prog)
# if no optionals or positionals are available, usage is just prog
elif usage is None and not actions:
usage = '%(prog)s' % dict(prog=self._prog)
# if optionals and positionals are available, calculate usage
elif usage is None:
prog = '%(prog)s' % dict(prog=self._prog)
# split optionals from positionals
optionals = []
positionals = []
for action in actions:
if action.option_strings:
optionals.append(action)
else:
positionals.append(action)
# build full usage string
format = self._format_actions_usage
action_usage = format(optionals + positionals, groups)
usage = ' '.join([s for s in [prog, action_usage] if s])
# wrap the usage parts if it's too long
text_width = self._width - self._current_indent
if len(prefix) + len(usage) > text_width:
# break usage into wrappable parts
part_regexp = r'\(.*?\)+|\[.*?\]+|\S+'
opt_usage = format(optionals, groups)
pos_usage = format(positionals, groups)
opt_parts = _re.findall(part_regexp, opt_usage)
pos_parts = _re.findall(part_regexp, pos_usage)
assert ' '.join(opt_parts) == opt_usage
assert ' '.join(pos_parts) == pos_usage
# helper for wrapping lines
def get_lines(parts, indent, prefix=None):
lines = []
line = []
if prefix is not None:
line_len = len(prefix) - 1
else:
line_len = len(indent) - 1
for part in parts:
if line_len + 1 + len(part) > text_width:
lines.append(indent + ' '.join(line))
line = []
line_len = len(indent) - 1
line.append(part)
line_len += len(part) + 1
if line:
lines.append(indent + ' '.join(line))
if prefix is not None:
lines[0] = lines[0][len(indent):]
return lines
# if prog is short, follow it with optionals or positionals
if len(prefix) + len(prog) <= 0.75 * text_width:
indent = ' ' * (len(prefix) + len(prog) + 1)
if opt_parts:
lines = get_lines([prog] + opt_parts, indent, prefix)
lines.extend(get_lines(pos_parts, indent))
elif pos_parts:
lines = get_lines([prog] + pos_parts, indent, prefix)
else:
lines = [prog]
# if prog is long, put it on its own line
else:
indent = ' ' * len(prefix)
parts = opt_parts + pos_parts
lines = get_lines(parts, indent)
if len(lines) > 1:
lines = []
lines.extend(get_lines(opt_parts, indent))
lines.extend(get_lines(pos_parts, indent))
lines = [prog] + lines
# join lines into usage
usage = '\n'.join(lines)
# prefix with 'usage:'
return '%s%s\n\n' % (prefix, usage)
def _format_actions_usage(self, actions, groups):
# find group indices and identify actions in groups
group_actions = set()
inserts = {}
for group in groups:
try:
start = actions.index(group._group_actions[0])
except ValueError:
continue
else:
end = start + len(group._group_actions)
if actions[start:end] == group._group_actions:
for action in group._group_actions:
group_actions.add(action)
if not group.required:
if start in inserts:
inserts[start] += ' ['
else:
inserts[start] = '['
inserts[end] = ']'
else:
if start in inserts:
inserts[start] += ' ('
else:
inserts[start] = '('
inserts[end] = ')'
for i in range(start + 1, end):
inserts[i] = '|'
# collect all actions format strings
parts = []
for i, action in enumerate(actions):
# suppressed arguments are marked with None
# remove | separators for suppressed arguments
if action.help is SUPPRESS:
parts.append(None)
if inserts.get(i) == '|':
inserts.pop(i)
elif inserts.get(i + 1) == '|':
inserts.pop(i + 1)
# produce all arg strings
elif not action.option_strings:
part = self._format_args(action, action.dest)
# if it's in a group, strip the outer []
if action in group_actions:
if part[0] == '[' and part[-1] == ']':
part = part[1:-1]
# add the action string to the list
parts.append(part)
# produce the first way to invoke the option in brackets
else:
option_string = action.option_strings[0]
# if the Optional doesn't take a value, format is:
# -s or --long
if action.nargs == 0:
part = '%s' % option_string
# if the Optional takes a value, format is:
# -s ARGS or --long ARGS
else:
default = action.dest.upper()
args_string = self._format_args(action, default)
part = '%s %s' % (option_string, args_string)
# make it look optional if it's not required or in a group
if not action.required and action not in group_actions:
part = '[%s]' % part
# add the action string to the list
parts.append(part)
# insert things at the necessary indices
for i in sorted(inserts, reverse=True):
parts[i:i] = [inserts[i]]
# join all the action items with spaces
text = ' '.join([item for item in parts if item is not None])
# clean up separators for mutually exclusive groups
open = r'[\[(]'
close = r'[\])]'
text = _re.sub(r'(%s) ' % open, r'\1', text)
text = _re.sub(r' (%s)' % close, r'\1', text)
text = _re.sub(r'%s *%s' % (open, close), r'', text)
text = _re.sub(r'\(([^|]*)\)', r'\1', text)
text = text.strip()
# return the text
return text
def _format_text(self, text):
if '%(prog)' in text:
text = text % dict(prog=self._prog)
text_width = self._width - self._current_indent
indent = ' ' * self._current_indent
return self._fill_text(text, text_width, indent) + '\n\n'
def _format_action(self, action):
# determine the required width and the entry label
help_position = min(
self._action_max_length + 2,
self._max_help_position,
)
help_width = self._width - help_position
action_width = help_position - self._current_indent - 2
action_header = self._format_action_invocation(action)
        # no help; start on same line and add a final newline
if not action.help:
tup = self._current_indent, '', action_header
action_header = '%*s%s\n' % tup
# short action name; start on the same line and pad two spaces
elif len(action_header) <= action_width:
tup = self._current_indent, '', action_width, action_header
action_header = '%*s%-*s ' % tup
indent_first = 0
# long action name; start on the next line
else:
tup = self._current_indent, '', action_header
action_header = '%*s%s\n' % tup
indent_first = help_position
# collect the pieces of the action help
parts = [action_header]
# if there was help for the action, add lines of help text
if action.help:
help_text = self._expand_help(action)
help_lines = self._split_lines(help_text, help_width)
parts.append('%*s%s\n' % (indent_first, '', help_lines[0]))
for line in help_lines[1:]:
parts.append('%*s%s\n' % (help_position, '', line))
# or add a newline if the description doesn't end with one
elif not action_header.endswith('\n'):
parts.append('\n')
# if there are any sub-actions, add their help as well
for subaction in self._iter_indented_subactions(action):
parts.append(self._format_action(subaction))
# return a single string
return self._join_parts(parts)
def _format_action_invocation(self, action):
if not action.option_strings:
metavar, = self._metavar_formatter(action, action.dest)(1)
return metavar
else:
parts = []
# if the Optional doesn't take a value, format is:
# -s, --long
if action.nargs == 0:
parts.extend(action.option_strings)
# if the Optional takes a value, format is:
# -s ARGS, --long ARGS
else:
default = action.dest.upper()
args_string = self._format_args(action, default)
for option_string in action.option_strings:
parts.append('%s %s' % (option_string, args_string))
return ', '.join(parts)
def _metavar_formatter(self, action, default_metavar):
if action.metavar is not None:
result = action.metavar
elif action.choices is not None:
choice_strs = [str(choice) for choice in action.choices]
result = '{%s}' % ','.join(choice_strs)
else:
result = default_metavar
def format(tuple_size):
if isinstance(result, tuple):
return result
else:
return (result, ) * tuple_size
return format
def _format_args(self, action, default_metavar):
get_metavar = self._metavar_formatter(action, default_metavar)
if action.nargs is None:
result = '%s' % get_metavar(1)
elif action.nargs == OPTIONAL:
result = '[%s]' % get_metavar(1)
elif action.nargs == ZERO_OR_MORE:
result = '[%s [%s ...]]' % get_metavar(2)
elif action.nargs == ONE_OR_MORE:
result = '%s [%s ...]' % get_metavar(2)
elif action.nargs == REMAINDER:
result = '...'
elif action.nargs == PARSER:
result = '%s ...' % get_metavar(1)
else:
formats = ['%s' for _ in range(action.nargs)]
result = ' '.join(formats) % get_metavar(action.nargs)
return result
def _expand_help(self, action):
params = dict(vars(action), prog=self._prog)
for name in list(params):
if params[name] is SUPPRESS:
del params[name]
for name in list(params):
if hasattr(params[name], '__name__'):
params[name] = params[name].__name__
if params.get('choices') is not None:
choices_str = ', '.join([str(c) for c in params['choices']])
params['choices'] = choices_str
return self._get_help_string(action) % params
def _iter_indented_subactions(self, action):
try:
get_subactions = action._get_subactions
except AttributeError:
pass
else:
self._indent()
for subaction in get_subactions():
yield subaction
self._dedent()
def _split_lines(self, text, width):
text = self._whitespace_matcher.sub(' ', text).strip()
return _textwrap.wrap(text, width)
def _fill_text(self, text, width, indent):
text = self._whitespace_matcher.sub(' ', text).strip()
return _textwrap.fill(
text, width, initial_indent=indent,
subsequent_indent=indent,
)
def _get_help_string(self, action):
return action.help
class RawDescriptionHelpFormatter(HelpFormatter):
"""Help message formatter which retains any formatting in descriptions.
Only the name of this class is considered a public API. All the methods
provided by the class are considered an implementation detail.
"""
def _fill_text(self, text, width, indent):
return ''.join([indent + line for line in text.splitlines(True)])
class RawTextHelpFormatter(RawDescriptionHelpFormatter):
"""Help message formatter which retains formatting of all help text.
Only the name of this class is considered a public API. All the methods
provided by the class are considered an implementation detail.
"""
def _split_lines(self, text, width):
return text.splitlines()
class ArgumentDefaultsHelpFormatter(HelpFormatter):
"""Help message formatter which adds default values to argument help.
Only the name of this class is considered a public API. All the methods
provided by the class are considered an implementation detail.
"""
def _get_help_string(self, action):
help = action.help
if '%(default)' not in action.help:
if action.default is not SUPPRESS:
defaulting_nargs = [OPTIONAL, ZERO_OR_MORE]
if action.option_strings or action.nargs in defaulting_nargs:
help += ' (default: %(default)s)'
return help
# =====================
# Options and Arguments
# =====================
def _get_action_name(argument):
if argument is None:
return None
elif argument.option_strings:
return '/'.join(argument.option_strings)
elif argument.metavar not in (None, SUPPRESS):
return argument.metavar
elif argument.dest not in (None, SUPPRESS):
return argument.dest
else:
return None
class ArgumentError(Exception):
"""An error from creating or using an argument (optional or positional).
The string value of this exception is the message, augmented with
information about the argument that caused it.
"""
def __init__(self, argument, message):
self.argument_name = _get_action_name(argument)
self.message = message
def __str__(self):
if self.argument_name is None:
format = '%(message)s'
else:
format = 'argument %(argument_name)s: %(message)s'
return format % dict(
message=self.message,
argument_name=self.argument_name,
)
class ArgumentTypeError(Exception):
"""An error from trying to convert a command line string to a type."""
pass
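# Hedged sketch (not part of the original module): ArgumentError falls back to
# the bare message when no argument can be named, since _get_action_name(None)
# returns None. Uses the module's own _sys.exc_info() idiom; runs only when the
# file is executed directly.
if __name__ == '__main__':
    try:
        raise ArgumentError(None, 'example message')
    except ArgumentError:
        _demo_err = _sys.exc_info()[1]
        assert str(_demo_err) == 'example message'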
# ==============
# Action classes
# ==============
class Action(_AttributeHolder):
"""Information about how to convert command line strings to Python objects.
Action objects are used by an ArgumentParser to represent the information
needed to parse a single argument from one or more strings from the
command line. The keyword arguments to the Action constructor are also
all attributes of Action instances.
Keyword Arguments:
- option_strings -- A list of command-line option strings which
should be associated with this action.
- dest -- The name of the attribute to hold the created object(s)
- nargs -- The number of command-line arguments that should be
consumed. By default, one argument will be consumed and a single
value will be produced. Other values include:
- N (an integer) consumes N arguments (and produces a list)
- '?' consumes zero or one arguments
- '*' consumes zero or more arguments (and produces a list)
- '+' consumes one or more arguments (and produces a list)
Note that the difference between the default and nargs=1 is that
with the default, a single value will be produced, while with
nargs=1, a list containing a single value will be produced.
- const -- The value to be produced if the option is specified and the
option uses an action that takes no values.
- default -- The value to be produced if the option is not specified.
- type -- The type which the command-line arguments should be converted
to, should be one of 'string', 'int', 'float', 'complex' or a
callable object that accepts a single string argument. If None,
'string' is assumed.
- choices -- A container of values that should be allowed. If not None,
after a command-line argument has been converted to the appropriate
type, an exception will be raised if it is not a member of this
collection.
- required -- True if the action must always be specified at the
command line. This is only meaningful for optional command-line
arguments.
- help -- The help string describing the argument.
- metavar -- The name to be used for the option's argument with the
help string. If None, the 'dest' value will be used as the name.
"""
def __init__(
self,
option_strings,
dest,
nargs=None,
const=None,
default=None,
type=None,
choices=None,
required=False,
help=None,
metavar=None,
):
self.option_strings = option_strings
self.dest = dest
self.nargs = nargs
self.const = const
self.default = default
self.type = type
self.choices = choices
self.required = required
self.help = help
self.metavar = metavar
def _get_kwargs(self):
names = [
'option_strings',
'dest',
'nargs',
'const',
'default',
'type',
'choices',
'help',
'metavar',
]
return [(name, getattr(self, name)) for name in names]
def __call__(self, parser, namespace, values, option_string=None):
raise NotImplementedError(_('.__call__() not defined'))
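# Hedged sketch (not part of the original module): a minimal Action subclass
# illustrating the __call__ contract described in the docstring above. The
# class name and the upper-casing behaviour are purely illustrative; it is not
# registered with any parser here.
class _ExampleUpperStoreAction(Action):
    def __call__(self, parser, namespace, values, option_string=None):
        # store the converted value, upper-cased, on the namespace
        setattr(namespace, self.dest, str(values).upper())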
class _StoreAction(Action):
def __init__(
self,
option_strings,
dest,
nargs=None,
const=None,
default=None,
type=None,
choices=None,
required=False,
help=None,
metavar=None,
):
if nargs == 0:
raise ValueError(
'nargs for store actions must be > 0; if you '
'have nothing to store, actions such as store '
'true or store const may be more appropriate',
)
if const is not None and nargs != OPTIONAL:
raise ValueError('nargs must be %r to supply const' % OPTIONAL)
super(_StoreAction, self).__init__(
option_strings=option_strings,
dest=dest,
nargs=nargs,
const=const,
default=default,
type=type,
choices=choices,
required=required,
help=help,
metavar=metavar,
)
def __call__(self, parser, namespace, values, option_string=None):
setattr(namespace, self.dest, values)
class _StoreConstAction(Action):
def __init__(
self,
option_strings,
dest,
const,
default=None,
required=False,
help=None,
metavar=None,
):
super(_StoreConstAction, self).__init__(
option_strings=option_strings,
dest=dest,
nargs=0,
const=const,
default=default,
required=required,
help=help,
)
def __call__(self, parser, namespace, values, option_string=None):
setattr(namespace, self.dest, self.const)
class _StoreTrueAction(_StoreConstAction):
def __init__(
self,
option_strings,
dest,
default=False,
required=False,
help=None,
):
super(_StoreTrueAction, self).__init__(
option_strings=option_strings,
dest=dest,
const=True,
default=default,
required=required,
help=help,
)
class _StoreFalseAction(_StoreConstAction):
def __init__(
self,
option_strings,
dest,
default=True,
required=False,
help=None,
):
super(_StoreFalseAction, self).__init__(
option_strings=option_strings,
dest=dest,
const=False,
default=default,
required=required,
help=help,
)
class _AppendAction(Action):
def __init__(
self,
option_strings,
dest,
nargs=None,
const=None,
default=None,
type=None,
choices=None,
required=False,
help=None,
metavar=None,
):
if nargs == 0:
raise ValueError(
'nargs for append actions must be > 0; if arg '
'strings are not supplying the value to append, '
'the append const action may be more appropriate',
)
if const is not None and nargs != OPTIONAL:
raise ValueError('nargs must be %r to supply const' % OPTIONAL)
super(_AppendAction, self).__init__(
option_strings=option_strings,
dest=dest,
nargs=nargs,
const=const,
default=default,
type=type,
choices=choices,
required=required,
help=help,
metavar=metavar,
)
def __call__(self, parser, namespace, values, option_string=None):
items = _copy.copy(_ensure_value(namespace, self.dest, []))
items.append(values)
setattr(namespace, self.dest, items)
class _AppendConstAction(Action):
def __init__(
self,
option_strings,
dest,
const,
default=None,
required=False,
help=None,
metavar=None,
):
super(_AppendConstAction, self).__init__(
option_strings=option_strings,
dest=dest,
nargs=0,
const=const,
default=default,
required=required,
help=help,
metavar=metavar,
)
def __call__(self, parser, namespace, values, option_string=None):
items = _copy.copy(_ensure_value(namespace, self.dest, []))
items.append(self.const)
setattr(namespace, self.dest, items)
class _CountAction(Action):
def __init__(
self,
option_strings,
dest,
default=None,
required=False,
help=None,
):
super(_CountAction, self).__init__(
option_strings=option_strings,
dest=dest,
nargs=0,
default=default,
required=required,
help=help,
)
def __call__(self, parser, namespace, values, option_string=None):
new_count = _ensure_value(namespace, self.dest, 0) + 1
setattr(namespace, self.dest, new_count)
class _HelpAction(Action):
def __init__(
self,
option_strings,
dest=SUPPRESS,
default=SUPPRESS,
help=None,
):
super(_HelpAction, self).__init__(
option_strings=option_strings,
dest=dest,
default=default,
nargs=0,
help=help,
)
def __call__(self, parser, namespace, values, option_string=None):
parser.print_help()
parser.exit()
class _VersionAction(Action):
def __init__(
self,
option_strings,
version=None,
dest=SUPPRESS,
default=SUPPRESS,
help="show program's version number and exit",
):
super(_VersionAction, self).__init__(
option_strings=option_strings,
dest=dest,
default=default,
nargs=0,
help=help,
)
self.version = version
def __call__(self, parser, namespace, values, option_string=None):
version = self.version
if version is None:
version = parser.version
formatter = parser._get_formatter()
formatter.add_text(version)
parser.exit(message=formatter.format_help())
class _SubParsersAction(Action):
class _ChoicesPseudoAction(Action):
def __init__(self, name, help):
sup = super(_SubParsersAction._ChoicesPseudoAction, self)
sup.__init__(option_strings=[], dest=name, help=help)
def __init__(
self,
option_strings,
prog,
parser_class,
dest=SUPPRESS,
help=None,
metavar=None,
):
self._prog_prefix = prog
self._parser_class = parser_class
self._name_parser_map = {}
self._choices_actions = []
super(_SubParsersAction, self).__init__(
option_strings=option_strings,
dest=dest,
nargs=PARSER,
choices=self._name_parser_map,
help=help,
metavar=metavar,
)
def add_parser(self, name, **kwargs):
# set prog from the existing prefix
if kwargs.get('prog') is None:
kwargs['prog'] = '%s %s' % (self._prog_prefix, name)
# create a pseudo-action to hold the choice help
if 'help' in kwargs:
help = kwargs.pop('help')
choice_action = self._ChoicesPseudoAction(name, help)
self._choices_actions.append(choice_action)
# create the parser and add it to the map
parser = self._parser_class(**kwargs)
self._name_parser_map[name] = parser
return parser
def _get_subactions(self):
return self._choices_actions
def __call__(self, parser, namespace, values, option_string=None):
parser_name = values[0]
arg_strings = values[1:]
# set the parser name if requested
if self.dest is not SUPPRESS:
setattr(namespace, self.dest, parser_name)
# select the parser
try:
parser = self._name_parser_map[parser_name]
except KeyError:
tup = parser_name, ', '.join(self._name_parser_map)
msg = _('unknown parser %r (choices: %s)' % tup)
raise ArgumentError(self, msg)
# parse all the remaining options into the namespace
# store any unrecognized options on the object, so that the top
# level parser can decide what to do with them
namespace, arg_strings = parser.parse_known_args(
arg_strings, namespace,
)
if arg_strings:
vars(namespace).setdefault(_UNRECOGNIZED_ARGS_ATTR, [])
getattr(namespace, _UNRECOGNIZED_ARGS_ATTR).extend(arg_strings)
# ==============
# Type classes
# ==============
class FileType(object):
"""Factory for creating file object types
Instances of FileType are typically passed as type= arguments to the
ArgumentParser add_argument() method.
Keyword Arguments:
- mode -- A string indicating how the file is to be opened. Accepts the
same values as the builtin open() function.
- bufsize -- The file's desired buffer size. Accepts the same values as
the builtin open() function.
"""
def __init__(self, mode='r', bufsize=None):
self._mode = mode
self._bufsize = bufsize
def __call__(self, string):
# the special argument "-" means sys.std{in,out}
if string == '-':
if 'r' in self._mode:
return _sys.stdin
elif 'w' in self._mode:
return _sys.stdout
else:
msg = _('argument "-" with mode %r' % self._mode)
raise ValueError(msg)
# all other arguments are used as file names
if self._bufsize:
return open(string, self._mode, self._bufsize)
else:
return open(string, self._mode)
def __repr__(self):
args = [self._mode, self._bufsize]
args_str = ', '.join([repr(arg) for arg in args if arg is not None])
return '%s(%s)' % (type(self).__name__, args_str)
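# Hedged sketch (not part of the original module): the special "-" argument is
# mapped to sys.stdin or sys.stdout depending on the mode, as the __call__
# method above implements. Runs only when the file is executed directly.
if __name__ == '__main__':
    assert FileType('r')('-') is _sys.stdin
    assert FileType('w')('-') is _sys.stdout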
# ===========================
# Optional and Positional Parsing
# ===========================
class Namespace(_AttributeHolder):
"""Simple object for storing attributes.
Implements equality by attribute names and values, and provides a simple
string representation.
"""
def __init__(self, **kwargs):
for name in kwargs:
setattr(self, name, kwargs[name])
__hash__ = None
def __eq__(self, other):
return vars(self) == vars(other)
def __ne__(self, other):
return not (self == other)
def __contains__(self, key):
return key in self.__dict__
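# Hedged sketch (not part of the original module): Namespace compares by
# attribute names and values and supports "in" checks, per the docstring above.
if __name__ == '__main__':
    _demo_ns = Namespace(count=1, name='a')
    assert _demo_ns == Namespace(count=1, name='a')
    assert 'name' in _demo_ns and _demo_ns.count == 1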
class _ActionsContainer(object):
def __init__(
self,
description,
prefix_chars,
argument_default,
conflict_handler,
):
super(_ActionsContainer, self).__init__()
self.description = description
self.argument_default = argument_default
self.prefix_chars = prefix_chars
self.conflict_handler = conflict_handler
# set up registries
self._registries = {}
# register actions
self.register('action', None, _StoreAction)
self.register('action', 'store', _StoreAction)
self.register('action', 'store_const', _StoreConstAction)
self.register('action', 'store_true', _StoreTrueAction)
self.register('action', 'store_false', _StoreFalseAction)
self.register('action', 'append', _AppendAction)
self.register('action', 'append_const', _AppendConstAction)
self.register('action', 'count', _CountAction)
self.register('action', 'help', _HelpAction)
self.register('action', 'version', _VersionAction)
self.register('action', 'parsers', _SubParsersAction)
# raise an exception if the conflict handler is invalid
self._get_handler()
# action storage
self._actions = []
self._option_string_actions = {}
# groups
self._action_groups = []
self._mutually_exclusive_groups = []
# defaults storage
self._defaults = {}
# determines whether an "option" looks like a negative number
self._negative_number_matcher = _re.compile(r'^-\d+$|^-\d*\.\d+$')
# whether or not there are any optionals that look like negative
# numbers -- uses a list so it can be shared and edited
self._has_negative_number_optionals = []
# ====================
# Registration methods
# ====================
def register(self, registry_name, value, object):
registry = self._registries.setdefault(registry_name, {})
registry[value] = object
def _registry_get(self, registry_name, value, default=None):
return self._registries[registry_name].get(value, default)
# ==================================
# Namespace default accessor methods
# ==================================
def set_defaults(self, **kwargs):
self._defaults.update(kwargs)
# if these defaults match any existing arguments, replace
# the previous default on the object with the new one
for action in self._actions:
if action.dest in kwargs:
action.default = kwargs[action.dest]
def get_default(self, dest):
for action in self._actions:
if action.dest == dest and action.default is not None:
return action.default
return self._defaults.get(dest, None)
# =======================
# Adding argument actions
# =======================
def add_argument(self, *args, **kwargs):
"""
add_argument(dest, ..., name=value, ...)
add_argument(option_string, option_string, ..., name=value, ...)
"""
# if no positional args are supplied or only one is supplied and
# it doesn't look like an option string, parse a positional
# argument
chars = self.prefix_chars
if not args or len(args) == 1 and args[0][0] not in chars:
if args and 'dest' in kwargs:
raise ValueError('dest supplied twice for positional argument')
kwargs = self._get_positional_kwargs(*args, **kwargs)
# otherwise, we're adding an optional argument
else:
kwargs = self._get_optional_kwargs(*args, **kwargs)
# if no default was supplied, use the parser-level default
if 'default' not in kwargs:
dest = kwargs['dest']
if dest in self._defaults:
kwargs['default'] = self._defaults[dest]
elif self.argument_default is not None:
kwargs['default'] = self.argument_default
# create the action object, and add it to the parser
action_class = self._pop_action_class(kwargs)
if not _callable(action_class):
raise ValueError('unknown action "%s"' % action_class)
action = action_class(**kwargs)
# raise an error if the action type is not callable
type_func = self._registry_get('type', action.type, action.type)
if not _callable(type_func):
raise ValueError('%r is not callable' % type_func)
return self._add_action(action)
def add_argument_group(self, *args, **kwargs):
group = _ArgumentGroup(self, *args, **kwargs)
self._action_groups.append(group)
return group
def add_mutually_exclusive_group(self, **kwargs):
group = _MutuallyExclusiveGroup(self, **kwargs)
self._mutually_exclusive_groups.append(group)
return group
def _add_action(self, action):
# resolve any conflicts
self._check_conflict(action)
# add to actions list
self._actions.append(action)
action.container = self
# index the action by any option strings it has
for option_string in action.option_strings:
self._option_string_actions[option_string] = action
# set the flag if any option strings look like negative numbers
for option_string in action.option_strings:
if self._negative_number_matcher.match(option_string):
if not self._has_negative_number_optionals:
self._has_negative_number_optionals.append(True)
# return the created action
return action
def _remove_action(self, action):
self._actions.remove(action)
def _add_container_actions(self, container):
# collect groups by titles
title_group_map = {}
for group in self._action_groups:
if group.title in title_group_map:
msg = _('cannot merge actions - two groups are named %r')
raise ValueError(msg % (group.title))
title_group_map[group.title] = group
# map each action to its group
group_map = {}
for group in container._action_groups:
# if a group with the title exists, use that, otherwise
# create a new group matching the container's group
if group.title not in title_group_map:
title_group_map[group.title] = self.add_argument_group(
title=group.title,
description=group.description,
conflict_handler=group.conflict_handler,
)
# map the actions to their new group
for action in group._group_actions:
group_map[action] = title_group_map[group.title]
# add container's mutually exclusive groups
# NOTE: if add_mutually_exclusive_group ever gains title= and
# description= then this code will need to be expanded as above
for group in container._mutually_exclusive_groups:
mutex_group = self.add_mutually_exclusive_group(
required=group.required,
)
# map the actions to their new mutex group
for action in group._group_actions:
group_map[action] = mutex_group
# add all actions to this container or their group
for action in container._actions:
group_map.get(action, self)._add_action(action)
def _get_positional_kwargs(self, dest, **kwargs):
# make sure required is not specified
if 'required' in kwargs:
msg = _("'required' is an invalid argument for positionals")
raise TypeError(msg)
# mark positional arguments as required if at least one is
# always required
if kwargs.get('nargs') not in [OPTIONAL, ZERO_OR_MORE]:
kwargs['required'] = True
if kwargs.get('nargs') == ZERO_OR_MORE and 'default' not in kwargs:
kwargs['required'] = True
# return the keyword arguments with no option strings
return dict(kwargs, dest=dest, option_strings=[])
def _get_optional_kwargs(self, *args, **kwargs):
# determine short and long option strings
option_strings = []
long_option_strings = []
for option_string in args:
# error on strings that don't start with an appropriate prefix
if not option_string[0] in self.prefix_chars:
msg = _(
'invalid option string %r: '
'must start with a character %r',
)
tup = option_string, self.prefix_chars
raise ValueError(msg % tup)
# strings starting with two prefix characters are long options
option_strings.append(option_string)
if option_string[0] in self.prefix_chars:
if len(option_string) > 1:
if option_string[1] in self.prefix_chars:
long_option_strings.append(option_string)
# infer destination, '--foo-bar' -> 'foo_bar' and '-x' -> 'x'
dest = kwargs.pop('dest', None)
if dest is None:
if long_option_strings:
dest_option_string = long_option_strings[0]
else:
dest_option_string = option_strings[0]
dest = dest_option_string.lstrip(self.prefix_chars)
if not dest:
msg = _('dest= is required for options like %r')
raise ValueError(msg % option_string)
dest = dest.replace('-', '_')
# return the updated keyword arguments
return dict(kwargs, dest=dest, option_strings=option_strings)
def _pop_action_class(self, kwargs, default=None):
action = kwargs.pop('action', default)
return self._registry_get('action', action, action)
def _get_handler(self):
# determine function from conflict handler string
handler_func_name = '_handle_conflict_%s' % self.conflict_handler
try:
return getattr(self, handler_func_name)
except AttributeError:
msg = _('invalid conflict_resolution value: %r')
raise ValueError(msg % self.conflict_handler)
def _check_conflict(self, action):
# find all options that conflict with this option
confl_optionals = []
for option_string in action.option_strings:
if option_string in self._option_string_actions:
confl_optional = self._option_string_actions[option_string]
confl_optionals.append((option_string, confl_optional))
# resolve any conflicts
if confl_optionals:
conflict_handler = self._get_handler()
conflict_handler(action, confl_optionals)
def _handle_conflict_error(self, action, conflicting_actions):
message = _('conflicting option string(s): %s')
conflict_string = ', '.join([
option_string
for option_string, action
in conflicting_actions
])
raise ArgumentError(action, message % conflict_string)
def _handle_conflict_resolve(self, action, conflicting_actions):
# remove all conflicting options
for option_string, action in conflicting_actions:
# remove the conflicting option
action.option_strings.remove(option_string)
self._option_string_actions.pop(option_string, None)
# if the option now has no option string, remove it from the
# container holding it
if not action.option_strings:
action.container._remove_action(action)
class _ArgumentGroup(_ActionsContainer):
def __init__(self, container, title=None, description=None, **kwargs):
# add any missing keyword arguments by checking the container
update = kwargs.setdefault
update('conflict_handler', container.conflict_handler)
update('prefix_chars', container.prefix_chars)
update('argument_default', container.argument_default)
super_init = super(_ArgumentGroup, self).__init__
super_init(description=description, **kwargs)
# group attributes
self.title = title
self._group_actions = []
# share most attributes with the container
self._registries = container._registries
self._actions = container._actions
self._option_string_actions = container._option_string_actions
self._defaults = container._defaults
self._has_negative_number_optionals = \
container._has_negative_number_optionals
def _add_action(self, action):
action = super(_ArgumentGroup, self)._add_action(action)
self._group_actions.append(action)
return action
def _remove_action(self, action):
super(_ArgumentGroup, self)._remove_action(action)
self._group_actions.remove(action)
class _MutuallyExclusiveGroup(_ArgumentGroup):
def __init__(self, container, required=False):
super(_MutuallyExclusiveGroup, self).__init__(container)
self.required = required
self._container = container
def _add_action(self, action):
if action.required:
msg = _('mutually exclusive arguments must be optional')
raise ValueError(msg)
action = self._container._add_action(action)
self._group_actions.append(action)
return action
def _remove_action(self, action):
self._container._remove_action(action)
self._group_actions.remove(action)
class ArgumentParser(_AttributeHolder, _ActionsContainer):
"""Object for parsing command line strings into Python objects.
Keyword Arguments:
- prog -- The name of the program (default: sys.argv[0])
- usage -- A usage message (default: auto-generated from arguments)
- description -- A description of what the program does
- epilog -- Text following the argument descriptions
- parents -- Parsers whose arguments should be copied into this one
- formatter_class -- HelpFormatter class for printing help messages
- prefix_chars -- Characters that prefix optional arguments
- fromfile_prefix_chars -- Characters that prefix files containing
additional arguments
- argument_default -- The default value for all arguments
- conflict_handler -- String indicating how to handle conflicts
    - add_help -- Add a -h/--help option
"""
def __init__(
self,
prog=None,
usage=None,
description=None,
epilog=None,
version=None,
parents=[],
formatter_class=HelpFormatter,
prefix_chars='-',
fromfile_prefix_chars=None,
argument_default=None,
conflict_handler='error',
add_help=True,
):
if version is not None:
import warnings
warnings.warn(
"""The "version" argument to ArgumentParser is deprecated. """
"""Please use """
""""add_argument(..., action='version', version="N", ...)" """
"""instead""", DeprecationWarning,
)
superinit = super(ArgumentParser, self).__init__
superinit(
description=description,
prefix_chars=prefix_chars,
argument_default=argument_default,
conflict_handler=conflict_handler,
)
# default setting for prog
if prog is None:
prog = _os.path.basename(_sys.argv[0])
self.prog = prog
self.usage = usage
self.epilog = epilog
self.version = version
self.formatter_class = formatter_class
self.fromfile_prefix_chars = fromfile_prefix_chars
self.add_help = add_help
add_group = self.add_argument_group
self._positionals = add_group(_('positional arguments'))
self._optionals = add_group(_('optional arguments'))
self._subparsers = None
# register types
def identity(string):
return string
self.register('type', None, identity)
# add help and version arguments if necessary
# (using explicit default to override global argument_default)
if '-' in prefix_chars:
default_prefix = '-'
else:
default_prefix = prefix_chars[0]
if self.add_help:
self.add_argument(
default_prefix + 'h', default_prefix * 2 + 'help',
action='help', default=SUPPRESS,
help=_('show this help message and exit'),
)
if self.version:
self.add_argument(
default_prefix + 'v', default_prefix * 2 + 'version',
action='version', default=SUPPRESS,
version=self.version,
help=_("show program's version number and exit"),
)
# add parent arguments and defaults
for parent in parents:
self._add_container_actions(parent)
try:
defaults = parent._defaults
except AttributeError:
pass
else:
self._defaults.update(defaults)
# =======================
# Pretty __repr__ methods
# =======================
def _get_kwargs(self):
names = [
'prog',
'usage',
'description',
'version',
'formatter_class',
'conflict_handler',
'add_help',
]
return [(name, getattr(self, name)) for name in names]
# ==================================
# Optional/Positional adding methods
# ==================================
def add_subparsers(self, **kwargs):
if self._subparsers is not None:
self.error(_('cannot have multiple subparser arguments'))
# add the parser class to the arguments if it's not present
kwargs.setdefault('parser_class', type(self))
if 'title' in kwargs or 'description' in kwargs:
title = _(kwargs.pop('title', 'subcommands'))
description = _(kwargs.pop('description', None))
self._subparsers = self.add_argument_group(title, description)
else:
self._subparsers = self._positionals
# prog defaults to the usage message of this parser, skipping
# optional arguments and with no "usage:" prefix
if kwargs.get('prog') is None:
formatter = self._get_formatter()
positionals = self._get_positional_actions()
groups = self._mutually_exclusive_groups
formatter.add_usage(self.usage, positionals, groups, '')
kwargs['prog'] = formatter.format_help().strip()
# create the parsers action and add it to the positionals list
parsers_class = self._pop_action_class(kwargs, 'parsers')
action = parsers_class(option_strings=[], **kwargs)
self._subparsers._add_action(action)
# return the created parsers action
return action
def _add_action(self, action):
if action.option_strings:
self._optionals._add_action(action)
else:
self._positionals._add_action(action)
return action
def _get_optional_actions(self):
return [
action
for action in self._actions
if action.option_strings
]
def _get_positional_actions(self):
return [
action
for action in self._actions
if not action.option_strings
]
# =====================================
# Command line argument parsing methods
# =====================================
def parse_args(self, args=None, namespace=None):
args, argv = self.parse_known_args(args, namespace)
if argv:
msg = _('unrecognized arguments: %s')
self.error(msg % ' '.join(argv))
return args
def parse_known_args(self, args=None, namespace=None):
# args default to the system args
if args is None:
args = _sys.argv[1:]
# default Namespace built from parser defaults
if namespace is None:
namespace = Namespace()
# add any action defaults that aren't present
for action in self._actions:
if action.dest is not SUPPRESS:
if not hasattr(namespace, action.dest):
if action.default is not SUPPRESS:
default = action.default
if isinstance(action.default, basestring):
default = self._get_value(action, default)
setattr(namespace, action.dest, default)
# add any parser defaults that aren't present
for dest in self._defaults:
if not hasattr(namespace, dest):
setattr(namespace, dest, self._defaults[dest])
# parse the arguments and exit if there are any errors
try:
namespace, args = self._parse_known_args(args, namespace)
if hasattr(namespace, _UNRECOGNIZED_ARGS_ATTR):
args.extend(getattr(namespace, _UNRECOGNIZED_ARGS_ATTR))
delattr(namespace, _UNRECOGNIZED_ARGS_ATTR)
return namespace, args
except ArgumentError:
err = _sys.exc_info()[1]
self.error(str(err))
def _parse_known_args(self, arg_strings, namespace):
# replace arg strings that are file references
if self.fromfile_prefix_chars is not None:
arg_strings = self._read_args_from_files(arg_strings)
# map all mutually exclusive arguments to the other arguments
# they can't occur with
action_conflicts = {}
for mutex_group in self._mutually_exclusive_groups:
group_actions = mutex_group._group_actions
for i, mutex_action in enumerate(mutex_group._group_actions):
conflicts = action_conflicts.setdefault(mutex_action, [])
conflicts.extend(group_actions[:i])
conflicts.extend(group_actions[i + 1:])
# find all option indices, and determine the arg_string_pattern
# which has an 'O' if there is an option at an index,
# an 'A' if there is an argument, or a '-' if there is a '--'
option_string_indices = {}
arg_string_pattern_parts = []
arg_strings_iter = iter(arg_strings)
for i, arg_string in enumerate(arg_strings_iter):
# all args after -- are non-options
if arg_string == '--':
arg_string_pattern_parts.append('-')
for arg_string in arg_strings_iter:
arg_string_pattern_parts.append('A')
# otherwise, add the arg to the arg strings
# and note the index if it was an option
else:
option_tuple = self._parse_optional(arg_string)
if option_tuple is None:
pattern = 'A'
else:
option_string_indices[i] = option_tuple
pattern = 'O'
arg_string_pattern_parts.append(pattern)
# join the pieces together to form the pattern
arg_strings_pattern = ''.join(arg_string_pattern_parts)
        # converts arg strings to the appropriate values and then takes the action
seen_actions = set()
seen_non_default_actions = set()
def take_action(action, argument_strings, option_string=None):
seen_actions.add(action)
argument_values = self._get_values(action, argument_strings)
# error if this argument is not allowed with other previously
# seen arguments, assuming that actions that use the default
# value don't really count as "present"
if argument_values is not action.default:
seen_non_default_actions.add(action)
for conflict_action in action_conflicts.get(action, []):
if conflict_action in seen_non_default_actions:
msg = _('not allowed with argument %s')
action_name = _get_action_name(conflict_action)
raise ArgumentError(action, msg % action_name)
# take the action if we didn't receive a SUPPRESS value
# (e.g. from a default)
if argument_values is not SUPPRESS:
action(self, namespace, argument_values, option_string)
# function to convert arg_strings into an optional action
def consume_optional(start_index):
# get the optional identified at this index
option_tuple = option_string_indices[start_index]
action, option_string, explicit_arg = option_tuple
# identify additional optionals in the same arg string
# (e.g. -xyz is the same as -x -y -z if no args are required)
match_argument = self._match_argument
action_tuples = []
while True:
# if we found no optional action, skip it
if action is None:
extras.append(arg_strings[start_index])
return start_index + 1
# if there is an explicit argument, try to match the
# optional's string arguments to only this
if explicit_arg is not None:
arg_count = match_argument(action, 'A')
# if the action is a single-dash option and takes no
# arguments, try to parse more single-dash options out
# of the tail of the option string
chars = self.prefix_chars
if arg_count == 0 and option_string[1] not in chars:
action_tuples.append((action, [], option_string))
char = option_string[0]
option_string = char + explicit_arg[0]
new_explicit_arg = explicit_arg[1:] or None
optionals_map = self._option_string_actions
if option_string in optionals_map:
action = optionals_map[option_string]
explicit_arg = new_explicit_arg
else:
msg = _('ignored explicit argument %r')
raise ArgumentError(action, msg % explicit_arg)
                    # if the action expects exactly one argument, we've
# successfully matched the option; exit the loop
elif arg_count == 1:
stop = start_index + 1
args = [explicit_arg]
action_tuples.append((action, args, option_string))
break
# error if a double-dash option did not use the
# explicit argument
else:
msg = _('ignored explicit argument %r')
raise ArgumentError(action, msg % explicit_arg)
# if there is no explicit argument, try to match the
# optional's string arguments with the following strings
# if successful, exit the loop
else:
start = start_index + 1
selected_patterns = arg_strings_pattern[start:]
arg_count = match_argument(action, selected_patterns)
stop = start + arg_count
args = arg_strings[start:stop]
action_tuples.append((action, args, option_string))
break
# add the Optional to the list and return the index at which
# the Optional's string args stopped
assert action_tuples
for action, args, option_string in action_tuples:
take_action(action, args, option_string)
return stop
# the list of Positionals left to be parsed; this is modified
# by consume_positionals()
positionals = self._get_positional_actions()
# function to convert arg_strings into positional actions
def consume_positionals(start_index):
# match as many Positionals as possible
match_partial = self._match_arguments_partial
selected_pattern = arg_strings_pattern[start_index:]
arg_counts = match_partial(positionals, selected_pattern)
# slice off the appropriate arg strings for each Positional
# and add the Positional and its args to the list
for action, arg_count in zip(positionals, arg_counts):
args = arg_strings[start_index: start_index + arg_count]
start_index += arg_count
take_action(action, args)
# slice off the Positionals that we just parsed and return the
# index at which the Positionals' string args stopped
positionals[:] = positionals[len(arg_counts):]
return start_index
# consume Positionals and Optionals alternately, until we have
# passed the last option string
extras = []
start_index = 0
if option_string_indices:
max_option_string_index = max(option_string_indices)
else:
max_option_string_index = -1
while start_index <= max_option_string_index:
# consume any Positionals preceding the next option
next_option_string_index = min([
index
for index in option_string_indices
if index >= start_index
])
if start_index != next_option_string_index:
positionals_end_index = consume_positionals(start_index)
# only try to parse the next optional if we didn't consume
# the option string during the positionals parsing
if positionals_end_index > start_index:
start_index = positionals_end_index
continue
else:
start_index = positionals_end_index
# if we consumed all the positionals we could and we're not
# at the index of an option string, there were extra arguments
if start_index not in option_string_indices:
strings = arg_strings[start_index:next_option_string_index]
extras.extend(strings)
start_index = next_option_string_index
# consume the next optional and any arguments for it
start_index = consume_optional(start_index)
# consume any positionals following the last Optional
stop_index = consume_positionals(start_index)
# if we didn't consume all the argument strings, there were extras
extras.extend(arg_strings[stop_index:])
# if we didn't use all the Positional objects, there were too few
# arg strings supplied.
if positionals:
self.error(_('too few arguments'))
# make sure all required actions were present
for action in self._actions:
if action.required:
if action not in seen_actions:
name = _get_action_name(action)
self.error(_('argument %s is required') % name)
# make sure all required groups had one option present
for group in self._mutually_exclusive_groups:
if group.required:
for action in group._group_actions:
if action in seen_non_default_actions:
break
# if no actions were used, report the error
else:
names = [
_get_action_name(action)
for action in group._group_actions
if action.help is not SUPPRESS
]
msg = _('one of the arguments %s is required')
self.error(msg % ' '.join(names))
# return the updated namespace and the extra arguments
return namespace, extras
def _read_args_from_files(self, arg_strings):
# expand arguments referencing files
new_arg_strings = []
for arg_string in arg_strings:
# for regular arguments, just add them back into the list
if arg_string[0] not in self.fromfile_prefix_chars:
new_arg_strings.append(arg_string)
# replace arguments referencing files with the file content
else:
try:
args_file = open(arg_string[1:])
try:
arg_strings = []
for arg_line in args_file.read().splitlines():
for arg in self.convert_arg_line_to_args(arg_line):
arg_strings.append(arg)
arg_strings = self._read_args_from_files(arg_strings)
new_arg_strings.extend(arg_strings)
finally:
args_file.close()
except IOError:
err = _sys.exc_info()[1]
self.error(str(err))
# return the modified argument list
return new_arg_strings
def convert_arg_line_to_args(self, arg_line):
return [arg_line]
def _match_argument(self, action, arg_strings_pattern):
# match the pattern for this action to the arg strings
nargs_pattern = self._get_nargs_pattern(action)
match = _re.match(nargs_pattern, arg_strings_pattern)
# raise an exception if we weren't able to find a match
if match is None:
nargs_errors = {
None: _('expected one argument'),
OPTIONAL: _('expected at most one argument'),
ONE_OR_MORE: _('expected at least one argument'),
}
default = _('expected %s argument(s)') % action.nargs
msg = nargs_errors.get(action.nargs, default)
raise ArgumentError(action, msg)
# return the number of arguments matched
return len(match.group(1))
def _match_arguments_partial(self, actions, arg_strings_pattern):
# progressively shorten the actions list by slicing off the
# final actions until we find a match
result = []
for i in range(len(actions), 0, -1):
actions_slice = actions[:i]
pattern = ''.join([
self._get_nargs_pattern(action)
for action in actions_slice
])
match = _re.match(pattern, arg_strings_pattern)
if match is not None:
result.extend([len(string) for string in match.groups()])
break
# return the list of arg string counts
return result
def _parse_optional(self, arg_string):
# if it's an empty string, it was meant to be a positional
if not arg_string:
return None
# if it doesn't start with a prefix, it was meant to be positional
if not arg_string[0] in self.prefix_chars:
return None
# if the option string is present in the parser, return the action
if arg_string in self._option_string_actions:
action = self._option_string_actions[arg_string]
return action, arg_string, None
# if it's just a single character, it was meant to be positional
if len(arg_string) == 1:
return None
# if the option string before the "=" is present, return the action
if '=' in arg_string:
option_string, explicit_arg = arg_string.split('=', 1)
if option_string in self._option_string_actions:
action = self._option_string_actions[option_string]
return action, option_string, explicit_arg
# search through all possible prefixes of the option string
# and all actions in the parser for possible interpretations
option_tuples = self._get_option_tuples(arg_string)
# if multiple actions match, the option string was ambiguous
if len(option_tuples) > 1:
options = ', '.join(
[option_string for action, option_string, explicit_arg in option_tuples],
)
tup = arg_string, options
self.error(_('ambiguous option: %s could match %s') % tup)
# if exactly one action matched, this segmentation is good,
# so return the parsed action
elif len(option_tuples) == 1:
option_tuple, = option_tuples
return option_tuple
# if it was not found as an option, but it looks like a negative
# number, it was meant to be positional
# unless there are negative-number-like options
if self._negative_number_matcher.match(arg_string):
if not self._has_negative_number_optionals:
return None
# if it contains a space, it was meant to be a positional
if ' ' in arg_string:
return None
# it was meant to be an optional but there is no such option
# in this parser (though it might be a valid option in a subparser)
return None, arg_string, None
def _get_option_tuples(self, option_string):
result = []
# option strings starting with two prefix characters are only
# split at the '='
chars = self.prefix_chars
if option_string[0] in chars and option_string[1] in chars:
if '=' in option_string:
option_prefix, explicit_arg = option_string.split('=', 1)
else:
option_prefix = option_string
explicit_arg = None
for option_string in self._option_string_actions:
if option_string.startswith(option_prefix):
action = self._option_string_actions[option_string]
tup = action, option_string, explicit_arg
result.append(tup)
# single character options can be concatenated with their arguments
# but multiple character options always have to have their argument
# separate
elif option_string[0] in chars and option_string[1] not in chars:
option_prefix = option_string
explicit_arg = None
short_option_prefix = option_string[:2]
short_explicit_arg = option_string[2:]
for option_string in self._option_string_actions:
if option_string == short_option_prefix:
action = self._option_string_actions[option_string]
tup = action, option_string, short_explicit_arg
result.append(tup)
elif option_string.startswith(option_prefix):
action = self._option_string_actions[option_string]
tup = action, option_string, explicit_arg
result.append(tup)
# shouldn't ever get here
else:
self.error(_('unexpected option string: %s') % option_string)
# return the collected option tuples
return result
def _get_nargs_pattern(self, action):
# in all examples below, we have to allow for '--' args
# which are represented as '-' in the pattern
nargs = action.nargs
# the default (None) is assumed to be a single argument
if nargs is None:
nargs_pattern = '(-*A-*)'
# allow zero or one arguments
elif nargs == OPTIONAL:
nargs_pattern = '(-*A?-*)'
# allow zero or more arguments
elif nargs == ZERO_OR_MORE:
nargs_pattern = '(-*[A-]*)'
# allow one or more arguments
elif nargs == ONE_OR_MORE:
nargs_pattern = '(-*A[A-]*)'
# allow any number of options or arguments
elif nargs == REMAINDER:
nargs_pattern = '([-AO]*)'
# allow one argument followed by any number of options or arguments
elif nargs == PARSER:
nargs_pattern = '(-*A[-AO]*)'
# all others should be integers
else:
nargs_pattern = '(-*%s-*)' % '-*'.join('A' * nargs)
# if this is an optional action, -- is not allowed
if action.option_strings:
nargs_pattern = nargs_pattern.replace('-*', '')
nargs_pattern = nargs_pattern.replace('-', '')
# return the pattern
return nargs_pattern
# ========================
# Value conversion methods
# ========================
def _get_values(self, action, arg_strings):
# for everything but PARSER args, strip out '--'
if action.nargs not in [PARSER, REMAINDER]:
arg_strings = [s for s in arg_strings if s != '--']
# optional argument produces a default when not present
if not arg_strings and action.nargs == OPTIONAL:
if action.option_strings:
value = action.const
else:
value = action.default
if isinstance(value, basestring):
value = self._get_value(action, value)
self._check_value(action, value)
# when nargs='*' on a positional, if there were no command-line
# args, use the default if it is anything other than None
elif (
not arg_strings and action.nargs == ZERO_OR_MORE and
not action.option_strings
):
if action.default is not None:
value = action.default
else:
value = arg_strings
self._check_value(action, value)
# single argument or optional argument produces a single value
elif len(arg_strings) == 1 and action.nargs in [None, OPTIONAL]:
arg_string, = arg_strings
value = self._get_value(action, arg_string)
self._check_value(action, value)
# REMAINDER arguments convert all values, checking none
elif action.nargs == REMAINDER:
value = [self._get_value(action, v) for v in arg_strings]
# PARSER arguments convert all values, but check only the first
elif action.nargs == PARSER:
value = [self._get_value(action, v) for v in arg_strings]
self._check_value(action, value[0])
# all other types of nargs produce a list
else:
value = [self._get_value(action, v) for v in arg_strings]
for v in value:
self._check_value(action, v)
# return the converted value
return value
def _get_value(self, action, arg_string):
type_func = self._registry_get('type', action.type, action.type)
if not _callable(type_func):
msg = _('%r is not callable')
raise ArgumentError(action, msg % type_func)
# convert the value to the appropriate type
try:
result = type_func(arg_string)
# ArgumentTypeErrors indicate errors
except ArgumentTypeError:
name = getattr(action.type, '__name__', repr(action.type))
msg = str(_sys.exc_info()[1])
raise ArgumentError(action, msg)
# TypeErrors or ValueErrors also indicate errors
except (TypeError, ValueError):
name = getattr(action.type, '__name__', repr(action.type))
msg = _('invalid %s value: %r')
raise ArgumentError(action, msg % (name, arg_string))
# return the converted value
return result
def _check_value(self, action, value):
# converted value must be one of the choices (if specified)
if action.choices is not None and value not in action.choices:
tup = value, ', '.join(map(repr, action.choices))
msg = _('invalid choice: %r (choose from %s)') % tup
raise ArgumentError(action, msg)
# =======================
# Help-formatting methods
# =======================
def format_usage(self):
formatter = self._get_formatter()
formatter.add_usage(
self.usage, self._actions,
self._mutually_exclusive_groups,
)
return formatter.format_help()
def format_help(self):
formatter = self._get_formatter()
# usage
formatter.add_usage(
self.usage, self._actions,
self._mutually_exclusive_groups,
)
# description
formatter.add_text(self.description)
# positionals, optionals and user-defined groups
for action_group in self._action_groups:
formatter.start_section(action_group.title)
formatter.add_text(action_group.description)
formatter.add_arguments(action_group._group_actions)
formatter.end_section()
# epilog
formatter.add_text(self.epilog)
# determine help from format above
return formatter.format_help()
def format_version(self):
import warnings
warnings.warn(
'The format_version method is deprecated -- the "version" '
'argument to ArgumentParser is no longer supported.',
DeprecationWarning,
)
formatter = self._get_formatter()
formatter.add_text(self.version)
return formatter.format_help()
def _get_formatter(self):
return self.formatter_class(prog=self.prog)
# =====================
# Help-printing methods
# =====================
def print_usage(self, file=None):
if file is None:
file = _sys.stdout
self._print_message(self.format_usage(), file)
def print_help(self, file=None):
if file is None:
file = _sys.stdout
self._print_message(self.format_help(), file)
def print_version(self, file=None):
import warnings
warnings.warn(
'The print_version method is deprecated -- the "version" '
'argument to ArgumentParser is no longer supported.',
DeprecationWarning,
)
self._print_message(self.format_version(), file)
def _print_message(self, message, file=None):
if message:
if file is None:
file = _sys.stderr
file.write(message)
# ===============
# Exiting methods
# ===============
def exit(self, status=0, message=None):
if message:
self._print_message(message, _sys.stderr)
_sys.exit(status)
def error(self, message):
"""error(message: string)
Prints a usage message incorporating the message to stderr and
exits.
If you override this in a subclass, it should not return -- it
should either exit or raise an exception.
"""
self.print_usage(_sys.stderr)
self.exit(2, _('%s: error: %s\n') % (self.prog, message))
| joelthelion/autojump | bin/autojump_argparse.py | Python | gpl-3.0 | 87,584 |
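# Illustrative sketch (separate from the vendored module above): the standard
# library's argparse exposes the same nargs semantics that _match_argument and
# _get_nargs_pattern implement; the option names here are made up for the demo.
if __name__ == '__main__':
    import argparse
    demo = argparse.ArgumentParser(prog='demo')
    demo.add_argument('--mode', nargs='?', const='fast', default='off')  # OPTIONAL: zero or one value
    demo.add_argument('--pair', nargs=2)                                 # exactly two values
    demo.add_argument('inputs', nargs='+')                               # ONE_OR_MORE
    ns = demo.parse_args(['a.txt', 'b.txt', '--mode'])
    print(ns.mode, ns.inputs, ns.pair)   # fast ['a.txt', 'b.txt'] None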
from argparse import Namespace
import unittest
from distance import Level
from distance.filter import RemoveFilter
def mkargs(maxrecurse=-1, type=[], numbers=[], section=[],
print_=False, invert=False):
return Namespace(**locals())
class RemoveTest(unittest.TestCase):
def test_by_type(self):
l = Level("tests/in/level/test-straightroad.bytes")
f = RemoveFilter(mkargs(type=["Empire(Start|End)Zone"]))
f.apply(l)
for obj in l.layers[0].objects:
if "Zone" in obj.type:
raise AssertionError("Found road: {obj.type}")
self.assertEqual(['EmpireStartZone', 'EmpireEndZone'], [o.type for o in f.removed])
self.assertEqual(2, f.num_matches)
def test_by_number(self):
l = Level("tests/in/level/test-straightroad.bytes")
f = RemoveFilter(mkargs(numbers=[0]))
f.apply(l)
obj = l.layers[0].objects[0]
self.assertEqual('KillGridInfinitePlane', obj.type)
self.assertEqual(['LevelEditorCarSpawner'], [o.type for o in f.removed])
self.assertEqual(6, f.num_matches)
def test_by_section(self):
l = Level("tests/in/level/test-straightroad.bytes")
f = RemoveFilter(mkargs(section=["3,9,1"]))
f.apply(l)
self.assertEqual(3, len(l.layers[0].objects))
self.assertEqual(['DirectionalLight', 'EmpireStartZone', 'EmpireEndZone'],
[o.type for o in f.removed])
self.assertEqual(3, f.num_matches)
def test_invert(self):
l = Level("tests/in/level/test-straightroad.bytes")
f = RemoveFilter(mkargs(type=['Road'], invert=True))
f.apply(l)
self.assertEqual(1, len(l.layers[0].objects))
self.assertEqual('EmpireSplineRoadStraight', l.layers[0].objects[0].type)
# vim:set sw=4 ts=8 sts=4 et:
| ferreum/distanceutils | tests/filter/test_remove.py | Python | mit | 1,853 |
"""
Exception types for txacme.
"""
import attr
@attr.s
class NotInZone(ValueError):
"""
The given domain name is not in the configured zone.
"""
server_name = attr.ib()
zone_name = attr.ib()
def __str__(self):
return repr(self)
@attr.s
class ZoneNotFound(ValueError):
"""
The configured zone was not found in the zones at the configured provider.
"""
zone_name = attr.ib()
def __str__(self):
return repr(self)
__all__ = ['NotInZone', 'ZoneNotFound']
| mithrandi/txacme | src/txacme/errors.py | Python | mit | 519 |
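# Quick illustration (hypothetical caller, not part of txacme): attrs generates
# __init__ and __repr__, so the fields travel with the exception and str()
# simply mirrors repr() as defined above.
if __name__ == '__main__':
    try:
        raise NotInZone(server_name=u'www.example.com', zone_name=u'example.org')
    except NotInZone as exc:
        print(exc.server_name, exc.zone_name)   # www.example.com example.org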
#!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
"""
Test that a module that we import into an SConscript file can itself
easily import the global SCons variables, and a handful of other variables
directly from SCons.Script modules.
"""
import TestSCons
test = TestSCons.TestSCons()
test.write('SConstruct', """\
import m1
""")
test.write("m1.py", """\
from SCons.Script import *
SConscript('SConscript')
""")
test.write('SConscript', """\
import m2
import m3
import m4
""")
test.write("m2.py", """\
from SCons.Script import *
Command("file.out", "file.in", Copy("$TARGET", "$SOURCE"))
""")
test.write("m3.py", """\
import SCons.Script
SCons.Script.BuildTask
SCons.Script.CleanTask
SCons.Script.QuestionTask
#SCons.Script.PrintHelp
SCons.Script.OptParser
SCons.Script.SConscriptSettableOptions
SCons.Script.keep_going_on_error
#SCons.Script.print_dtree
SCons.Script.print_explanations
SCons.Script.print_includes
SCons.Script.print_objects
SCons.Script.print_time
#SCons.Script.print_tree
SCons.Script.memory_stats
SCons.Script.ignore_errors
#SCons.Script.sconscript_time
#SCons.Script.command_time
#SCons.Script.exit_status
#SCons.Script.profiling
SCons.Script.repositories
""")
test.write("m4.py", """\
import SCons.Script.SConscript
SCons.Script.SConscript.Arguments
SCons.Script.SConscript.ArgList
SCons.Script.SConscript.BuildTargets
SCons.Script.SConscript.CommandLineTargets
SCons.Script.SConscript.DefaultTargets
""")
test.write("file.in", "file.in\n")
test.run(arguments = '.')
test.must_match("file.out", "file.in\n")
test.pass_test()
| datalogics/scons | test/Script-import.py | Python | mit | 2,675 |
#!/usr/bin/env python
'''This program demonstrates the use of the A* algorithm.'''
import heapq
def astar(graph,current,end):
openset = set()
openheap = []
closeset = set()
def retracepath(c):
path = [c]
while c.parent is not None:
c = c.parent
path.append(c)
path.reverse()
return path
openset.add(current)
openheap.append((0,current))
while openset:
current = heapq.heappop(openheap)[1]
if current == end:
return retracepath(current)
openset.remove(current)
closeset.add(current)
for tile in graph[current]:
if tile not in closeset:
tile.H = (abs(end.x- tile.x)+abs(end.y-tile.y))*10
if tile not in openset:
openset.add(tile)
heapq.heappush(openheap,(tile.H,tile))
tile.parent = current
return [] | triump0870/hackerearth | astar.py | Python | gpl-2.0 | 754 |
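# Usage sketch: the function above only assumes each node has .x, .y, .parent
# and a writable .H attribute, and that the graph maps a node to its
# neighbours. Tile below is a hypothetical helper written for this demo.
if __name__ == '__main__':
    class Tile(object):
        def __init__(self, x, y):
            self.x, self.y = x, y
            self.parent = None
            self.H = 0
        def __lt__(self, other):              # tie-breaker for the heap tuples
            return (self.x, self.y) < (other.x, other.y)
        def __repr__(self):
            return 'Tile(%d, %d)' % (self.x, self.y)

    a, b, c = Tile(0, 0), Tile(1, 0), Tile(2, 0)
    grid = {a: [b], b: [a, c], c: [b]}
    print(astar(grid, a, c))                  # [Tile(0, 0), Tile(1, 0), Tile(2, 0)]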
"""
sentry.nodestore.django.models
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
from django.db import models
from django.utils import timezone
from sentry.db.models import (
BaseModel, GzippedDictField, sane_repr)
class Node(BaseModel):
id = models.CharField(max_length=40, primary_key=True)
data = GzippedDictField()
timestamp = models.DateTimeField(default=timezone.now, db_index=True)
__repr__ = sane_repr('timestamp')
class Meta:
app_label = 'nodestore'
| jokey2k/sentry | src/sentry/nodestore/django/models.py | Python | bsd-3-clause | 648 |
"""Export command
"""
__copyright__ = """
Copyright (C) 2005, Catalin Marinas <[email protected]>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License version 2 as
published by the Free Software Foundation.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, see http://www.gnu.org/licenses/.
"""
import os
import sys
from stgit.argparse import opt
from stgit.commands import common
from stgit import argparse, git, templates
from stgit.out import out
from stgit.lib import git as gitlib
help = 'Export patches to a directory'
kind = 'patch'
usage = ['[options] [--] [<patch1>] [<patch2>] [<patch3>..<patch4>]']
description = """
Export a range of applied patches to a given directory (defaults to
'patches-<branch>') in a standard unified GNU diff format. A template
file (defaulting to '.git/patchexport.tmpl' or
'~/.stgit/templates/patchexport.tmpl' or
'/usr/share/stgit/templates/patchexport.tmpl') can be used for the
patch format. The following variables are supported in the template
file:
%(description)s - patch description
%(shortdescr)s - the first line of the patch description
%(longdescr)s - the rest of the patch description, after the first line
%(diffstat)s - the diff statistics
%(authname)s - author's name
%(authemail)s - author's e-mail
%(authdate)s - patch creation date
%(commname)s - committer's name
%(commemail)s - committer's e-mail"""
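# An illustrative template (not one shipped with StGit) showing how the
# variables documented above could be combined in a custom patchexport.tmpl:
#
#   From: %(authname)s <%(authemail)s>
#   Date: %(authdate)s
#   Subject: [PATCH] %(shortdescr)s
#
#   %(longdescr)s
#   ---
#   %(diffstat)s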
args = [argparse.patch_range(argparse.applied_patches,
argparse.unapplied_patches,
argparse.hidden_patches)]
options = [
opt('-d', '--dir', args = [argparse.dir],
short = 'Export patches to DIR instead of the default'),
opt('-p', '--patch', action = 'store_true',
short = 'Append .patch to the patch names'),
opt('-e', '--extension',
short = 'Append .EXTENSION to the patch names'),
opt('-n', '--numbered', action = 'store_true',
short = 'Prefix the patch names with order numbers'),
opt('-t', '--template', metavar = 'FILE', args = [argparse.files],
short = 'Use FILE as a template'),
opt('-b', '--branch', args = [argparse.stg_branches],
short = 'Use BRANCH instead of the default branch'),
opt('-s', '--stdout', action = 'store_true',
short = 'Dump the patches to the standard output'),
] + argparse.diff_opts_option()
directory = common.DirectoryHasRepositoryLib()
def func(parser, options, args):
"""Export a range of patches.
"""
stack = directory.repository.get_stack(options.branch)
if options.dir:
dirname = options.dir
else:
dirname = 'patches-%s' % stack.name
directory.cd_to_topdir()
if not options.branch and git.local_changes():
out.warn('Local changes in the tree;'
' you might want to commit them first')
if not options.stdout:
if not os.path.isdir(dirname):
os.makedirs(dirname)
series = file(os.path.join(dirname, 'series'), 'w+')
applied = stack.patchorder.applied
unapplied = stack.patchorder.unapplied
if len(args) != 0:
patches = common.parse_patches(args, applied + unapplied, len(applied))
else:
patches = applied
num = len(patches)
if num == 0:
raise common.CmdException, 'No patches applied'
zpadding = len(str(num))
if zpadding < 2:
zpadding = 2
# get the template
if options.template:
tmpl = file(options.template).read()
else:
tmpl = templates.get_template('patchexport.tmpl')
if not tmpl:
tmpl = ''
# note the base commit for this series
if not options.stdout:
base_commit = stack.base.sha1
print >> series, '# This series applies on GIT commit %s' % base_commit
    patch_no = 1
for p in patches:
pname = p
if options.patch:
pname = '%s.patch' % pname
elif options.extension:
pname = '%s.%s' % (pname, options.extension)
if options.numbered:
pname = '%s-%s' % (str(patch_no).zfill(zpadding), pname)
pfile = os.path.join(dirname, pname)
if not options.stdout:
print >> series, pname
# get the patch description
patch = stack.patches.get(p)
cd = patch.commit.data
descr = cd.message.strip()
descr_lines = descr.split('\n')
short_descr = descr_lines[0].rstrip()
long_descr = reduce(lambda x, y: x + '\n' + y,
descr_lines[1:], '').strip()
diff = stack.repository.diff_tree(cd.parent.data.tree, cd.tree, options.diff_flags)
tmpl_dict = {'description': descr,
'shortdescr': short_descr,
'longdescr': long_descr,
'diffstat': gitlib.diffstat(diff).rstrip(),
'authname': cd.author.name,
'authemail': cd.author.email,
'authdate': cd.author.date.isoformat(),
'commname': cd.committer.name,
'commemail': cd.committer.email}
for key in tmpl_dict:
if not tmpl_dict[key]:
tmpl_dict[key] = ''
try:
descr = tmpl % tmpl_dict
except KeyError, err:
raise common.CmdException, 'Unknown patch template variable: %s' \
% err
except TypeError:
raise common.CmdException, 'Only "%(name)s" variables are ' \
'supported in the patch template'
if options.stdout:
f = sys.stdout
else:
f = open(pfile, 'w+')
if options.stdout and num > 1:
print '-'*79
print patch.name
print '-'*79
f.write(descr)
f.write(diff)
if not options.stdout:
f.close()
patch_no += 1
if not options.stdout:
series.close()
| terinjokes/stgit | stgit/commands/export.py | Python | gpl-2.0 | 6,312 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2015 NXEZ.COM.
# http://www.nxez.com
#
# Licensed under the GNU General Public License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.gnu.org/licenses/gpl-2.0.html
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import RPi.GPIO as GPIO
import time
from threading import Thread
class Tact(object):
'''
Tact class
'''
__pin = 0
__real_true = GPIO.HIGH
__status = False
__observers = []
def __init__(self, pin, real_true = GPIO.HIGH):
'''
Init the tact
        :param pin: pin number
:param real_true: GPIO.HIGH or GPIO.LOW
:return: void
'''
self.__pin = pin
self.__real_true = real_true
if self.__real_true:
self.__status = GPIO.input(self.__pin)
else:
self.__status = not GPIO.input(self.__pin)
GPIO.add_event_detect(pin, GPIO.BOTH, callback = self.make_event, bouncetime = 1)
try:
t1 = Thread(target = self.watching)
            t1.setDaemon(True)
            t1.start()
except:
print("Error: Unable to start thread by Tact")
    #Status.
@property
def is_on(self):
'''
        Get the current status of the tact switch
'''
if self.__real_true:
if self.__status != GPIO.input(self.__pin):
self.__status = GPIO.input(self.__pin)
else:
if self.__status == GPIO.input(self.__pin):
self.__status = not GPIO.input(self.__pin)
return self.__status
#Events
def register(self, observer):
if observer not in self.__observers:
self.__observers.append(observer)
def deregister(self, observer):
if observer in self.__observers:
self.__observers.remove(observer)
def notify_observers(self, status):
for o in self.__observers:
o.on_tact_event(self.__pin, status)
def event(self, action):
self.notify_observers(action)
def make_event(self, channel):
self.notify_observers(self.__real_true if GPIO.input(self.__pin) else not self.__real_true)
if self.__real_true:
if self.__status != GPIO.input(self.__pin):
self.__status = GPIO.input(self.__pin)
#self.notify_observers(self.__real_true if self.__status else not self.__real_true)
else:
if self.__status == GPIO.input(self.__pin):
self.__status = not GPIO.input(self.__pin)
#self.notify_observers(self.__real_true if not self.__status else not self.__real_true)
def watching(self):
if self.__real_true:
while True:
if GPIO.input(self.__pin) != self.__status:
self.__status = GPIO.input(self.__pin)
self.notify_observers(self.__real_true if self.__status else not self.__real_true)
time.sleep(0.05)
else:
while True:
if GPIO.input(self.__pin) == self.__status:
self.__status = not GPIO.input(self.__pin)
self.notify_observers(self.__real_true if not self.__status else not self.__real_true)
time.sleep(0.05)
class TactRow(object):
'''
Class of tacts in row
'''
__tacts = []
__pins = []
__real_true = GPIO.HIGH
def __init__(self, pins, real_true = GPIO.HIGH):
'''
Init the tacts
        :param pins: pin numbers in array
:param real_true: GPIO.HIGH or GPIO.LOW
:return: void
'''
self.__pins = pins
self.__real_true = real_true
for p in pins:
self.__tacts.append(Tact(p, real_true))
    #Status.
def is_on(self, index):
'''
Get status of tact in tactrow by index
:param index: index of the tact
:return: status in boolean
'''
if index >= len(self.__tacts):
return False
return self.__tacts[index].is_on
@property
def row_status(self):
'''
Get status array of the tactrow
:return: status array
'''
r = []
for l in self.__tacts:
r.append(l.is_on)
return r
@property
def items(self):
'''
Get the instances of the tacts in tactrow
:return: instances array
'''
return self.__tacts
| maxis1314/raspberry | SDK/sakshat/entities/tact.py | Python | apache-2.0 | 4,814 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.28 on 2021-11-10 19:21
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('osf', '0238_abstractprovider_allow_updates'),
]
operations = [
migrations.AddIndex(
model_name='schemaresponse',
index=models.Index(fields=['object_id', 'content_type'], name='osf_schemar_object__8cc95e_idx'),
),
]
| Johnetordoff/osf.io | osf/migrations/0239_auto_20211110_1921.py | Python | apache-2.0 | 497 |
from __future__ import (absolute_import, division, print_function)
from mantid.api import (PythonAlgorithm, AlgorithmFactory,
MatrixWorkspaceProperty, Progress, InstrumentValidator,
ITableWorkspaceProperty)
from mantid.kernel import Direction, FloatBoundedValidator, Property
import numpy as np
from scipy import integrate
import scipy as sp
class ComputeCalibrationCoefVan(PythonAlgorithm):
""" Calculate coefficients to normalize by Vanadium and correct Debye
Waller factor
"""
def __init__(self):
"""
Init
"""
PythonAlgorithm.__init__(self)
self.vanaws = None
self.defaultT = 293.0 # K, default temperature if not given
self.Mvan = 50.942 # [g/mol], Vanadium molar mass
self.DebyeT = 389.0 # K, Debye temperature for Vanadium
def category(self):
""" Return category
"""
return "CorrectionFunctions\\EfficiencyCorrections"
def name(self):
""" Return summary
"""
return "ComputeCalibrationCoefVan"
def summary(self):
return ("Calculate coefficients for detector efficiency correction " +
"using the Vanadium data.")
def PyInit(self):
""" Declare properties
"""
self.declareProperty(MatrixWorkspaceProperty(
"VanadiumWorkspace", "",
direction=Direction.Input,
validator=InstrumentValidator()),
"Input Vanadium workspace")
self.declareProperty(ITableWorkspaceProperty("EPPTable", "",
direction=Direction.Input),
("Input EPP table. May be produced by FindEPP " +
"algorithm."))
self.declareProperty(MatrixWorkspaceProperty("OutputWorkspace", "",
direction=Direction.Output),
("Name the workspace that will contain the " +
"calibration coefficients"))
self.declareProperty("Temperature",
defaultValue=Property.EMPTY_DBL,
validator=FloatBoundedValidator(lower=0.0),
direction=Direction.Input,
doc=("Temperature during the experiment (in " +
"Kelvins) if the 'temperature' sample log " +
"is missing or needs to be overriden."))
return
def validateInputs(self):
""" Validate the inputs
"""
issues = dict()
inws = self.getProperty("VanadiumWorkspace").value
run = inws.getRun()
if not run.hasProperty('wavelength'):
issues['VanadiumWorkspace'] = ("Input workspace must have " +
"wavelength sample log.")
else:
try:
float(run.getProperty('wavelength').value)
except ValueError:
issues['VanadiumWorkspace'] = ("Invalid value for " +
"wavelength sample log. " +
"Wavelength must be a number.")
table = self.getProperty("EPPTable").value
if table.rowCount() != inws.getNumberHistograms():
issues['EPPTable'] = ("Number of rows in the table must match " +
"to the input workspace dimension.")
# table must have 'PeakCentre' and 'Sigma' columns
if 'PeakCentre' not in table.getColumnNames():
issues['EPPTable'] = "EPP Table must have the PeakCentre column."
if 'Sigma' not in table.getColumnNames():
issues['EPPTable'] = "EPP Table must have the Sigma column."
return issues
def get_temperature(self):
"""Return the temperature
"""
if not self.getProperty("Temperature").isDefault:
return self.getProperty("Temperature").value
run = self.vanaws.getRun()
if not run.hasProperty('temperature'):
self.log().warning("No Temperature given and the 'temperature' " +
"sample log is not present in " +
self.vanaws.name() +
" T=293K is assumed for Debye-Waller factor.")
return self.defaultT
try:
temperature = float(run.getProperty('temperature').value)
except ValueError as err:
self.log().warning("Error of getting temperature from the " +
"sample log " + err + " T=293K is assumed " +
"for Debye-Waller factor.")
return self.defaultT
return temperature
def PyExec(self):
""" Main execution body
"""
# returns workspace instance
self.vanaws = self.getProperty("VanadiumWorkspace").value
# returns workspace name (string)
outws_name = self.getPropertyValue("OutputWorkspace")
eppws = self.getProperty("EPPTable").value
nhist = self.vanaws.getNumberHistograms()
prog_reporter = Progress(self, start=0.0, end=1.0, nreports=nhist+1)
# calculate array of Debye-Waller factors
dwf = self.calculate_dwf()
# for each detector: fit gaussian to get peak_centre and fwhm
# sum data in the range [peak_centre - 3*fwhm, peak_centre + 3*fwhm]
dataX = self.vanaws.readX(0)
coefY = np.zeros(nhist)
coefE = np.zeros(nhist)
peak_centre = eppws.column('PeakCentre')
sigma = eppws.column('Sigma')
specInfo = self.vanaws.spectrumInfo()
for idx in range(nhist):
prog_reporter.report("Setting %dth spectrum" % idx)
dataY = self.vanaws.readY(idx)
if np.max(dataY) == 0 or specInfo.isMasked(idx):
coefY[idx] = 0.
coefE[idx] = 0.
else:
dataE = self.vanaws.readE(idx)
fwhm = sigma[idx]*2.*np.sqrt(2.*np.log(2.))
idxmin = (np.fabs(dataX-peak_centre[idx]+3.*fwhm)).argmin()
idxmax = (np.fabs(dataX-peak_centre[idx]-3.*fwhm)).argmin()
coefY[idx] = sum(dataY[idxmin:idxmax+1])/dwf[idx]
coefE[idx] = np.sqrt(sum(
np.square(dataE[idxmin:idxmax+1])))/dwf[idx]
# create X array, X data are the same for all detectors, so
coefX = np.zeros(nhist)
coefX.fill(dataX[0])
create = self.createChildAlgorithm("CreateWorkspace")
create.setPropertyValue('OutputWorkspace', outws_name)
create.setProperty('ParentWorkspace', self.vanaws)
create.setProperty('DataX', coefX)
create.setProperty('DataY', coefY)
create.setProperty('DataE', coefE)
create.setProperty('NSpec', nhist)
create.setProperty('UnitX', 'TOF')
create.execute()
outws = create.getProperty('OutputWorkspace').value
self.setProperty("OutputWorkspace", outws)
def get_detID_offset(self):
"""
returns ID of the first detector
"""
return self.vanaws.getSpectrum(0).getDetectorIDs()[0]
def calculate_dwf(self):
"""
Calculates Debye-Waller factor according to
Sears and Shelley Acta Cryst. A 47, 441 (1991)
"""
run = self.vanaws.getRun()
nhist = self.vanaws.getNumberHistograms()
thetasort = np.zeros(nhist) # theta in radians, not 2Theta
instrument = self.vanaws.getInstrument()
detID_offset = self.get_detID_offset()
for i in range(nhist):
det = instrument.getDetector(i + detID_offset)
thetasort[i] = 0.5 * np.sign(np.cos(det.getPhi())) * \
self.vanaws.detectorTwoTheta(det)
# T in K
temperature = self.get_temperature()
# Wavelength, Angstrom
wlength = float(run.getLogData('wavelength').value)
# Vanadium mass, kg
mass_vana = 0.001*self.Mvan/sp.constants.N_A
temp_ratio = temperature/self.DebyeT
if temp_ratio < 1.e-3:
integral = 0.5
else:
integral = \
integrate.quad(lambda x: x/sp.tanh(0.5*x/temp_ratio), 0, 1)[0]
msd = 3.*sp.constants.hbar**2 / \
(2.*mass_vana*sp.constants.k * self.DebyeT)*integral*1.e20
return np.exp(-msd*(4.*sp.pi*sp.sin(thetasort)/wlength)**2)
# Register algorithm with Mantid.
AlgorithmFactory.subscribe(ComputeCalibrationCoefVan)
| wdzhou/mantid | Framework/PythonInterface/plugins/algorithms/ComputeCalibrationCoefVan.py | Python | gpl-3.0 | 8,715 |
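# Standalone numeric sketch of the Debye-Waller factor computed in
# calculate_dwf() above (no Mantid required). The 293 K temperature, 4.74 A
# wavelength and 45 degree scattering angle are arbitrary example inputs.
if __name__ == '__main__':
    import numpy as np
    from scipy import constants, integrate
    M_VAN, DEBYE_T = 50.942, 389.0                 # g/mol, K
    temperature, wavelength = 293.0, 4.74          # K, Angstrom
    theta = np.radians(45.0) / 2.                  # theta, not 2theta
    mass_vana = 0.001 * M_VAN / constants.N_A      # kg per atom
    temp_ratio = temperature / DEBYE_T
    integral = integrate.quad(
        lambda x: x / np.tanh(0.5 * x / temp_ratio), 0, 1)[0]
    msd = (3. * constants.hbar ** 2 /
           (2. * mass_vana * constants.k * DEBYE_T) * integral * 1.e20)
    print(np.exp(-msd * (4. * np.pi * np.sin(theta) / wavelength) ** 2))
    # prints roughly 0.99 -- the correction is small at room temperature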
#!/usr/bin/env python
#
# ThugAPI.py
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307 USA
import sys
import os
import logging
from zope.interface import implements
try:
import urllib.parse as urlparse
except ImportError:
import urlparse
from DOM.W3C import w3c
from DOM import Window
from DOM import HTTPSession
from DOM import DFT
from DOM import MIMEHandler
from DOM import SchemeHandler
from WebTracking import WebTracking
from Encoding import Encoding
from Logging.ThugLogging import ThugLogging
from .IThugAPI import IThugAPI
from .ThugOpts import ThugOpts
from .ThugVulnModules import ThugVulnModules
from .OpaqueFilter import OpaqueFilter
from .abstractmethod import abstractmethod
from Classifier import JSClassifier
from Classifier import URLClassifier
from Classifier import SampleClassifier
log = logging.getLogger("Thug")
__thug_version__ = '0.6.4'
class ThugAPI:
implements(IThugAPI)
def __init__(self, args):
self.args = args
self.thug_version = __thug_version__
log.ThugOpts = ThugOpts()
log.ThugVulnModules = ThugVulnModules()
log.WebTracking = WebTracking.WebTracking()
log.MIMEHandler = MIMEHandler.MIMEHandler()
log.SchemeHandler = SchemeHandler.SchemeHandler()
log.JSClassifier = JSClassifier.JSClassifier()
log.URLClassifier = URLClassifier.URLClassifier()
log.SampleClassifier = SampleClassifier.SampleClassifier()
log.Encoding = Encoding.Encoding()
def __call__(self):
self.analyze()
def usage(self):
pass
def version(self):
print("Thug %s" % (self.thug_version, ))
sys.exit(0)
def get_useragent(self):
return log.ThugOpts.useragent
def set_useragent(self, useragent):
log.ThugOpts.useragent = useragent
def get_events(self):
return log.ThugOpts.events
def set_events(self, events):
log.ThugOpts.events = events
def get_delay(self):
return log.ThugOpts.delay
def set_delay(self, delay):
log.ThugOpts.delay = delay
def get_file_logging(self):
return log.ThugOpts.file_logging
def set_file_logging(self):
log.ThugOpts.file_logging = True
def get_json_logging(self):
return log.ThugOpts.json_logging
def set_json_logging(self):
log.ThugOpts.json_logging = True
def get_maec11_logging(self):
return log.ThugOpts.maec11_logging
def set_maec11_logging(self):
log.ThugOpts.maec11_logging = True
def get_referer(self):
return log.ThugOpts.referer
def set_referer(self, referer):
log.ThugOpts.referer = referer
def get_proxy(self):
return log.ThugOpts.proxy
def set_proxy(self, proxy):
log.ThugOpts.proxy = proxy
def set_no_fetch(self):
log.ThugOpts.no_fetch = True
def set_verbose(self):
log.setLevel(logging.INFO)
def set_debug(self):
log.setLevel(logging.DEBUG)
def set_no_cache(self):
log.ThugOpts.cache = None
def set_ast_debug(self):
log.ThugOpts.ast_debug = True
def set_http_debug(self):
log.ThugOpts.http_debug += 1
def set_acropdf_pdf(self, acropdf_pdf):
log.ThugVulnModules.acropdf_pdf = acropdf_pdf
def disable_acropdf(self):
log.ThugVulnModules.disable_acropdf()
def set_shockwave_flash(self, shockwave):
log.ThugVulnModules.shockwave_flash = shockwave
def disable_shockwave_flash(self):
log.ThugVulnModules.disable_shockwave_flash()
def set_javaplugin(self, javaplugin):
log.ThugVulnModules.javaplugin = javaplugin
def disable_javaplugin(self):
log.ThugVulnModules.disable_javaplugin()
def get_threshold(self):
return log.ThugOpts.threshold
def set_threshold(self, threshold):
log.ThugOpts.threshold = threshold
def get_extensive(self):
return log.ThugOpts.extensive
def set_extensive(self):
log.ThugOpts.extensive = True
def get_timeout(self):
return log.ThugOpts.timeout
def set_timeout(self, timeout):
log.ThugOpts.timeout = timeout
def get_broken_url(self):
return log.ThugOpts.broken_url
def set_broken_url(self):
log.ThugOpts.broken_url = True
def get_web_tracking(self):
return log.ThugOpts.web_tracking
def set_web_tracking(self):
log.ThugOpts.web_tracking = True
def disable_honeyagent(self):
log.ThugOpts.honeyagent = False
def log_init(self, url):
log.ThugLogging = ThugLogging(self.thug_version)
log.ThugLogging.set_basedir(url)
def set_log_dir(self, logdir):
log.ThugLogging.set_absbasedir(logdir)
def set_log_output(self, output):
fh = logging.FileHandler(os.path.join(log.ThugLogging.baseDir, output))
log.addHandler(fh)
def set_log_quiet(self):
root = logging.getLogger()
for handler in root.handlers:
if isinstance(handler, logging.StreamHandler):
handler.addFilter(OpaqueFilter())
def set_vt_query(self):
log.ThugOpts.set_vt_query()
def set_vt_submit(self):
log.ThugOpts.set_vt_submit()
def get_vt_runtime_apikey(self):
return log.ThugOpts.vt_runtime_apikey
def set_vt_runtime_apikey(self, vt_runtime_apikey):
log.ThugOpts.vt_runtime_apikey = vt_runtime_apikey
def get_mongodb_address(self):
return log.ThugOpts.mongodb_address
def set_mongodb_address(self, mongodb_address):
log.ThugOpts.mongodb_address = mongodb_address
def add_urlclassifier(self, rule):
log.URLClassifier.add_rule(rule)
def add_jsclassifier(self, rule):
log.JSClassifier.add_rule(rule)
def add_sampleclassifier(self, rule):
log.SampleClassifier.add_rule(rule)
def log_event(self):
log.ThugLogging.log_event()
def run(self, window):
dft = DFT.DFT(window)
dft.run()
def run_local(self, url):
log.ThugLogging.set_url(url)
log.ThugOpts.local = True
log.HTTPSession = HTTPSession.HTTPSession()
html = open(url, 'r').read()
doc = w3c.parseString(html)
window = Window.Window('about:blank', doc, personality = log.ThugOpts.useragent)
window.open()
self.run(window)
def run_remote(self, url):
scheme = urlparse.urlparse(url).scheme
if not scheme or not scheme.startswith('http'):
url = 'http://%s' % (url, )
log.ThugLogging.set_url(url)
log.HTTPSession = HTTPSession.HTTPSession()
doc = w3c.parseString('')
window = Window.Window(log.ThugOpts.referer, doc, personality = log.ThugOpts.useragent)
window = window.open(url)
if window:
self.run(window)
@abstractmethod
def analyze(self):
pass
| morallo/thug | src/ThugAPI/ThugAPI.py | Python | gpl-2.0 | 7,585 |
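# Minimal caller sketch following the subclass-and-implement-analyze() pattern
# that the abstract method above implies; the URL is only an example.
class MiniThug(ThugAPI):
    def __init__(self, url):
        self.url = url
        ThugAPI.__init__(self, None)

    def analyze(self):
        self.set_file_logging()
        self.log_init(self.url)
        self.run_remote(self.url)
        self.log_event()

# MiniThug('http://www.example.com')()   # __call__ dispatches to analyze()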
#!/usr/bin/env python
'''Test that audio playback works.
You should hear white noise (static) for 0.5 seconds. The test will exit
immediately after.
You may want to turn down the volume of your speakers.
'''
import unittest
from pyglet import media
from pyglet.media import procedural
class TEST_CASE(unittest.TestCase):
def test_method(self):
source = procedural.WhiteNoise(0.5)
player = media.Player()
player.queue(source)
player.play()
while player.source:
player.dispatch_events()
if __name__ == '__main__':
unittest.main()
| shrimpboyho/herblore | pyglet-1.1.4/tests/media/PLAYER_QUEUE_PLAY.py | Python | gpl-2.0 | 599 |
"""
QUESTION:
Problem Description:
Given a binary search tree and a node in it, find the in-order successor of that node in the BST.
Note: If the given node has no in-order successor in the tree, return null.
ANSWER:
"""
class Solution(object):
def inorderSuccessor(self, root, node):
if node.right:
p = node.right
while p.left:
p = p.left
return p
suc = None
while root:
if node.val < root.val:
suc = root
root = root.left
elif node.val > root.val:
root = root.right
else:
break
return suc
if __name__ == '__main__':
    # Minimal self-test; TreeNode is a stand-in for the definition LeetCode provides.
    class TreeNode(object):
        def __init__(self, val):
            self.val = val
            self.left = None
            self.right = None
    root = TreeNode(2)
    root.left = TreeNode(1)
    root.right = TreeNode(3)
    print(Solution().inorderSuccessor(root, root.left).val)   # 2
    print(Solution().inorderSuccessor(root, root.right))      # None
 | tktrungna/leetcode | Python/inorder-successor-in-bst.py | Python | mit | 720 |
from django import forms
from django.forms import ModelForm
from models import *
from django.forms import Textarea
from django.contrib.auth.hashers import make_password
from django.contrib.auth.models import Group
class OgretimElemaniFormu(ModelForm):
parolasi=forms.CharField(label="Parolasi",
help_text='Parola yazilmazsa onceki parola gecerlidir',
required=False)
class Meta:
model = OgretimElemani
fields = ('unvani','first_name','last_name',
'username','parolasi','email','telefonu')
def save(self,commit=True):
instance = super(OgretimElemaniFormu,self).save(commit=False)
if self.cleaned_data.get('parolasi'):
instance.password=make_password(self.cleaned_data['parolasi'])
if commit:
instance.save()
if instance.group.filter(name='ogretimelemanlari'):
grp = Group.objects.get(name='ogretimelemanlari')
instance.group.add(grp)
return instance
class DersFormu(ModelForm):
class Meta:
model = Ders
widgets = {
'tanimi':Textarea(attrs={'cols':35, 'rows':5}),
}
#class OgretimElemaniAltFormu(ModelForm):
# class Meta:
# model = OgretimElemani
# fields = ('username','parolasi','email')
#exclude=('unvani','telefonu') would do the same job
# using fields we can control the order in which the fields appear
# or, by using exclude, we can drop unwanted fields from the form
class AramaFormu(forms.Form):
aranacak_kelime = forms.CharField()
def clean_aranacak_kelime(self):
kelime=self.cleaned_data['aranacak_kelime']
if len(kelime) < 3:
raise forms.ValidationError('Aranacak kelime 3 harften az olamaz')
return kelime | dogancankilment/Django-obs | yonetim/forms.py | Python | gpl-2.0 | 1,672 |
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2002-2006 Donald N. Allingham
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# $Id$
#-------------------------------------------------------------------------
#
# Standard Python modules
#
#-------------------------------------------------------------------------
import re
from ...ggettext import gettext as _
#-------------------------------------------------------------------------
#
# GRAMPS modules
#
#-------------------------------------------------------------------------
from . import Rule
#-------------------------------------------------------------------------
#
# HasIdOf
#
#-------------------------------------------------------------------------
class RegExpIdBase(Rule):
"""
Rule that checks for an object whose GRAMPS ID matches regular expression.
"""
labels = [ _('Regular expression:') ]
name = 'Objects with <Id>'
description = "Matches objects whose Gramps ID matches " \
"the regular expression"
category = _('General filters')
def __init__(self, list):
Rule.__init__(self, list)
try:
self.match = re.compile(list[0], re.I|re.U|re.L)
except:
self.match = re.compile('')
def apply(self, db, obj):
return self.match.match(obj.gramps_id) is not None
| arunkgupta/gramps | gramps/gen/filters/rules/_regexpidbase.py | Python | gpl-2.0 | 2,042 |
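# Tiny illustration (not Gramps code): the rule above is just an anchored regex
# test against an object's Gramps ID, so a pattern like 'I\d+' matches 'I0042'
# but not 'F0042'.
if __name__ == '__main__':
    import re
    matcher = re.compile(r'I\d+', re.I | re.U)
    print(bool(matcher.match('I0042')), bool(matcher.match('F0042')))   # True False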
"""
XX. Model inheritance
Model inheritance exists in two varieties:
- abstract base classes which are a way of specifying common
information inherited by the subclasses. They don't exist as a separate
model.
- non-abstract base classes (the default), which are models in their own
right with their own database tables and everything. Their subclasses
have references back to them, created automatically.
Both styles are demonstrated here.
"""
from django.db import models
#
# Abstract base classes
#
class CommonInfo(models.Model):
name = models.CharField(max_length=50)
age = models.PositiveIntegerField()
class Meta:
abstract = True
ordering = ['name']
def __unicode__(self):
return u'%s %s' % (self.__class__.__name__, self.name)
class Worker(CommonInfo):
job = models.CharField(max_length=50)
class Student(CommonInfo):
school_class = models.CharField(max_length=10)
class Meta:
pass
#
# Abstract base classes with related models
#
class Post(models.Model):
title = models.CharField(max_length=50)
class Attachment(models.Model):
post = models.ForeignKey(Post, related_name='attached_%(class)s_set')
content = models.TextField()
class Meta:
abstract = True
def __unicode__(self):
return self.content
class Comment(Attachment):
is_spam = models.BooleanField()
class Link(Attachment):
url = models.URLField()
#
# Multi-table inheritance
#
class Chef(models.Model):
name = models.CharField(max_length=50)
def __unicode__(self):
return u"%s the chef" % self.name
class Place(models.Model):
name = models.CharField(max_length=50)
address = models.CharField(max_length=80)
def __unicode__(self):
return u"%s the place" % self.name
class Rating(models.Model):
rating = models.IntegerField(null=True, blank=True)
class Meta:
abstract = True
ordering = ['-rating']
class Restaurant(Place, Rating):
serves_hot_dogs = models.BooleanField()
serves_pizza = models.BooleanField()
chef = models.ForeignKey(Chef, null=True, blank=True)
class Meta(Rating.Meta):
db_table = 'my_restaurant'
def __unicode__(self):
return u"%s the restaurant" % self.name
class ItalianRestaurant(Restaurant):
serves_gnocchi = models.BooleanField()
def __unicode__(self):
return u"%s the italian restaurant" % self.name
class Supplier(Place):
customers = models.ManyToManyField(Restaurant, related_name='provider')
def __unicode__(self):
return u"%s the supplier" % self.name
class ParkingLot(Place):
# An explicit link to the parent (we can control the attribute name).
parent = models.OneToOneField(Place, primary_key=True, parent_link=True)
main_site = models.ForeignKey(Place, related_name='lot')
def __unicode__(self):
return u"%s the parking lot" % self.name
__test__ = {'API_TESTS':"""
# The Student and Worker models both have 'name' and 'age' fields on them and
# inherit the __unicode__() method, just as with normal Python subclassing.
# This is useful if you want to factor out common information for programming
# purposes, but still completely independent separate models at the database
# level.
>>> w = Worker(name='Fred', age=35, job='Quarry worker')
>>> w.save()
>>> w2 = Worker(name='Barney', age=34, job='Quarry worker')
>>> w2.save()
>>> s = Student(name='Pebbles', age=5, school_class='1B')
>>> s.save()
>>> unicode(w)
u'Worker Fred'
>>> unicode(s)
u'Student Pebbles'
# The children inherit the Meta class of their parents (if they don't specify
# their own).
>>> Worker.objects.values('name')
[{'name': u'Barney'}, {'name': u'Fred'}]
# Since Student does not subclass CommonInfo's Meta, it has the effect of
# completely overriding it. So ordering by name doesn't take place for Students.
>>> Student._meta.ordering
[]
# However, the CommonInfo class cannot be used as a normal model (it doesn't
# exist as a model).
>>> CommonInfo.objects.all()
Traceback (most recent call last):
...
AttributeError: type object 'CommonInfo' has no attribute 'objects'
# Create a Post
>>> post = Post(title='Lorem Ipsum')
>>> post.save()
# The Post model has distinct accessors for the Comment and Link models.
>>> post.attached_comment_set.create(content='Save $ on V1agr@', is_spam=True)
<Comment: Save $ on V1agr@>
>>> post.attached_link_set.create(content='The Web framework for perfectionists with deadlines.', url='http://www.djangoproject.com/')
<Link: The Web framework for perfectionists with deadlines.>
# The Post model doesn't have an attribute called 'attached_%(class)s_set'.
>>> getattr(post, 'attached_%(class)s_set')
Traceback (most recent call last):
...
AttributeError: 'Post' object has no attribute 'attached_%(class)s_set'
# The Place/Restaurant/ItalianRestaurant models all exist as independent
# models. However, the subclasses also have transparent access to the fields of
# their ancestors.
# Create a couple of Places.
>>> p1 = Place(name='Master Shakes', address='666 W. Jersey')
>>> p1.save()
>>> p2 = Place(name='Ace Hardware', address='1013 N. Ashland')
>>> p2.save()
Test constructor for Restaurant.
>>> r = Restaurant(name='Demon Dogs', address='944 W. Fullerton',serves_hot_dogs=True, serves_pizza=False, rating=2)
>>> r.save()
# Test the constructor for ItalianRestaurant.
>>> c = Chef(name="Albert")
>>> c.save()
>>> ir = ItalianRestaurant(name='Ristorante Miron', address='1234 W. Ash', serves_hot_dogs=False, serves_pizza=False, serves_gnocchi=True, rating=4, chef=c)
>>> ir.save()
>>> ItalianRestaurant.objects.filter(address='1234 W. Ash')
[<ItalianRestaurant: Ristorante Miron the italian restaurant>]
>>> ir.address = '1234 W. Elm'
>>> ir.save()
>>> ItalianRestaurant.objects.filter(address='1234 W. Elm')
[<ItalianRestaurant: Ristorante Miron the italian restaurant>]
# Make sure Restaurant and ItalianRestaurant have the right fields in the right
# order.
>>> [f.name for f in Restaurant._meta.fields]
['id', 'name', 'address', 'place_ptr', 'rating', 'serves_hot_dogs', 'serves_pizza', 'chef']
>>> [f.name for f in ItalianRestaurant._meta.fields]
['id', 'name', 'address', 'place_ptr', 'rating', 'serves_hot_dogs', 'serves_pizza', 'chef', 'restaurant_ptr', 'serves_gnocchi']
>>> Restaurant._meta.ordering
['-rating']
# Even though p.supplier for a Place 'p' (a parent of a Supplier), a Restaurant
# object cannot access that reverse relation, since it's not part of the
# Place-Supplier Hierarchy.
>>> Place.objects.filter(supplier__name='foo')
[]
>>> Restaurant.objects.filter(supplier__name='foo')
Traceback (most recent call last):
...
FieldError: Cannot resolve keyword 'supplier' into field. Choices are: address, chef, id, italianrestaurant, lot, name, place_ptr, provider, rating, serves_hot_dogs, serves_pizza
# Parent fields can be used directly in filters on the child model.
>>> Restaurant.objects.filter(name='Demon Dogs')
[<Restaurant: Demon Dogs the restaurant>]
>>> ItalianRestaurant.objects.filter(address='1234 W. Elm')
[<ItalianRestaurant: Ristorante Miron the italian restaurant>]
# Filters against the parent model return objects of the parent's type.
>>> Place.objects.filter(name='Demon Dogs')
[<Place: Demon Dogs the place>]
# Since the parent and child are linked by an automatically created
# OneToOneField, you can get from the parent to the child by using the child's
# name.
>>> place = Place.objects.get(name='Demon Dogs')
>>> place.restaurant
<Restaurant: Demon Dogs the restaurant>
>>> Place.objects.get(name='Ristorante Miron').restaurant.italianrestaurant
<ItalianRestaurant: Ristorante Miron the italian restaurant>
>>> Restaurant.objects.get(name='Ristorante Miron').italianrestaurant
<ItalianRestaurant: Ristorante Miron the italian restaurant>
# This won't work because the Demon Dogs restaurant is not an Italian
# restaurant.
>>> place.restaurant.italianrestaurant
Traceback (most recent call last):
...
DoesNotExist: ItalianRestaurant matching query does not exist.
# Related objects work just as they normally do.
>>> s1 = Supplier(name="Joe's Chickens", address='123 Sesame St')
>>> s1.save()
>>> s1.customers = [r, ir]
>>> s2 = Supplier(name="Luigi's Pasta", address='456 Sesame St')
>>> s2.save()
>>> s2.customers = [ir]
# This won't work because the Place we select is not a Restaurant (it's a
# Supplier).
>>> p = Place.objects.get(name="Joe's Chickens")
>>> p.restaurant
Traceback (most recent call last):
...
DoesNotExist: Restaurant matching query does not exist.
# But we can descend from p to the Supplier child, as expected.
>>> p.supplier
<Supplier: Joe's Chickens the supplier>
>>> ir.provider.order_by('-name')
[<Supplier: Luigi's Pasta the supplier>, <Supplier: Joe's Chickens the supplier>]
>>> Restaurant.objects.filter(provider__name__contains="Chickens")
[<Restaurant: Ristorante Miron the restaurant>, <Restaurant: Demon Dogs the restaurant>]
>>> ItalianRestaurant.objects.filter(provider__name__contains="Chickens")
[<ItalianRestaurant: Ristorante Miron the italian restaurant>]
>>> park1 = ParkingLot(name='Main St', address='111 Main St', main_site=s1)
>>> park1.save()
>>> park2 = ParkingLot(name='Well Lit', address='124 Sesame St', main_site=ir)
>>> park2.save()
>>> Restaurant.objects.get(lot__name='Well Lit')
<Restaurant: Ristorante Miron the restaurant>
# The update() command can update fields in parent and child classes at once
# (although it executed multiple SQL queries to do so).
>>> Restaurant.objects.filter(serves_hot_dogs=True, name__contains='D').update(name='Demon Puppies', serves_hot_dogs=False)
1
>>> r1 = Restaurant.objects.get(pk=r.pk)
>>> r1.serves_hot_dogs == False
True
>>> r1.name
u'Demon Puppies'
# The values() command also works on fields from parent models.
>>> d = {'rating': 4, 'name': u'Ristorante Miron'}
>>> list(ItalianRestaurant.objects.values('name', 'rating')) == [d]
True
# select_related works with fields from the parent object as if they were a
# normal part of the model.
>>> from django import db
>>> from django.conf import settings
>>> settings.DEBUG = True
>>> db.reset_queries()
>>> ItalianRestaurant.objects.all()[0].chef
<Chef: Albert the chef>
>>> len(db.connection.queries)
2
>>> ItalianRestaurant.objects.select_related('chef')[0].chef
<Chef: Albert the chef>
>>> len(db.connection.queries)
3
>>> settings.DEBUG = False
"""}
| grangier/django-11599 | tests/modeltests/model_inheritance/models.py | Python | bsd-3-clause | 10,437 |
import os
import tempfile
import unittest
import logging
from pyidf import ValidationLevel
import pyidf
from pyidf.idf import IDF
from pyidf.thermal_zones_and_surfaces import WallAdiabatic
log = logging.getLogger(__name__)
class TestWallAdiabatic(unittest.TestCase):
def setUp(self):
self.fd, self.path = tempfile.mkstemp()
def tearDown(self):
os.remove(self.path)
def test_create_walladiabatic(self):
pyidf.validation_level = ValidationLevel.error
obj = WallAdiabatic()
# alpha
var_name = "Name"
obj.name = var_name
# object-list
var_construction_name = "object-list|Construction Name"
obj.construction_name = var_construction_name
# object-list
var_zone_name = "object-list|Zone Name"
obj.zone_name = var_zone_name
# real
var_azimuth_angle = 180.0
obj.azimuth_angle = var_azimuth_angle
# real
var_tilt_angle = 90.0
obj.tilt_angle = var_tilt_angle
# real
var_starting_x_coordinate = 6.6
obj.starting_x_coordinate = var_starting_x_coordinate
# real
var_starting_y_coordinate = 7.7
obj.starting_y_coordinate = var_starting_y_coordinate
# real
var_starting_z_coordinate = 8.8
obj.starting_z_coordinate = var_starting_z_coordinate
# real
var_length = 9.9
obj.length = var_length
# real
var_height = 10.1
obj.height = var_height
idf = IDF()
idf.add(obj)
idf.save(self.path, check=False)
with open(self.path, mode='r') as f:
for line in f:
log.debug(line.strip())
idf2 = IDF(self.path)
self.assertEqual(idf2.walladiabatics[0].name, var_name)
self.assertEqual(idf2.walladiabatics[0].construction_name, var_construction_name)
self.assertEqual(idf2.walladiabatics[0].zone_name, var_zone_name)
self.assertAlmostEqual(idf2.walladiabatics[0].azimuth_angle, var_azimuth_angle)
self.assertAlmostEqual(idf2.walladiabatics[0].tilt_angle, var_tilt_angle)
self.assertAlmostEqual(idf2.walladiabatics[0].starting_x_coordinate, var_starting_x_coordinate)
self.assertAlmostEqual(idf2.walladiabatics[0].starting_y_coordinate, var_starting_y_coordinate)
self.assertAlmostEqual(idf2.walladiabatics[0].starting_z_coordinate, var_starting_z_coordinate)
self.assertAlmostEqual(idf2.walladiabatics[0].length, var_length)
self.assertAlmostEqual(idf2.walladiabatics[0].height, var_height) | rbuffat/pyidf | tests/test_walladiabatic.py | Python | apache-2.0 | 2,600 |
from django.test import TestCase
from makam import models
class ComposerTest(TestCase):
def setUp(self):
self.c1 = models.Composer.objects.create(name="c1")
self.c2 = models.Composer.objects.create(name="c2")
self.w1 = models.Work.objects.create(title="w1")
self.w1.lyricists.add(self.c1)
self.w1.composers.add(self.c2)
def test_works(self):
self.assertTrue(self.w1 in self.c1.lyricworklist())
self.assertEqual(0, len(self.c1.worklist()))
def test_lyrics_works(self):
self.assertTrue(self.w1 in self.c2.worklist())
self.assertEqual(0, len(self.c2.lyricworklist()))
| MTG/dunya | makam/test/test_composer.py | Python | agpl-3.0 | 654 |
def pentagonal(x):
return x * (3 * x - 1) // 2
SIZE = 10000
cache = {pentagonal(i) for i in range(1, SIZE + 1)}
def p44():
for i in range(1, SIZE):
pi = pentagonal(i)
for j in range(i + 1, SIZE + 1):
pj = pentagonal(j)
pd = pj - pi
ps = pj + pi
if pd in cache and ps in cache:
return pd
print(p44())
| pillowsham/studies | projecteuler/p44.py | Python | gpl-3.0 | 394 |
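# Side note (illustrative; needs Python 3.8+ for math.isqrt): instead of a
# precomputed set, pentagonality can be tested directly by inverting
# P = n(3n - 1)/2, since 24P + 1 must equal the perfect square (6n - 1)^2.
from math import isqrt

def is_pentagonal(p):
    d = 1 + 24 * p
    r = isqrt(d)
    return r * r == d and (1 + r) % 6 == 0

if __name__ == '__main__':
    assert all(is_pentagonal(pentagonal(i)) for i in range(1, 100))
    assert not is_pentagonal(2) and not is_pentagonal(10)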
# browsershots.org - Test your web design in different browsers
# Copyright (C) 2007 Johann C. Rocholl <[email protected]>
#
# Browsershots is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# Browsershots is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
GUI-specific interface functions for X11.
"""
__revision__ = "$Rev: 2568 $"
__date__ = "$Date: 2008-01-17 10:32:32 -0600 (Thu, 17 Jan 2008) $"
__author__ = "$Author: hawk $"
import os
from shotfactory04.gui import linux as base
class Gui(base.Gui):
"""
Special functions for Kazehakase.
"""
def reset_browser(self):
"""
Delete browser cache.
"""
home = os.environ['HOME']
self.delete_if_exists(os.path.join(
home, '.kazehakase', 'mozilla', '*', 'Cache'))
self.delete_if_exists(os.path.join(
home, '.kazehakase', 'mozilla', '*', 'history.dat'))
self.delete_if_exists(os.path.join(
home, '.kazehakase', 'mozilla', '*', 'cookies.txt'))
self.delete_if_exists(os.path.join(
home, '.kazehakase', 'favicon'))
def focus_browser(self):
"""
Focus on the browser window.
"""
self.shell('xte "mousemove 200 4"')
self.shell('xte "mouseclick 1"')
self.shell('xte "key Tab"')
self.shell('xte "key Tab"')
| hugs/shotfactory | shotfactory04/gui/linux/kazehakase.py | Python | gpl-3.0 | 1,852 |
"""Represent SGF games.
This is intended for use with SGF FF[4]; see http://www.red-bean.com/sgf/
"""
import datetime
from gomill import sgf_grammar
from gomill import sgf_properties
class Node(object):
"""An SGF node.
Instantiate with a raw property map (see sgf_grammar) and an
sgf_properties.Presenter.
A Node doesn't belong to a particular game (cf Tree_node below), but it
knows its board size (in order to interpret move values) and the encoding
to use for the raw property strings.
Changing the SZ property isn't allowed.
"""
def __init__(self, property_map, presenter):
# Map identifier (PropIdent) -> nonempty list of raw values
self._property_map = property_map
self._presenter = presenter
def get_size(self):
"""Return the board size used to interpret property values."""
return self._presenter.size
def get_encoding(self):
"""Return the encoding used for raw property values.
Returns a string (a valid Python codec name, eg "UTF-8").
"""
return self._presenter.encoding
def get_presenter(self):
"""Return the node's sgf_properties.Presenter."""
return self._presenter
def has_property(self, identifier):
"""Check whether the node has the specified property."""
return identifier in self._property_map
def properties(self):
"""Find the properties defined for the node.
Returns a list of property identifiers, in unspecified order.
"""
return self._property_map.keys()
def get_raw_list(self, identifier):
"""Return the raw values of the specified property.
Returns a nonempty list of 8-bit strings, in the raw property encoding.
The strings contain the exact bytes that go between the square brackets
(without interpreting escapes or performing any whitespace conversion).
Raises KeyError if there was no property with the given identifier.
(If the property is an empty elist, this returns a list containing a
single empty string.)
"""
return self._property_map[identifier]
def get_raw(self, identifier):
"""Return a single raw value of the specified property.
Returns an 8-bit string, in the raw property encoding.
The string contains the exact bytes that go between the square brackets
(without interpreting escapes or performing any whitespace conversion).
Raises KeyError if there was no property with the given identifier.
If the property has multiple values, this returns the first (if the
value is an empty elist, this returns an empty string).
"""
return self._property_map[identifier][0]
def get_raw_property_map(self):
"""Return the raw values of all properties as a dict.
Returns a dict mapping property identifiers to lists of raw values
(see get_raw_list()).
Returns the same dict each time it's called.
Treat the returned dict as read-only.
"""
return self._property_map
def _set_raw_list(self, identifier, values):
if identifier == "SZ" and values != [str(self._presenter.size)]:
raise ValueError("changing size is not permitted")
self._property_map[identifier] = values
def unset(self, identifier):
"""Remove the specified property.
Raises KeyError if the property isn't currently present.
"""
if identifier == "SZ" and self._presenter.size != 19:
raise ValueError("changing size is not permitted")
del self._property_map[identifier]
def set_raw_list(self, identifier, values):
"""Set the raw values of the specified property.
identifier -- ascii string passing is_valid_property_identifier()
values -- nonempty iterable of 8-bit strings in the raw property
encoding
The values specify the exact bytes to appear between the square
brackets in the SGF file; you must perform any necessary escaping
first.
(To specify an empty elist, pass a list containing a single empty
string.)
"""
if not sgf_grammar.is_valid_property_identifier(identifier):
raise ValueError("ill-formed property identifier")
values = list(values)
if not values:
raise ValueError("empty property list")
for value in values:
if not sgf_grammar.is_valid_property_value(value):
raise ValueError("ill-formed raw property value")
self._set_raw_list(identifier, values)
def set_raw(self, identifier, value):
"""Set the specified property to a single raw value.
identifier -- ascii string passing is_valid_property_identifier()
value -- 8-bit string in the raw property encoding
The value specifies the exact bytes to appear between the square
brackets in the SGF file; you must perform any necessary escaping
first.
"""
if not sgf_grammar.is_valid_property_identifier(identifier):
raise ValueError("ill-formed property identifier")
if not sgf_grammar.is_valid_property_value(value):
raise ValueError("ill-formed raw property value")
self._set_raw_list(identifier, [value])
def get(self, identifier):
"""Return the interpreted value of the specified property.
Returns the value as a suitable Python representation.
Raises KeyError if the node does not have a property with the given
identifier.
Raises ValueError if it cannot interpret the value.
See sgf_properties.Presenter.interpret() for details.
"""
return self._presenter.interpret(
identifier, self._property_map[identifier])
def set(self, identifier, value):
"""Set the value of the specified property.
identifier -- ascii string passing is_valid_property_identifier()
value -- new property value (in its Python representation)
For properties with value type 'none', use value True.
Raises ValueError if it cannot represent the value.
See sgf_properties.Presenter.serialise() for details.
"""
self._set_raw_list(
identifier, self._presenter.serialise(identifier, value))
def get_raw_move(self):
"""Return the raw value of the move from a node.
Returns a pair (colour, raw value)
colour is 'b' or 'w'.
Returns None, None if the node contains no B or W property.
"""
values = self._property_map.get("B")
if values is not None:
colour = "b"
else:
values = self._property_map.get("W")
if values is not None:
colour = "w"
else:
return None, None
return colour, values[0]
def get_move(self):
"""Retrieve the move from a node.
Returns a pair (colour, move)
colour is 'b' or 'w'.
move is (row, col), or None for a pass.
Returns None, None if the node contains no B or W property.
"""
colour, raw = self.get_raw_move()
if colour is None:
return None, None
return (colour,
sgf_properties.interpret_go_point(raw, self._presenter.size))
def get_setup_stones(self):
"""Retrieve Add Black / Add White / Add Empty properties from a node.
Returns a tuple (black_points, white_points, empty_points)
Each value is a set of pairs (row, col).
"""
try:
bp = self.get("AB")
except KeyError:
bp = set()
try:
wp = self.get("AW")
except KeyError:
wp = set()
try:
ep = self.get("AE")
except KeyError:
ep = set()
return bp, wp, ep
def has_setup_stones(self):
"""Check whether the node has any AB/AW/AE properties."""
d = self._property_map
return ("AB" in d or "AW" in d or "AE" in d)
def set_move(self, colour, move):
"""Set the B or W property.
colour -- 'b' or 'w'.
move -- (row, col), or None for a pass.
Replaces any existing B or W property in the node.
"""
if colour not in ('b', 'w'):
raise ValueError
if 'B' in self._property_map:
del self._property_map['B']
if 'W' in self._property_map:
del self._property_map['W']
self.set(colour.upper(), move)
def set_setup_stones(self, black, white, empty=None):
"""Set Add Black / Add White / Add Empty properties.
black, white, empty -- list or set of pairs (row, col)
Removes any existing AB/AW/AE properties from the node.
"""
if 'AB' in self._property_map:
del self._property_map['AB']
if 'AW' in self._property_map:
del self._property_map['AW']
if 'AE' in self._property_map:
del self._property_map['AE']
if black:
self.set('AB', black)
if white:
self.set('AW', white)
if empty:
self.set('AE', empty)
def add_comment_text(self, text):
"""Add or extend the node's comment.
If the node doesn't have a C property, adds one with the specified
text.
Otherwise, adds the specified text to the existing C property value
(with two newlines in front).
"""
if self.has_property('C'):
self.set('C', self.get('C') + "\n\n" + text)
else:
self.set('C', text)
def __str__(self):
def format_property(ident, values):
return ident + "".join("[%s]" % s for s in values)
return "\n".join(
format_property(ident, values)
for (ident, values) in sorted(self._property_map.items())) \
+ "\n"
class Tree_node(Node):
"""A node embedded in an SGF game.
A Tree_node is a Node that also knows its position within an Sgf_game.
Do not instantiate directly; retrieve from an Sgf_game or another Tree_node.
A Tree_node is a list-like container of its children: it can be indexed,
sliced, and iterated over like a list, and supports index().
A Tree_node with no children is treated as having truth value false.
Public attributes (treat as read-only):
owner -- the node's Sgf_game
      parent -- the node's parent Tree_node (None for the root node)
"""
def __init__(self, parent, properties):
self.owner = parent.owner
self.parent = parent
self._children = []
Node.__init__(self, properties, parent._presenter)
def _add_child(self, node):
self._children.append(node)
def __len__(self):
return len(self._children)
def __getitem__(self, key):
return self._children[key]
def index(self, child):
return self._children.index(child)
def new_child(self, index=None):
"""Create a new Tree_node and add it as this node's last child.
If 'index' is specified, the new node is inserted in the child list at
the specified index instead (behaves like list.insert).
Returns the new node.
"""
child = Tree_node(self, {})
if index is None:
self._children.append(child)
else:
self._children.insert(index, child)
return child
def delete(self):
"""Remove this node from its parent."""
if self.parent is None:
raise ValueError("can't remove the root node")
self.parent._children.remove(self)
def reparent(self, new_parent, index=None):
"""Move this node to a new place in the tree.
new_parent -- Tree_node from the same game.
Raises ValueError if the new parent is this node or one of its
descendants.
If 'index' is specified, the node is inserted in the new parent's child
list at the specified index (behaves like list.insert); otherwise it's
placed at the end.
"""
if new_parent.owner != self.owner:
raise ValueError("new parent doesn't belong to the same game")
n = new_parent
while True:
if n == self:
raise ValueError("would create a loop")
n = n.parent
if n is None:
break
# self.parent is not None because moving the root would create a loop.
self.parent._children.remove(self)
self.parent = new_parent
if index is None:
new_parent._children.append(self)
else:
new_parent._children.insert(index, self)
def find(self, identifier):
"""Find the nearest ancestor-or-self containing the specified property.
Returns a Tree_node, or None if there is no such node.
"""
node = self
while node is not None:
if node.has_property(identifier):
return node
node = node.parent
return None
def find_property(self, identifier):
"""Return the value of a property, defined at this node or an ancestor.
This is intended for use with properties of type 'game-info', and with
properties with the 'inherit' attribute.
This returns the interpreted value, in the same way as get().
It searches up the tree, in the same way as find().
Raises KeyError if no node defining the property is found.
"""
node = self.find(identifier)
if node is None:
raise KeyError
return node.get(identifier)
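# Illustrative sketch (not part of the original gomill module): typical use of
# the Tree_node navigation API defined above. The positions played here are
# made up purely for demonstration; the helper is never called by this module.
def _tree_node_example():
    game = Sgf_game(size=9)
    root = game.get_root()
    first = root.new_child()            # append a child to the root
    first.set_move('b', (2, 2))
    second = root.new_child(0)          # insert another child at index 0
    second.set_move('b', (4, 4))
    first.reparent(second)              # move 'first' underneath 'second'
    # find_property() walks up through ancestors, so the root's SZ is visible:
    return first.find_property("SZ")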
class _Root_tree_node(Tree_node):
"""Variant of Tree_node used for a game root."""
def __init__(self, property_map, owner):
self.owner = owner
self.parent = None
self._children = []
Node.__init__(self, property_map, owner.presenter)
class _Unexpanded_root_tree_node(_Root_tree_node):
"""Variant of _Root_tree_node used with 'loaded' Sgf_games."""
def __init__(self, owner, coarse_tree):
_Root_tree_node.__init__(self, coarse_tree.sequence[0], owner)
self._coarse_tree = coarse_tree
def _expand(self):
sgf_grammar.make_tree(
self._coarse_tree, self, Tree_node, Tree_node._add_child)
delattr(self, '_coarse_tree')
self.__class__ = _Root_tree_node
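    # Once _expand() has rebound __class__ above, the re-entrant
    # self.__len__()/self.__getitem__()/... calls in the methods below
    # dispatch to the ordinary Tree_node implementations instead of recursing.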
def __len__(self):
self._expand()
return self.__len__()
def __getitem__(self, key):
self._expand()
return self.__getitem__(key)
def index(self, child):
self._expand()
return self.index(child)
def new_child(self, index=None):
self._expand()
return self.new_child(index)
def _main_sequence_iter(self):
presenter = self._presenter
for properties in sgf_grammar.main_sequence_iter(self._coarse_tree):
yield Node(properties, presenter)
class Sgf_game(object):
"""An SGF game tree.
The complete game tree is represented using Tree_nodes. The various methods
which return Tree_nodes will always return the same object for the same
node.
Instantiate with
size -- int (board size), in range 1 to 26
encoding -- the raw property encoding (default "UTF-8")
'encoding' must be a valid Python codec name.
The following root node properties are set for newly-created games:
FF[4]
GM[1]
SZ[size]
CA[encoding]
Changing FF and GM is permitted (but this library will carry on using the
FF[4] and GM[1] rules). Changing SZ is not permitted (unless the change
leaves the effective value unchanged). Changing CA is permitted; this
controls the encoding used by serialise().
"""
def __new__(cls, size, encoding="UTF-8", *args, **kwargs):
# To complete initialisation after this, you need to set 'root'.
if not 1 <= size <= 26:
raise ValueError("size out of range: %s" % size)
game = super(Sgf_game, cls).__new__(cls)
game.size = size
game.presenter = sgf_properties.Presenter(size, encoding)
return game
def __init__(self, *args, **kwargs):
self.root = _Root_tree_node({}, self)
self.root.set_raw('FF', "4")
self.root.set_raw('GM', "1")
self.root.set_raw('SZ', str(self.size))
# Read the encoding back so we get the normalised form
self.root.set_raw('CA', self.presenter.encoding)
@classmethod
def from_coarse_game_tree(cls, coarse_game, override_encoding=None):
"""Alternative constructor: create an Sgf_game from the parser output.
coarse_game -- Coarse_game_tree
override_encoding -- encoding name, eg "UTF-8" (optional)
The nodes' property maps (as returned by get_raw_property_map()) will
be the same dictionary objects as the ones from the Coarse_game_tree.
The board size and raw property encoding are taken from the SZ and CA
properties in the root node (defaulting to 19 and "ISO-8859-1",
respectively).
If override_encoding is specified, the source data is assumed to be in
the specified encoding (no matter what the CA property says), and the
CA property is set to match.
"""
try:
size_s = coarse_game.sequence[0]['SZ'][0]
except KeyError:
size = 19
else:
try:
size = int(size_s)
except ValueError:
raise ValueError("bad SZ property: %s" % size_s)
if override_encoding is None:
try:
encoding = coarse_game.sequence[0]['CA'][0]
except KeyError:
encoding = "ISO-8859-1"
else:
encoding = override_encoding
game = cls.__new__(cls, size, encoding)
game.root = _Unexpanded_root_tree_node(game, coarse_game)
if override_encoding is not None:
game.root.set_raw("CA", game.presenter.encoding)
return game
@classmethod
def from_string(cls, s, override_encoding=None):
"""Alternative constructor: read a single Sgf_game from a string.
s -- 8-bit string
Raises ValueError if it can't parse the string. See parse_sgf_game()
for details.
See from_coarse_game_tree for details of size and encoding handling.
"""
coarse_game = sgf_grammar.parse_sgf_game(s)
return cls.from_coarse_game_tree(coarse_game, override_encoding)
def serialise(self, wrap=79):
"""Serialise the SGF data as a string.
wrap -- int (default 79), or None
Returns an 8-bit string, in the encoding specified by the CA property
in the root node (defaulting to "ISO-8859-1").
If the raw property encoding and the target encoding match (which is
the usual case), the raw property values are included unchanged in the
output (even if they are improperly encoded.)
Otherwise, if any raw property value is improperly encoded,
UnicodeDecodeError is raised, and if any property value can't be
represented in the target encoding, UnicodeEncodeError is raised.
If the target encoding doesn't identify a Python codec, ValueError is
raised. Behaviour is unspecified if the target encoding isn't
ASCII-compatible (eg, UTF-16).
If 'wrap' is not None, makes some effort to keep output lines no longer
than 'wrap'.
"""
try:
encoding = self.get_charset()
except ValueError:
raise ValueError("unsupported charset: %s" %
self.root.get_raw_list("CA"))
coarse_tree = sgf_grammar.make_coarse_game_tree(
self.root, lambda node:node, Node.get_raw_property_map)
serialised = sgf_grammar.serialise_game_tree(coarse_tree, wrap)
if encoding == self.root.get_encoding():
return serialised
else:
return serialised.decode(self.root.get_encoding()).encode(encoding)
def get_property_presenter(self):
"""Return the property presenter.
Returns an sgf_properties.Presenter.
This can be used to customise how property values are interpreted and
serialised.
"""
return self.presenter
def get_root(self):
"""Return the root node (as a Tree_node)."""
return self.root
def get_last_node(self):
"""Return the last node in the 'leftmost' variation (as a Tree_node)."""
node = self.root
while node:
node = node[0]
return node
def get_main_sequence(self):
"""Return the 'leftmost' variation.
Returns a list of Tree_nodes, from the root to a leaf.
"""
node = self.root
result = [node]
while node:
node = node[0]
result.append(node)
return result
def get_main_sequence_below(self, node):
"""Return the 'leftmost' variation below the specified node.
node -- Tree_node
Returns a list of Tree_nodes, from the first child of 'node' to a leaf.
"""
if node.owner is not self:
raise ValueError("node doesn't belong to this game")
result = []
while node:
node = node[0]
result.append(node)
return result
def get_sequence_above(self, node):
"""Return the partial variation leading to the specified node.
node -- Tree_node
Returns a list of Tree_nodes, from the root to the parent of 'node'.
"""
if node.owner is not self:
raise ValueError("node doesn't belong to this game")
result = []
while node.parent is not None:
node = node.parent
result.append(node)
result.reverse()
return result
def main_sequence_iter(self):
"""Provide the 'leftmost' variation as an iterator.
Returns an iterator providing Node instances, from the root to a leaf.
The Node instances may or may not be Tree_nodes.
It's OK to use these Node instances to modify properties: even if they
are not the same objects as returned by the main tree navigation
methods, they share the underlying property maps.
If you know the game has no variations, or you're only interested in
the 'leftmost' variation, you can use this function to retrieve the
nodes without building the entire game tree.
"""
if isinstance(self.root, _Unexpanded_root_tree_node):
return self.root._main_sequence_iter()
return iter(self.get_main_sequence())
def extend_main_sequence(self):
"""Create a new Tree_node and add to the 'leftmost' variation.
Returns the new node.
"""
return self.get_last_node().new_child()
def get_size(self):
"""Return the board size as an integer."""
return self.size
def get_charset(self):
"""Return the effective value of the CA root property.
This applies the default, and returns the normalised form.
Raises ValueError if the CA property doesn't identify a Python codec.
"""
try:
s = self.root.get("CA")
except KeyError:
return "ISO-8859-1"
try:
return sgf_properties.normalise_charset_name(s)
except LookupError:
raise ValueError("no codec available for CA %s" % s)
def get_komi(self):
"""Return the komi as a float.
Returns 0.0 if the KM property isn't present in the root node.
Raises ValueError if the KM property is malformed.
"""
try:
return self.root.get("KM")
except KeyError:
return 0.0
def get_handicap(self):
"""Return the number of handicap stones as a small integer.
Returns None if the HA property isn't present, or has (illegal) value
zero.
Raises ValueError if the HA property is otherwise malformed.
"""
try:
handicap = self.root.get("HA")
except KeyError:
return None
if handicap == 0:
handicap = None
elif handicap == 1:
raise ValueError
return handicap
def get_player_name(self, colour):
"""Return the name of the specified player.
Returns None if there is no corresponding 'PB' or 'PW' property.
"""
try:
return self.root.get({'b' : 'PB', 'w' : 'PW'}[colour])
except KeyError:
return None
def get_winner(self):
"""Return the colour of the winning player.
Returns None if there is no RE property, or if neither player won.
"""
try:
colour = self.root.get("RE")[0].lower()
except LookupError:
return None
if colour not in ("b", "w"):
return None
return colour
def set_date(self, date=None):
"""Set the DT property to a single date.
date -- datetime.date (defaults to today)
(SGF allows dates to be rather more complicated than this, so there's
no corresponding get_date() method.)
"""
if date is None:
date = datetime.date.today()
self.root.set('DT', date.strftime("%Y-%m-%d"))
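# Illustrative sketch (not part of the original gomill module): round-tripping
# a game through the Sgf_game API above. The SGF text is a minimal made-up
# example, and the helper is never called by this module.
def _sgf_game_example():
    game = Sgf_game.from_string("(;FF[4]GM[1]SZ[9];B[ee];W[ge])")
    # (colour, move) pairs for the main line; the root node yields (None, None)
    moves = [node.get_move() for node in game.main_sequence_iter()]
    game.get_root().set('KM', 6.5)      # set the komi before re-serialising
    return moves, game.serialise()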
| inclement/noGo | noGo/ext/gomill/sgf.py | Python | gpl-3.0 | 25,680 |
#!/usr/bin/env python
import subprocess
import datetime
import praw
import pyperclip
from hashlib import sha1
from flask import Flask
from flask import Response
from flask import request
from cStringIO import StringIO
from base64 import b64encode
from base64 import b64decode
from ConfigParser import ConfigParser
import OAuth2Util
import os
import markdown
import bleach
# encoding=utf8
import sys
from participantCollection import ParticipantCollection
reload(sys)
sys.setdefaultencoding('utf8')
# Edit me!
challengePageSubmissionId = 'egfht9'
flaskport = 8997
readAllCommentsWhichCanBeSlower = False
sorryTooLateToSignUpReplyText = "Sorry, but the late signup grace period is over, so you can't officially join this challenge. But feel free to follow along anyway, and comment all you want."
reinstatedReplyText = "OK, I've reinstated you. You should start showing up on the list again starting tomorrow."
app = Flask(__name__)
app.debug = True
commentHashesAndComments = {}
submission = None
def loginAndReturnRedditSession():
config = ConfigParser()
config.read("../reddit-password-credentials.cfg")
user = config.get("Reddit", "user")
password = config.get("Reddit", "password")
# TODO: password auth is going away, and we will soon need to do oauth.
redditSession = praw.Reddit(user_agent='Test Script by /u/foobarbazblarg')
redditSession.login(user, password, disable_warning=True)
# submissions = redditSession.get_subreddit('pornfree').get_hot(limit=5)
# print [str(x) for x in submissions]
return redditSession
def loginOAuthAndReturnRedditSession():
redditSession = praw.Reddit(user_agent='Test Script by /u/foobarbazblarg')
# New version of praw does not require explicit use of the OAuth2Util object. Presumably because reddit now REQUIRES oauth.
# o = OAuth2Util.OAuth2Util(redditSession, print_log=True, configfile="../reddit-oauth-credentials.cfg")
# TODO: Testing comment of refresh. We authenticate fresh every time, so presumably no need to do o.refresh().
# o.refresh(force=True)
return redditSession
def getSubmissionForRedditSession(redditSession):
# submission = redditSession.get_submission(submission_id=challengePageSubmissionId)
submission = redditSession.submission(id=challengePageSubmissionId)
if readAllCommentsWhichCanBeSlower:
submission.comments.replace_more(limit=None)
# submission.replace_more_comments(limit=None, threshold=0)
return submission
def getCommentsForSubmission(submission):
# return [comment for comment in praw.helpers.flatten_tree(submission.comments) if comment.__class__ == praw.models.Comment]
commentForest = submission.comments
# commentForest.replace_more(limit=None, threshold=0)
return [comment for comment in commentForest.list() if comment.__class__ == praw.models.Comment]
def retireCommentHash(commentHash):
with open("retiredcommenthashes.txt", "a") as commentHashFile:
commentHashFile.write(commentHash + '\n')
def retiredCommentHashes():
with open("retiredcommenthashes.txt", "r") as commentHashFile:
# return commentHashFile.readlines()
return commentHashFile.read().splitlines()
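# Illustrative sketch (not part of the original script): how comments are
# de-duplicated between page loads. Each comment is fingerprinted with a SHA1
# over its reddit fullname plus body (the same scheme used inline in
# moderatechallenge() below); once a moderator acts on it, the hash is appended
# to retiredcommenthashes.txt via retireCommentHash() so it is skipped later.
def commentHashExample(comment):
    h = sha1()
    h.update(comment.fullname)
    h.update(comment.body.encode('utf-8'))
    return h.hexdigest()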
@app.route('/moderatechallenge.html')
def moderatechallenge():
global commentHashesAndComments
global submission
currentDayOfMonthIndex = datetime.date.today().day
currentMonthIndex = datetime.date.today().month
lateCheckinGracePeriodIsInEffect = currentDayOfMonthIndex <= 14 and currentMonthIndex == 1
commentHashesAndComments = {}
stringio = StringIO()
stringio.write('<html>\n<head>\n</head>\n\n')
# redditSession = loginAndReturnRedditSession()
redditSession = loginOAuthAndReturnRedditSession()
submission = getSubmissionForRedditSession(redditSession)
flat_comments = getCommentsForSubmission(submission)
retiredHashes = retiredCommentHashes()
i = 1
stringio.write('<iframe name="invisibleiframe" style="display:none;"></iframe>\n')
stringio.write("<h3>")
stringio.write(os.getcwd())
stringio.write("<br>\n")
stringio.write(submission.title)
stringio.write("</h3>\n\n")
stringio.write('<form action="copydisplaytoclipboard.html" method="post" target="invisibleiframe">')
stringio.write('<input type="submit" name="actiontotake" value="Copy display.py stdout to clipboard">')
stringio.write('<input type="submit" name="actiontotake" value="Automatically post display.py stdout">')
stringio.write('</form>')
for comment in flat_comments:
# print comment.is_root
# print comment.score
i += 1
commentHash = sha1()
commentHash.update(comment.fullname)
commentHash.update(comment.body.encode('utf-8'))
commentHash = commentHash.hexdigest()
if commentHash not in retiredHashes:
commentHashesAndComments[commentHash] = comment
authorName = str(comment.author) # can be None if author was deleted. So check for that and skip if it's None.
participant = ParticipantCollection().participantNamed(authorName)
stringio.write("<hr>\n")
stringio.write('<font color="blue"><b>')
stringio.write(authorName)
stringio.write('</b></font><br>')
if ParticipantCollection().hasParticipantNamed(authorName):
stringio.write(' <small><font color="green">(member)</font></small>')
if participant.isStillIn:
stringio.write(' <small><font color="green">(still in)</font></small>')
else:
stringio.write(' <small><font color="red">(out)</font></small>')
if participant.hasCheckedIn:
stringio.write(' <small><font color="green">(checked in)</font></small>')
else:
stringio.write(' <small><font color="orange">(not checked in)</font></small>')
if participant.hasRelapsed:
stringio.write(' <small><font color="red">(relapsed)</font></small>')
else:
stringio.write(' <small><font color="green">(not relapsed)</font></small>')
else:
stringio.write(' <small><font color="red">(not a member)</font></small>')
stringio.write('<form action="takeaction.html" method="post" target="invisibleiframe">')
# stringio.write('<input type="submit" name="actiontotake" value="Checkin" style="color:white;background-color:green">')
# stringio.write('<input type="submit" name="actiontotake" value="Signup and checkin">')
if lateCheckinGracePeriodIsInEffect:
stringio.write('<input type="submit" name="actiontotake" value="Checkin">')
stringio.write('<input type="submit" name="actiontotake" value="Signup and checkin" style="color:white;background-color:green">')
else:
stringio.write('<input type="submit" name="actiontotake" value="Checkin" style="color:white;background-color:green">')
stringio.write('<input type="submit" name="actiontotake" value="Signup and checkin">')
stringio.write('<input type="submit" name="actiontotake" value="Relapse" style="color:white;background-color:red">')
stringio.write('<input type="submit" name="actiontotake" value="Reinstate with automatic comment">')
stringio.write('<input type="submit" name="actiontotake" value="Reply with sorry-too-late comment">')
stringio.write('<input type="submit" name="actiontotake" value="Skip comment">')
stringio.write('<input type="submit" name="actiontotake" value="Skip comment and don\'t upvote">')
stringio.write('<input type="hidden" name="username" value="' + b64encode(authorName) + '">')
stringio.write('<input type="hidden" name="bodyencodedformlcorpus" value="' + b64encode(comment.body.encode('utf-8')) + '">')
stringio.write('<input type="hidden" name="commenthash" value="' + commentHash + '">')
# stringio.write('<input type="hidden" name="commentpermalink" value="' + comment.permalink + '">')
stringio.write('</form>')
stringio.write(bleach.clean(markdown.markdown(comment.body.encode('utf-8')), tags=['p']))
stringio.write("\n<br><br>\n\n")
stringio.write('</html>')
pageString = stringio.getvalue()
stringio.close()
return Response(pageString, mimetype='text/html')
@app.route('/takeaction.html', methods=["POST"])
def takeaction():
username = b64decode(request.form["username"])
commentHash = str(request.form["commenthash"])
# commentPermalink = request.form["commentpermalink"]
bodyEncodedForMLCorpus = str(request.form["bodyencodedformlcorpus"])
actionToTake = request.form["actiontotake"]
# print commentHashesAndComments
comment = commentHashesAndComments[commentHash]
# print "comment: " + str(comment)
if actionToTake == 'Checkin':
print "checkin - " + username
subprocess.call(['./checkin.py', username])
comment.upvote()
retireCommentHash(commentHash)
recordMLCorpusCheckin(bodyEncodedForMLCorpus)
if actionToTake == 'Signup and checkin':
print "signup and checkin - " + username
subprocess.call(['./signup-and-checkin.sh', username])
comment.upvote()
retireCommentHash(commentHash)
recordMLCorpusSignupAndCheckin(bodyEncodedForMLCorpus)
elif actionToTake == 'Relapse':
print "relapse - " + username
subprocess.call(['./relapse.py', username])
comment.upvote()
retireCommentHash(commentHash)
recordMLCorpusRelapse(bodyEncodedForMLCorpus)
elif actionToTake == 'Reinstate with automatic comment':
print "reinstate - " + username
subprocess.call(['./reinstate.py', username])
comment.reply(reinstatedReplyText)
comment.upvote()
retireCommentHash(commentHash)
recordMLCorpusReinstate(bodyEncodedForMLCorpus)
elif actionToTake == 'Reply with sorry-too-late comment':
print "reply with sorry-too-late comment - " + username
comment.reply(sorryTooLateToSignUpReplyText)
comment.upvote()
retireCommentHash(commentHash)
recordMLCorpusTooLate(bodyEncodedForMLCorpus)
elif actionToTake == 'Skip comment':
print "Skip comment - " + username
comment.upvote()
retireCommentHash(commentHash)
recordMLCorpusSkip(bodyEncodedForMLCorpus)
elif actionToTake == "Skip comment and don't upvote":
print "Skip comment and don't upvote - " + username
retireCommentHash(commentHash)
recordMLCorpusSkip(bodyEncodedForMLCorpus)
return Response("hello", mimetype='text/html')
@app.route('/copydisplaytoclipboard.html', methods=["POST"])
def copydisplaytoclipboard():
actionToTake = request.form["actiontotake"]
if actionToTake == 'Copy display.py stdout to clipboard':
subprocess.call(['./display.py'])
if actionToTake == 'Automatically post display.py stdout':
subprocess.call(['./display.py'])
submissionText = pyperclip.paste()
submission.edit(submissionText)
return Response("hello", mimetype='text/html')
def recordMLCorpusCheckin(aString):
with open("../new-ml-corpus-year-long-checkin.txt", "a") as f:
f.write(aString)
f.write("\n")
def recordMLCorpusSignupAndCheckin(aString):
with open("../new-ml-corpus-year-long-signup-and-checkin.txt", "a") as f:
f.write(aString)
f.write("\n")
def recordMLCorpusRelapse(aString):
with open("../new-ml-corpus-year-long-relapse.txt", "a") as f:
f.write(aString)
f.write("\n")
def recordMLCorpusReinstate(aString):
with open("../new-ml-corpus-year-long-reinstate.txt", "a") as f:
f.write(aString)
f.write("\n")
def recordMLCorpusTooLate(aString):
with open("../new-ml-corpus-year-long-too-late.txt", "a") as f:
f.write(aString)
f.write("\n")
def recordMLCorpusSkip(aString):
with open("../new-ml-corpus-year-long-skip.txt", "a") as f:
f.write(aString)
f.write("\n")
if __name__ == '__main__':
app.run(host='127.0.0.1', port=flaskport)
| foobarbazblarg/stayclean | stayclean-2019/serve-challenge-with-flask.py | Python | mit | 12,349 |
#
# Copyright (c) 2011 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public
# License as published by the Free Software Foundation; either version
# 2 of the License (GPLv2) or (at your option) any later version.
# There is NO WARRANTY for this software, express or implied,
# including the implied warranties of MERCHANTABILITY,
# NON-INFRINGEMENT, or FITNESS FOR A PARTICULAR PURPOSE. You should
# have received a copy of GPLv2 along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
import os
import sys
import pdb
import glob
import httplib
import urlparse
import time
import commands
import rpm
import rpmUtils
import rpmUtils.miscutils
try:
import hashlib as md5
except:
import md5
import logging
import signal
from optparse import Option, OptionParser
from xmlrpclib import Fault
from grinder.ParallelFetch import ParallelFetch
from grinder.KickstartFetch import KickstartFetch
from grinder.rhn_api import RhnApi
from grinder.rhn_api import getRhnApi
from grinder.rhn_transport import RHNTransport
from grinder.ParallelFetch import ParallelFetch
from grinder.PackageFetch import PackageFetch
from grinder.GrinderExceptions import *
from grinder.SatDumpClient import SatDumpClient
from grinder.RHNComm import RHNComm
GRINDER_LOG_FILENAME = "./log-grinder.out"
LOG = logging.getLogger("grinder")
def setupLogging(verbose):
logging.basicConfig(filename=GRINDER_LOG_FILENAME, level=logging.DEBUG,
format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s',
datefmt='%m-%d %H:%M', filemode='w')
console = logging.StreamHandler()
if verbose:
console.setLevel(logging.DEBUG)
else:
console.setLevel(logging.INFO)
formatter = logging.Formatter('%(name)-12s: %(levelname)-8s %(message)s')
console.setFormatter(formatter)
logging.getLogger('').addHandler(console)
def processCommandline():
"process the commandline, setting the OPTIONS object"
optionsTable = [
Option('-a', '--all', action='store_true',
help='Fetch ALL packages from a channel, not just latest'),
Option('-b', '--basepath', action='store', help='Path RPMs are stored'),
Option('-c', '--cert', action='store', help='Entitlement Certificate'),
Option('-C', '--config', action='store', help='Configuration file',
default='/etc/grinder/grinder.yml'),
Option('-k', '--kickstarts', action='store_true', help='Sync all kickstart trees for channels specified'),
Option('-K', '--skippackages', action='store_true', help='Skip sync of packages', default=False),
Option('-L', '--listchannels', action='store_true', help='List all channels we have access to synchronize'),
        Option('-p', '--password', action='store', help='RHN Password'),
Option('-P', '--parallel', action='store',
help='Number of threads to fetch in parallel.'),
Option('-r', '--removeold', action='store_true', help='Remove older rpms'),
Option('-s', '--systemid', action='store', help='System ID'),
Option('-u', '--username', action='store', help='RHN User Account'),
Option('-U', '--url', action='store', help='Red Hat Server URL'),
Option('-v', '--verbose', action='store_true', help='verbose output', default=False),
]
optionParser = OptionParser(option_list=optionsTable, usage="%prog [OPTION] channel_label1 channel_label2, etc")
global OPTIONS, args
OPTIONS, args = optionParser.parse_args()
class Grinder:
def __init__(self, url, username, password, cert, systemid, parallel, verbose):
self.baseURL = url
self.cert = open(cert, 'r').read()
self.systemid = open(systemid, 'r').read()
self.username = username
self.password = password
self.parallel = parallel
        self.fetchAll = False  # default is to fetch only the latest packages
self.parallelFetchPkgs = None
self.parallelFetchKickstarts = None
self.skipProductList = []
self.skipPackageList = []
self.verbose = verbose
self.killcount = 0
self.removeOldPackages = False
self.numOldPkgsKeep = 1
self.rhnComm = RHNComm(url, self.systemid)
def setRemoveOldPackages(self, value):
self.removeOldPackages = value
def getRemoveOldPackages(self):
return self.removeOldPackages
def getFetchAllPackages(self):
return self.fetchAll
def setFetchAllPackages(self, val):
self.fetchAll = val
def getSkipProductList(self):
return self.skipProductList
def setSkipProductList(self, skipProductList):
self.skipProductList = skipProductList
def getSkipPackageList(self):
return self.skipPackageList
def setSkipPackageList(self, skipPackageList):
self.skipPackageList = skipPackageList
def getNumOldPackagesToKeep(self):
return self.numOldPkgsKeep
def setNumOldPackagesToKeep(self, num):
self.numOldPkgsKeep = num
def deactivate(self):
SATELLITE_URL = "%s/rpc/api" % (self.baseURL)
client = getRhnApi(SATELLITE_URL, verbose=0)
key = client.auth.login(self.username, self.password)
retval = client.satellite.deactivateSatellite(self.systemid)
print "retval from deactivation: %s" % retval
client.auth.logout(key)
print "Deactivated!"
def activate(self):
rhn = RHNTransport()
satClient = getRhnApi(self.baseURL + "/SAT",
verbose=self.verbose, transport=rhn)
# First check if we are active
active = False
retval = satClient.authentication.check(self.systemid)
LOG.debug("AUTH CHECK: %s " % str(retval))
if (retval == 1):
LOG.debug("We are activated ... continue!")
active = True
else:
LOG.debug("Not active")
if (not active):
if(not self.username or not self.password):
raise SystemNotActivatedException()
SATELLITE_URL = "%s/rpc/api" % (self.baseURL)
client = RhnApi(SATELLITE_URL, verbose=0)
key = client.auth.login(self.username, self.password)
retval = client.satellite.activateSatellite(self.systemid, self.cert)
LOG.debug("retval from activation: %s" % retval)
if (retval != 1):
raise CantActivateException()
client.auth.logout(key)
LOG.debug("Activated!")
def stop(self):
if (self.parallelFetchPkgs):
self.parallelFetchPkgs.stop()
if (self.parallelFetchKickstarts):
self.parallelFetchKickstarts.stop()
def checkChannels(self, channelsToSync):
"""
Input:
channelsToSync - list of channels to sync
Output:
list containing bad channel names
"""
satDump = SatDumpClient(self.baseURL)
channelFamilies = satDump.getChannelFamilies(self.systemid)
badChannel = []
for channelLabel in channelsToSync:
found = False
for d in channelFamilies.values():
if channelLabel in d["channel_labels"]:
LOG.debug("Found %s under %s" % (channelLabel, d["label"]))
found = True
break
if not found:
LOG.debug("Unable to find %s, adding it to badChannel list" % (channelLabel))
badChannel.append(channelLabel)
return badChannel
def getChannelLabels(self):
labels = {}
satDump = SatDumpClient(self.baseURL)
channelFamilies = satDump.getChannelFamilies(self.systemid)
for d in channelFamilies.values():
if (d["label"] in self.skipProductList):
continue
labels[d["label"]] = d["channel_labels"]
return labels
def displayListOfChannels(self):
labels = self.getChannelLabels()
print("List of channels:")
for lbl in labels:
print("\nProduct : %s\n" % (lbl))
for l in labels[lbl]:
print(" %s" % (l))
def syncKickstarts(self, channelLabel, savePath, verbose=0):
"""
channelLabel - channel to sync kickstarts from
savePath - path to save kickstarts
verbose - if true display more output
"""
startTime = time.time()
satDump = SatDumpClient(self.baseURL, verbose=verbose)
ksLabels = satDump.getKickstartLabels(self.systemid, [channelLabel])
LOG.info("Found %s kickstart labels for channel %s" % (len(ksLabels[channelLabel]), channelLabel))
ksFiles = []
for ksLbl in ksLabels[channelLabel]:
LOG.info("Syncing kickstart label: %s" % (ksLbl))
metadata = satDump.getKickstartTreeMetadata(self.systemid, [ksLbl])
LOG.info("Retrieved metadata on %s files for kickstart label: %s" % (len(metadata[ksLbl]["files"]), ksLbl))
ksSavePath = os.path.join(savePath, ksLbl)
for ksFile in metadata[ksLbl]["files"]:
info = {}
info["relative-path"] = ksFile["relative-path"]
info["size"] = ksFile["file-size"]
info["md5sum"] = ksFile["md5sum"]
info["ksLabel"] = ksLbl
info["channelLabel"] = channelLabel
info["savePath"] = ksSavePath
ksFiles.append(info)
ksFetch = KickstartFetch(self.systemid, self.baseURL)
numThreads = int(self.parallel)
self.parallelFetchKickstarts = ParallelFetch(ksFetch, numThreads)
self.parallelFetchKickstarts.addItemList(ksFiles)
self.parallelFetchKickstarts.start()
report = self.parallelFetchKickstarts.waitForFinish()
endTime = time.time()
LOG.info("Processed %s %s %s kickstart files, %s errors, completed in %s seconds" \
% (channelLabel, ksLabels[channelLabel], report.successes,
report.errors, (endTime-startTime)))
return report
def syncPackages(self, channelLabel, savePath, verbose=0):
"""
channelLabel - channel to sync packages from
savePath - path to save packages
verbose - if true display more output
"""
startTime = time.time()
if channelLabel == "":
LOG.critical("No channel label specified to sync, abort sync.")
raise NoChannelLabelException()
LOG.info("sync(%s, %s) invoked" % (channelLabel, verbose))
satDump = SatDumpClient(self.baseURL, verbose=verbose)
LOG.debug("*** calling product_names ***")
packages = satDump.getChannelPackages(self.systemid, channelLabel)
LOG.info("%s packages are available, getting list of short metadata now." % (len(packages)))
pkgInfo = satDump.getShortPackageInfo(self.systemid, packages, filterLatest = not self.fetchAll)
LOG.info("%s packages have been marked to be fetched" % (len(pkgInfo.values())))
numThreads = int(self.parallel)
LOG.info("Running in parallel fetch mode with %s threads" % (numThreads))
pkgFetch = PackageFetch(self.systemid, self.baseURL, channelLabel, savePath)
self.parallelFetchPkgs = ParallelFetch(pkgFetch, numThreads)
self.parallelFetchPkgs.addItemList(pkgInfo.values())
self.parallelFetchPkgs.start()
report = self.parallelFetchPkgs.waitForFinish()
LOG.debug("Attempting to fetch comps.xml info from RHN")
self.fetchCompsXML(savePath, channelLabel)
self.fetchUpdateinfo(savePath, channelLabel)
endTime = time.time()
LOG.info("Processed <%s> %s packages, %s errors, completed in %s seconds" \
% (channelLabel, report.successes, report.errors, (endTime-startTime)))
if self.removeOldPackages:
LOG.info("Remove old packages from %s" % (savePath))
self.runRemoveOldPackages(savePath)
return report
def fetchCompsXML(self, savePath, channelLabel):
###
# Fetch comps.xml, used by createrepo for "groups" info
###
compsxml = ""
try:
compsxml = self.rhnComm.getRepodata(channelLabel, "comps.xml")
except GetRequestException, ge:
if (ge.code == 404):
LOG.info("Channel has no compsXml")
else:
raise ge
if not savePath:
savePath = channelLabel
f = open(os.path.join(savePath, "comps.xml"), "w")
f.write(compsxml)
f.close()
def fetchUpdateinfo(self, savePath, channelLabel):
"""
Fetch updateinfo.xml.gz used by yum security plugin
"""
import gzip
updateinfo_gz = ""
try:
updateinfo_gz = self.rhnComm.getRepodata(channelLabel, "updateinfo.xml.gz")
except GetRequestException, ge:
if (ge.code == 404):
LOG.info("Channel has no Updateinfo")
else:
raise ge
if not savePath:
savePath = channelLabel
fname = os.path.join(savePath, "updateinfo.xml.gz")
f = open(fname, 'wb');
f.write(updateinfo_gz)
f.close()
f = open(os.path.join(savePath,"updateinfo.xml"), 'w')
f.write(gzip.open(fname, 'r').read())
f.close()
def getNEVRA(self, filename):
fd = os.open(filename, os.O_RDONLY)
ts = rpm.TransactionSet()
ts.setVSFlags((rpm._RPMVSF_NOSIGNATURES|rpm._RPMVSF_NODIGESTS))
h = ts.hdrFromFdno(fd)
os.close(fd)
ts.closeDB()
d = {}
d["filename"] = filename
for key in ["name", "epoch", "version", "release", "arch"]:
d[key] = h[key]
return d
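    # Example of the dict getNEVRA() returns for a single package file
    # (values are illustrative only; "epoch" is None when the header has no epoch):
    #   {"filename": "/repo/foo-1.2-3.el5.x86_64.rpm", "name": "foo",
    #    "epoch": None, "version": "1.2", "release": "3.el5", "arch": "x86_64"}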
def getListOfSyncedRPMs(self, path):
"""
Returns a dictionary where the key is name.arch
and the value is a list of dict entries
[{"name", "version", "release", "epoch", "arch", "filename"}]
"""
rpms = {}
rpmFiles = glob.glob(path+"/*.rpm")
for filename in rpmFiles:
info = self.getNEVRA(filename)
key = info["name"] + "." + info["arch"]
if not rpms.has_key(key):
rpms[key] = []
rpms[key].append(info)
return rpms
def getSortedListOfSyncedRPMs(self, path):
"""
Returns a dictionary with key of 'name.arch' which has values sorted in descending order
i.e. latest rpm is the first element on the list
Values in dictionary are a list of:
[{"name", "version", "release", "epoch", "arch", "filename"}]
"""
rpms = self.getListOfSyncedRPMs(path)
for key in rpms:
rpms[key].sort(lambda a, b:
rpmUtils.miscutils.compareEVR(
(a["epoch"], a["version"], a["release"]),
(b["epoch"], b["version"], b["release"])), reverse=True)
return rpms
def runRemoveOldPackages(self, path, numOld=None):
"""
Will scan 'path'.
The current RPM and 'numOld' or prior releases are kept, all other RPMs are deleted
"""
if numOld == None:
numOld = self.numOldPkgsKeep
if numOld < 0:
numOld = 0
LOG.info("Will keep latest package and %s older packages" % (numOld))
rpms = self.getSortedListOfSyncedRPMs(path)
for key in rpms:
values = rpms[key]
if len(values) > numOld:
# Remember to keep the latest package
for index in range(1+numOld, len(values)):
fname = values[index]['filename']
LOG.info("index = %s Removing: %s" % (index, fname))
os.remove(fname)
def createRepo(self, dir):
startTime = time.time()
status, out = commands.getstatusoutput('createrepo --update -g comps.xml %s' % (dir))
class CreateRepoError:
def __init__(self, output):
self.output = output
def __str__(self):
return self.output
if status != 0:
raise CreateRepoError(out)
endTime = time.time()
LOG.info("createrepo on %s finished in %s seconds" % (dir, (endTime-startTime)))
return status, out
def updateRepo(self, updatepath, repopath):
startTime = time.time()
status, out = commands.getstatusoutput('modifyrepo %s %s' % (updatepath, repopath))
class CreateRepoError:
def __init__(self, output):
self.output = output
def __str__(self):
return self.output
if status != 0:
raise CreateRepoError(out)
endTime = time.time()
LOG.info("updaterepo on %s finished in %s seconds" % (repopath, (endTime-startTime)))
return status, out
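# Illustrative sketch (not part of the original module): the driver sequence
# that main() below performs, shown in isolation. The URL, credential paths and
# channel label are placeholders, and the cert/systemid files must exist, since
# Grinder.__init__ reads them from disk.
def _example_grinder_run():
    g = Grinder("https://satellite.rhn.redhat.com", "user", "secret",
                "/etc/sysconfig/rhn/entitlement-cert.xml",
                "/etc/sysconfig/rhn/systemid", parallel=5, verbose=False)
    g.activate()
    report = g.syncPackages("rhel-x86_64-server-5", "./rhel-x86_64-server-5")
    g.createRepo("./rhel-x86_64-server-5")
    return report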
_LIBPATH = "/usr/share/"
# add to the path if need be
if _LIBPATH not in sys.path:
sys.path.append(_LIBPATH)
#
# Registering a signal handler on SIGINT to help in the
# parallel case, when we need a way to stop all the threads
# when someone CTRL-C's
#
def handleKeyboardInterrupt(signalNumer, frame):
if (GRINDER.killcount > 0):
LOG.error("force quitting.")
sys.exit()
if (GRINDER.killcount == 0):
GRINDER.killcount = 1
msg = "SIGINT caught, will finish currently downloading" + \
" packages and exit. Press CTRL+C again to force quit"
LOG.error(msg)
GRINDER.stop()
signal.signal(signal.SIGINT, handleKeyboardInterrupt)
def main():
LOG.debug("Main executed")
processCommandline()
verbose = OPTIONS.verbose
setupLogging(verbose)
configFile = OPTIONS.config
configInfo = {}
if os.path.isfile(configFile):
try:
import yaml
raw = open(configFile).read()
configInfo = yaml.load(raw)
except ImportError:
LOG.critical("Unable to load python module 'yaml'.")
LOG.critical("Unable to parse config file: %s. Using command line options only." % (configFile))
configInfo = {}
except Exception, e:
LOG.critical("Exception: %s" % (e))
LOG.critical("Unable to parse config file: %s. Using command line options only." % (configFile))
configInfo = {}
else:
LOG.info("Unable to read configuration file: %s" % (configFile))
LOG.info("Will run with command line options only.")
if OPTIONS.all:
allPackages = OPTIONS.all
elif configInfo.has_key("all"):
allPackages = configInfo["all"]
else:
allPackages = False
LOG.debug("allPackages = %s" % (allPackages))
if OPTIONS.username:
username = OPTIONS.username
else:
username = None
LOG.debug("username = %s" % (username))
if OPTIONS.password:
password = OPTIONS.password
LOG.debug("password = from command line")
else:
password = None
LOG.debug("password never specified")
if OPTIONS.cert:
cert = OPTIONS.cert
elif configInfo.has_key("cert"):
cert = configInfo["cert"]
else:
cert = "/etc/sysconfig/rhn/entitlement-cert.xml"
LOG.debug("cert = %s" % (cert))
if OPTIONS.systemid:
systemid = OPTIONS.systemid
elif configInfo.has_key("systemid"):
systemid = configInfo["systemid"]
else:
systemid = "/etc/sysconfig/rhn/systemid"
LOG.debug("systemid = %s" % (systemid))
if OPTIONS.parallel:
parallel = int(OPTIONS.parallel)
elif configInfo.has_key("parallel"):
parallel = int(configInfo["parallel"])
else:
parallel = 5
LOG.debug("parallel = %s" % (parallel))
if OPTIONS.url:
url = OPTIONS.url
elif configInfo.has_key("url"):
url = configInfo["url"]
else:
url = "https://satellite.rhn.redhat.com"
LOG.debug("url = %s" % (url))
if OPTIONS.removeold:
removeold = OPTIONS.removeold
elif configInfo.has_key("removeold"):
removeold = configInfo["removeold"]
else:
removeold = False
LOG.debug("removeold = %s" % (removeold))
numOldPkgsKeep = 0
if configInfo.has_key("num_old_pkgs_keep"):
numOldPkgsKeep = int(configInfo["num_old_pkgs_keep"])
LOG.debug("numOldPkgsKeep = %s" % (numOldPkgsKeep))
if allPackages and removeold:
print "Conflicting options specified. Fetch ALL packages AND remove older packages."
print "This combination of options is not supported."
print "Please remove one of these options and re-try"
sys.exit(1)
if OPTIONS.basepath:
basepath = OPTIONS.basepath
elif configInfo.has_key("basepath"):
basepath = configInfo["basepath"]
else:
basepath = "./"
LOG.debug("basepath = %s" % (basepath))
channelLabels = {}
if len(args) > 0:
# CLI overrides config file, so if channels are specified on CLI
# that is all we will sync
for a in args:
channelLabels[a] = a
# FOR CLI syncing of channels, override basePath to ignore config file
# Result is channels get synced to CLI "-b" option or ./channel-label
if OPTIONS.basepath:
basepath = OPTIONS.basepath
else:
basepath = "./"
else:
if configInfo.has_key("channels"):
channels = configInfo["channels"]
for c in channels:
channelLabels[c['label']] = c['relpath']
listchannels = OPTIONS.listchannels
global GRINDER
GRINDER = Grinder(url, username, password, cert,
systemid, parallel, verbose)
GRINDER.setFetchAllPackages(allPackages)
GRINDER.setRemoveOldPackages(removeold)
GRINDER.setSkipProductList(["rh-public", "k12ltsp", "education"])
GRINDER.setNumOldPackagesToKeep(numOldPkgsKeep)
GRINDER.activate()
if (listchannels):
GRINDER.displayListOfChannels()
sys.exit(0)
badChannels = GRINDER.checkChannels(channelLabels.keys())
for b in badChannels:
print "'%s' can not be found as a channel available to download" % (b)
if len(badChannels) > 0:
sys.exit(1)
if len(channelLabels.keys()) < 1:
print "No channels specified to sync"
sys.exit(1)
for cl in channelLabels.keys():
dirPath = os.path.join(basepath, channelLabels[cl])
if OPTIONS.skippackages == False:
LOG.info("Syncing '%s' to '%s'" % (cl, dirPath))
startTime = time.time()
reportPkgs = GRINDER.syncPackages(cl, dirPath, verbose=verbose)
endTime = time.time()
pkgTime = endTime - startTime
if (GRINDER.killcount == 0):
LOG.info("Sync completed, running createrepo")
GRINDER.createRepo(dirPath)
# Update the repodata to include updateinfo
GRINDER.updateRepo(os.path.join(dirPath,"updateinfo.xml"), os.path.join(dirPath,"repodata/"))
if OPTIONS.kickstarts and GRINDER.killcount == 0:
startTime = time.time()
reportKSs = GRINDER.syncKickstarts(cl, dirPath, verbose=verbose)
endTime = time.time()
ksTime = endTime - startTime
if OPTIONS.skippackages == False:
LOG.info("Summary: Packages = %s in %s seconds" % (reportPkgs, pkgTime))
if OPTIONS.kickstarts:
LOG.info("Summary: Kickstarts = %s in %s seconds" % (reportKSs, ksTime))
if __name__ == "__main__":
main()
| Katello/grinder | src/grinder/Grinder.py | Python | gpl-2.0 | 23,793 |
import sys
# where RobotControl.py, etc lives
sys.path.append('/home/pi/Desktop/ADL/YeastRobot/PythonLibrary')
from RobotControl import *
#################################
### Define Deck Layout
#################################
deck="""\
DW96W DW96W BLANK BLANK BLANK BLANK BLANK
DW96W DW96W BLANK BLANK BLANK BLANK BLANK
DW96W DW96W BLANK BLANK BLANK BLANK BLANK
DW96W DW96W BLANK BLANK BLANK BLANK BLANK
"""
# 2 3 4 5 6
# note the 1st user defined column is "2" not zero or one, since tips are at 0 & 1
##################################
OffsetDict={0: 'UL', 1: 'UR', 2: 'LL', 3: 'LR'}
# read in deck, etc
DefineDeck(deck)
printDeck()
InitializeRobot()
CurrentTipPosition = 1
# eventually row in 0,1,2,3
for row in [0]:
for offset in [0,1,2,3]:
# get tips
CurrentTipPosition = retrieveTips(CurrentTipPosition)
# pick up 250ul of selective media from C3, add to C2, mix
position(row,3,position = OffsetDict[offset])
aspirate(250,depth=99,speed=50, mix=0)
position(row,2,position = OffsetDict[offset])
dispense(250, depth=99, speed=100)
mix(330,98,100,5)
# discard tips
disposeTips()
position(0,0)
ShutDownRobot()
quit()
| tdlong/YeastRobot | UserPrograms/Rob_Sexual_DiploidSelection_1.py | Python | gpl-3.0 | 1,204 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim:ts=4:sw=4:softtabstop=4:smarttab:expandtab
from __future__ import unicode_literals, division, absolute_import, print_function
# __path__ = ["lib", ".", "kindleunpack"]
import sys
import codecs
import traceback
from .compatibility_utils import PY2, binary_type, utf8_str, unicode_str
from .compatibility_utils import unicode_argv, add_cp65001_codec
from .compatibility_utils import hexlify
add_cp65001_codec()
from .unipath import pathof
if PY2:
range = xrange
# since will be printing unicode under python 2 need to protect
# against sys.stdout.encoding being None stupidly forcing forcing ascii encoding
# if sys.stdout.encoding is None:
# sys.stdout = codecs.getwriter("utf-8")(sys.stdout)
# else:
# encoding = sys.stdout.encoding
# sys.stdout = codecs.getwriter(encoding)(sys.stdout)
# Changelog
# 0.11 - Version by adamselene
# 0.11pd - Tweaked version by pdurrant
# 0.12 - extracts pictures too, and all into a folder.
# 0.13 - added back in optional output dir for those who don't want it based on infile
# 0.14 - auto flush stdout and wrapped in main, added proper return codes
# 0.15 - added support for metadata
# 0.16 - metadata now starting to be output as an opf file (PD)
# 0.17 - Also created tweaked text as source for Mobipocket Creator
# 0.18 - removed raw mobi file completely but kept _meta.html file for ease of conversion
# 0.19 - added in metadata for ASIN, Updated Title and Rights to the opf
# 0.20 - remove _meta.html since no longer needed
# 0.21 - Fixed some typos in the opf output, and also updated handling
# of test for trailing data/multibyte characters
# 0.22 - Fixed problem with > 9 images
# 0.23 - Now output Start guide item
# 0.24 - Set firstaddl value for 'TEXtREAd'
# 0.25 - Now added character set metadata to html file for utf-8 files.
# 0.26 - Dictionary support added. Image handling speed improved.
# For huge files create temp files to speed up decoding.
# Language decoding fixed. Metadata is now converted to utf-8 when written to opf file.
# 0.27 - Add idx:entry attribute "scriptable" if dictionary contains entry length tags.
# Don't save non-image sections as images. Extract and save source zip file
# included by kindlegen as kindlegensrc.zip.
# 0.28 - Added back correct image file name extensions, created FastConcat class to simplify and clean up
# 0.29 - Metadata handling reworked, multiple entries of the same type are now supported.
# Several missing types added.
# FastConcat class has been removed as in-memory handling with lists is faster, even for huge files.
# 0.30 - Add support for outputting **all** metadata values - encode content with hex if of unknown type
# 0.31 - Now supports Print Replica ebooks, outputting PDF and mysterious data sections
# 0.32 - Now supports NCX file extraction/building.
# Overhauled the structure of mobiunpack to be more class oriented.
# 0.33 - Split Classes into separate files and added prelim support for KF8 format eBooks
# 0.34 - Improved KF8 support, guide support, bug fixes
# 0.35 - Added splitting combo mobi7/mobi8 into standalone mobi7 and mobi8 files
# Also handle mobi8-only file properly
# 0.36 - very minor changes to support KF8 mobis with no flow items, no ncx, etc
# 0.37 - separate output, add command line switches to control, interface to Mobi_Unpack.pyw
# 0.38 - improve split function by resetting flags properly, fix bug in Thumbnail Images
# 0.39 - improve split function so that ToC info is not lost for standalone mobi8s
# 0.40 - make mobi7 split match official versions, add support for graphic novel metadata,
# improve debug for KF8
# 0.41 - fix when StartOffset set to 0xffffffff, fix to work with older mobi versions,
# fix other minor metadata issues
# 0.42 - add new class interface to allow it to integrate more easily with internal calibre routines
# 0.43 - bug fixes for new class interface
# 0.44 - more bug fixes and fix for potential bug caused by not properly closing created zip archive
# 0.45 - sync to version in the new Mobi_Unpack plugin
# 0.46 - fixes for: obfuscated fonts, improper toc links and ncx, add support for opentype fonts
# 0.47 - minor opf improvements
# 0.48 - ncx link fixes
# 0.49 - use azw3 when splitting mobis
# 0.50 - unknown change
# 0.51 - fix for converting filepos links to hrefs, Added GPL3 notice, made KF8 extension just '.azw3'
# 0.52 - fix for cover metadata (no support for Mobipocket Creator)
# 0.53 - fix for proper identification of embedded fonts, added new metadata items
# 0.54 - Added error-handling so wonky embedded fonts don't bomb the whole unpack process,
# entity escape KF8 metadata to ensure valid OPF.
# 0.55 Strip extra StartOffset EXTH from the mobi8 header when splitting, keeping only the relevant one
# For mobi8 files, don't generate duplicate guide entries from the metadata if we could extract one
# from the OTH table.
# 0.56 - Added further entity escaping of OPF text.
# Allow unicode string file paths to be passed as arguments to the unpackBook method without blowing up later
# when the attempt to "re"-unicode a portion of that filename occurs in the process_all_mobi_headers method.
# 0.57 - Fixed error when splitting Preview files downloaded from KDP website
# 0.58 - Output original kindlegen build log ('CMET' record) if included in the package.
# 0.58 - Include and extend functionality of DumpMobiHeader, replacing DEBUG with DUMP
# 0.59 - Much added DUMP functionality, including full dumping and descriptions of sections
# 0.60 - Bug fixes in opf, div tables, bad links, page breaks, section descriptions
#        - plus a number of other bug fixes that were found by Sergey Dubinets
#        - fixes for files/paths that require full unicode to work properly
# - replace subprocess with multiprocessing to remove need for unbuffered stdout
# 0.61 - renamed to be KindleUnpack and more unicode/utf-8 path bug fixes and other minor fixes
# 0.62 - fix for multiprocessing on Windows, split fixes, opf improvements
# 0.63 - Modified to process right to left page progression books properly.
# - Added some id_map_strings and RESC section processing; metadata and
# - spine in the RESC are integrated partly to content.opf.
# 0.63a- Separated K8 RESC processor to an individual file. Bug fixes. Added cover page creation.
# 0.64 - minor bug fixes to more properly handle unicode command lines, and support for more jpeg types
# 0.64a- Modified to handle some irregular mobi and azw3 files.
# 0.64b- Modified to create k8resc.spine for files with no RESC section.
# 0.65 - Bug fixes to shorten title and remove epub3 "properties" to make the output epub2 compliant
# 0.65a- Bug fixes to extract RESC section correctly, to prevent item id conflicts
# - and to process multiline comments in RESC.
# 0.66 - Bug fix to deal with missing first resource information sometimes generated by calibre
# 0.66a- Fixed minor bugs which probably do not affect the output
# 0.67 - Fixed Mobi Split functionality bug with azw3 images not being properly copied
# 0.68 - preliminary support for handling PAGE sections to create page-map.xml
# 0.69 - preliminary support for CONT and CRES for HD Images
# 0.70 - preliminary support for decoding apnx files when used with azw3 ebooks
# 0.71 - extensive refactoring of kindleunpack.py to make it more manageable
# 0.72 - many bug fixes from tkeo: fix pageProcessing, fix print replica, fix resc usage, fix font mangling, etc.
# 0.72a- fix for still broken PrintReplica support
# 0.72b- preview for primary epub3 support. A parameter epubver(default='2') is added to process_all_mobi_headers(), unpackBook().
# 0.72c- preview for apnx page support
# 0.72d- more bugs fixed in preview features, much improved GUI with ability to dynamically grow the Log Window with preference support
# 0.72e- more bug fixes, Tk GUI adds support for epub version and HDImage use
# 0.72f- more bug fixes, implement use hd images if present
# 0.72g- minor bug fixes and cleanups from tkeo
# 0.72h- updated mobi_header and mobi_k8proc to use the correct fragment and guide terms in place of div and other
# to better match the terms that both Calibre and Amazon use internally to their own software
# 0.72x- very experimental conversion to use new mobi_k8resc.py and some of its associated changes
# 0.72y- more changes to simplify and integrate in epub3 support in a simpler manner
# 0.72z- remove redundancy in mobi_opf.py and bug fixes for mobi_k8resc.py
# 0.73 faster mobi split, numerous bug fixes in mobi_k8proc, mobi_header, mobi_opf, mobi_k8resc, etc
# 0.74 added refines metadata, fixed language code in ncx and title in nav, added support for opf: from refines
# 0.75 much improved dictionary support, including support for multiple inflection sections; minor mobi_opf fixes
# 0.76 pre-release version only fix name related issues in opf by not using original file name in mobi7
# 0.77 bug fix for unpacking HDImages with included Fonts
# 0.80 converted to work with both python 2.7 and Python 3.3 and later
DUMP = False
""" Set to True to dump all possible information. """
WRITE_RAW_DATA = False
""" Set to True to create additional files with raw data for debugging/reverse engineering. """
SPLIT_COMBO_MOBIS = False
""" Set to True to split combination mobis into mobi7 and mobi8 pieces. """
CREATE_COVER_PAGE = True # XXX experimental
""" Create and insert a cover xhtml page. """
EOF_RECORD = b'\xe9\x8e' + b'\r\n'
""" The EOF record content. """
TERMINATION_INDICATOR1 = b'\x00'
TERMINATION_INDICATOR2 = b'\x00\x00'
TERMINATION_INDICATOR3 = b'\x00\x00\x00'
KINDLEGENSRC_FILENAME = "kindlegensrc.zip"
""" The name for the kindlegen source archive. """
KINDLEGENLOG_FILENAME = "kindlegenbuild.log"
""" The name for the kindlegen build log. """
K8_BOUNDARY = b'BOUNDARY'
""" The section data that divides K8 mobi ebooks. """
import os
import struct
import re
import zlib
import getopt
class unpackException(Exception):
pass
# import the kindleunpack support libraries
from .unpack_structure import fileNames
from .mobi_sectioner import Sectionizer, describe
from .mobi_header import MobiHeader, dump_contexth
from .mobi_utils import toBase32
from .mobi_opf import OPFProcessor
from .mobi_html import HTMLProcessor, XHTMLK8Processor
from .mobi_ncx import ncxExtract
from .mobi_k8proc import K8Processor
from .mobi_split import mobi_split
from .mobi_k8resc import K8RESCProcessor
from .mobi_nav import NAVProcessor
from .mobi_cover import CoverProcessor, get_image_type
from .mobi_pagemap import PageMapProcessor
from .mobi_dict import dictSupport
def processSRCS(i, files, rscnames, sect, data):
# extract the source zip archive and save it.
print("File contains kindlegen source archive, extracting as %s" % KINDLEGENSRC_FILENAME)
srcname = os.path.join(files.outdir, KINDLEGENSRC_FILENAME)
with open(pathof(srcname), 'wb') as f:
f.write(data[16:])
rscnames.append(None)
sect.setsectiondescription(i,"Zipped Source Files")
return rscnames
def processPAGE(i, files, rscnames, sect, data, mh, pagemapproc):
# process any page map information and create an apnx file
pagemapproc = PageMapProcessor(mh, data)
rscnames.append(None)
sect.setsectiondescription(i,"PageMap")
apnx_meta = {}
acr = sect.palmname.decode('latin-1').rstrip('\x00')
apnx_meta['acr'] = acr
apnx_meta['cdeType'] = mh.metadata['cdeType'][0]
apnx_meta['contentGuid'] = hex(int(mh.metadata['UniqueID'][0]))[2:]
apnx_meta['asin'] = mh.metadata['ASIN'][0]
apnx_meta['pageMap'] = pagemapproc.getPageMap()
if mh.version == 8:
apnx_meta['format'] = 'MOBI_8'
else:
apnx_meta['format'] = 'MOBI_7'
apnx_data = pagemapproc.generateAPNX(apnx_meta)
if mh.isK8():
outname = os.path.join(files.outdir, 'mobi8-'+files.getInputFileBasename() + '.apnx')
else:
outname = os.path.join(files.outdir, 'mobi7-'+files.getInputFileBasename() + '.apnx')
with open(pathof(outname), 'wb') as f:
f.write(apnx_data)
return rscnames, pagemapproc
def processCMET(i, files, rscnames, sect, data):
# extract the build log
print("File contains kindlegen build log, extracting as %s" % KINDLEGENLOG_FILENAME)
srcname = os.path.join(files.outdir, KINDLEGENLOG_FILENAME)
with open(pathof(srcname), 'wb') as f:
f.write(data[10:])
rscnames.append(None)
sect.setsectiondescription(i,"Kindlegen log")
return rscnames
# fonts only exist in KF8 ebooks
# Format: bytes 0 - 3: 'FONT'
# bytes 4 - 7: uncompressed size
# bytes 8 - 11: flags
# flag bit 0x0001 - zlib compression
# flag bit 0x0002 - obfuscated with xor string
# bytes 12 - 15: offset to start of compressed font data
#     bytes  16 - 19:  length of xor string stored before the start of the compressed font data
# bytes 20 - 23: start of xor string
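# A minimal sketch (comments only, mirroring processFONT below) of reading the
# header laid out above, assuming `data` holds a complete FONT section:
#     usize, flags, dstart, xor_len, xor_start = struct.unpack_from(b'>LLLLL', data, 4)
#     is_compressed = bool(flags & 0x0001)   # zlib-compressed payload
#     is_obfuscated = bool(flags & 0x0002)   # first 1040 bytes XORed with the key
# processFONT() below does exactly this, then de-obfuscates and/or decompresses
# before sniffing the font type from the first four bytes.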
def processFONT(i, files, rscnames, sect, data, obfuscate_data, beg, rsc_ptr):
fontname = "font%05d" % i
ext = '.dat'
font_error = False
font_data = data
try:
usize, fflags, dstart, xor_len, xor_start = struct.unpack_from(b'>LLLLL',data,4)
except:
print("Failed to extract font: {0:s} from section {1:d}".format(fontname,i))
font_error = True
ext = '.failed'
pass
if not font_error:
print("Extracting font:", fontname)
font_data = data[dstart:]
extent = len(font_data)
extent = min(extent, 1040)
if fflags & 0x0002:
# obfuscated so need to de-obfuscate the first 1040 bytes
key = bytearray(data[xor_start: xor_start+ xor_len])
buf = bytearray(font_data)
for n in range(extent):
buf[n] ^= key[n%xor_len]
font_data = bytes(buf)
if fflags & 0x0001:
# ZLIB compressed data
font_data = zlib.decompress(font_data)
hdr = font_data[0:4]
if hdr == b'\0\1\0\0' or hdr == b'true' or hdr == b'ttcf':
ext = '.ttf'
elif hdr == b'OTTO':
ext = '.otf'
else:
print("Warning: unknown font header %s" % hexlify(hdr))
if (ext == '.ttf' or ext == '.otf') and (fflags & 0x0002):
obfuscate_data.append(fontname + ext)
fontname += ext
outfnt = os.path.join(files.imgdir, fontname)
with open(pathof(outfnt), 'wb') as f:
f.write(font_data)
rscnames.append(fontname)
sect.setsectiondescription(i,"Font {0:s}".format(fontname))
if rsc_ptr == -1:
rsc_ptr = i - beg
return rscnames, obfuscate_data, rsc_ptr
def processCRES(i, files, rscnames, sect, data, beg, rsc_ptr, use_hd):
# extract an HDImage
global DUMP
data = data[12:]
imgtype = get_image_type(None, data)
if imgtype is None:
print("Warning: CRES Section %s does not contain a recognised resource" % i)
rscnames.append(None)
sect.setsectiondescription(i,"Mysterious CRES data, first four bytes %s" % describe(data[0:4]))
if DUMP:
fname = "unknown%05d.dat" % i
outname= os.path.join(files.outdir, fname)
with open(pathof(outname), 'wb') as f:
f.write(data)
sect.setsectiondescription(i,"Mysterious CRES data, first four bytes %s extracting as %s" % (describe(data[0:4]), fname))
rsc_ptr += 1
return rscnames, rsc_ptr
if use_hd:
# overwrite corresponding lower res image with hd version
imgname = rscnames[rsc_ptr]
imgdest = files.imgdir
else:
imgname = "HDimage%05d.%s" % (i, imgtype)
imgdest = files.hdimgdir
print("Extracting HD image: {0:s} from section {1:d}".format(imgname,i))
outimg = os.path.join(imgdest, imgname)
with open(pathof(outimg), 'wb') as f:
f.write(data)
rscnames.append(None)
sect.setsectiondescription(i,"Optional HD Image {0:s}".format(imgname))
rsc_ptr += 1
return rscnames, rsc_ptr
def processCONT(i, files, rscnames, sect, data):
global DUMP
# process a container header, most of this is unknown
# right now only extract its EXTH
dt = data[0:12]
if dt == b"CONTBOUNDARY":
rscnames.append(None)
sect.setsectiondescription(i,"CONTAINER BOUNDARY")
else:
sect.setsectiondescription(i,"CONT Header")
rscnames.append(None)
if DUMP:
cpage, = struct.unpack_from(b'>L', data, 12)
contexth = data[48:]
print("\n\nContainer EXTH Dump")
dump_contexth(cpage, contexth)
fname = "CONT_Header%05d.dat" % i
outname= os.path.join(files.outdir, fname)
with open(pathof(outname), 'wb') as f:
f.write(data)
return rscnames
def processkind(i, files, rscnames, sect, data):
global DUMP
dt = data[0:12]
if dt == b"kindle:embed":
if DUMP:
print("\n\nHD Image Container Description String")
print(data)
sect.setsectiondescription(i,"HD Image Container Description String")
rscnames.append(None)
return rscnames
# spine information from the original content.opf
def processRESC(i, files, rscnames, sect, data, k8resc):
global DUMP
if DUMP:
rescname = "RESC%05d.dat" % i
print("Extracting Resource: ", rescname)
outrsc = os.path.join(files.outdir, rescname)
with open(pathof(outrsc), 'wb') as f:
f.write(data)
if True: # try:
# parse the spine and metadata from RESC
k8resc = K8RESCProcessor(data[16:], DUMP)
else: # except:
print("Warning: cannot extract information from RESC.")
k8resc = None
rscnames.append(None)
sect.setsectiondescription(i,"K8 RESC section")
return rscnames, k8resc
def processImage(i, files, rscnames, sect, data, beg, rsc_ptr, cover_offset):
global DUMP
# Extract an Image
imgtype = get_image_type(None, data)
if imgtype is None:
print("Warning: Section %s does not contain a recognised resource" % i)
rscnames.append(None)
sect.setsectiondescription(i,"Mysterious Section, first four bytes %s" % describe(data[0:4]))
if DUMP:
fname = "unknown%05d.dat" % i
outname= os.path.join(files.outdir, fname)
with open(pathof(outname), 'wb') as f:
f.write(data)
sect.setsectiondescription(i,"Mysterious Section, first four bytes %s extracting as %s" % (describe(data[0:4]), fname))
return rscnames, rsc_ptr
imgname = "image%05d.%s" % (i, imgtype)
if cover_offset is not None and i == beg + cover_offset:
imgname = "cover%05d.%s" % (i, imgtype)
print("Extracting image: {0:s} from section {1:d}".format(imgname,i))
outimg = os.path.join(files.imgdir, imgname)
with open(pathof(outimg), 'wb') as f:
f.write(data)
rscnames.append(imgname)
sect.setsectiondescription(i,"Image {0:s}".format(imgname))
if rsc_ptr == -1:
rsc_ptr = i - beg
return rscnames, rsc_ptr
def processPrintReplica(metadata, files, rscnames, mh):
global DUMP
global WRITE_RAW_DATA
rawML = mh.getRawML()
if DUMP or WRITE_RAW_DATA:
outraw = os.path.join(files.outdir,files.getInputFileBasename() + '.rawpr')
with open(pathof(outraw),'wb') as f:
f.write(rawML)
fileinfo = []
print("Print Replica ebook detected")
try:
numTables, = struct.unpack_from(b'>L', rawML, 0x04)
tableIndexOffset = 8 + 4*numTables
# for each table, read in count of sections, assume first section is a PDF
# and output other sections as binary files
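        # Layout as read here (summary of the code below): a big-endian uint32
        # table count at offset 0x04, one uint32 section count per table starting
        # at offset 0x08, then consecutive (offset, length) uint32 pairs for every
        # section starting at 8 + 4*numTables.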
for i in range(numTables):
sectionCount, = struct.unpack_from(b'>L', rawML, 0x08 + 4*i)
for j in range(sectionCount):
sectionOffset, sectionLength, = struct.unpack_from(b'>LL', rawML, tableIndexOffset)
tableIndexOffset += 8
if j == 0:
entryName = os.path.join(files.outdir, files.getInputFileBasename() + ('.%03d.pdf' % (i+1)))
else:
entryName = os.path.join(files.outdir, files.getInputFileBasename() + ('.%03d.%03d.data' % ((i+1),j)))
with open(pathof(entryName), 'wb') as f:
f.write(rawML[sectionOffset:(sectionOffset+sectionLength)])
except Exception as e:
print('Error processing Print Replica: ' + str(e))
fileinfo.append([None,'', files.getInputFileBasename() + '.pdf'])
usedmap = {}
for name in rscnames:
if name is not None:
usedmap[name] = 'used'
opf = OPFProcessor(files, metadata, fileinfo, rscnames, False, mh, usedmap)
opf.writeOPF()
def processMobi8(mh, metadata, sect, files, rscnames, pagemapproc, k8resc, obfuscate_data, apnxfile=None, epubver='2'):
global DUMP
global WRITE_RAW_DATA
    # extract raw markup language
rawML = mh.getRawML()
if DUMP or WRITE_RAW_DATA:
outraw = os.path.join(files.k8dir,files.getInputFileBasename() + '.rawml')
with open(pathof(outraw),'wb') as f:
f.write(rawML)
# KF8 require other indexes which contain parsing information and the FDST info
# to process the rawml back into the xhtml files, css files, svg image files, etc
k8proc = K8Processor(mh, sect, files, DUMP)
k8proc.buildParts(rawML)
# collect information for the guide first
guidetext = unicode_str(k8proc.getGuideText())
# if the guide was empty, add in any guide info from metadata, such as StartOffset
if not guidetext and 'StartOffset' in metadata:
# Apparently, KG 2.5 carries over the StartOffset from the mobi7 part...
# Taking that into account, we only care about the *last* StartOffset, which
# should always be the correct one in these cases (the one actually pointing
# to the right place in the mobi8 part).
starts = metadata['StartOffset']
last_start = starts[-1]
last_start = int(last_start)
if last_start == 0xffffffff:
last_start = 0
seq, idtext = k8proc.getFragTblInfo(last_start)
filename, idtext = k8proc.getIDTagByPosFid(toBase32(seq), b'0000000000')
linktgt = filename
idtext = unicode_str(idtext, mh.codec)
if idtext != '':
linktgt += '#' + idtext
guidetext += '<reference type="text" href="Text/%s" />\n' % linktgt
# if apnxfile is passed in use it for page map information
if apnxfile is not None and pagemapproc is None:
with open(apnxfile, 'rb') as f:
apnxdata = b"00000000" + f.read()
pagemapproc = PageMapProcessor(mh, apnxdata)
# generate the page map
pagemapxml = ''
if pagemapproc is not None:
pagemapxml = pagemapproc.generateKF8PageMapXML(k8proc)
outpm = os.path.join(files.k8oebps,'page-map.xml')
with open(pathof(outpm),'wb') as f:
f.write(pagemapxml.encode('utf-8'))
if DUMP:
print(pagemapproc.getNames())
print(pagemapproc.getOffsets())
print("\n\nPage Map")
print(pagemapxml)
# process the toc ncx
# ncx map keys: name, pos, len, noffs, text, hlvl, kind, pos_fid, parent, child1, childn, num
print("Processing ncx / toc")
ncx = ncxExtract(mh, files)
ncx_data = ncx.parseNCX()
# extend the ncx data with filenames and proper internal idtags
for i in range(len(ncx_data)):
ncxmap = ncx_data[i]
[junk1, junk2, junk3, fid, junk4, off] = ncxmap['pos_fid'].split(':')
filename, idtag = k8proc.getIDTagByPosFid(fid, off)
ncxmap['filename'] = filename
ncxmap['idtag'] = unicode_str(idtag)
ncx_data[i] = ncxmap
# convert the rawML to a set of xhtml files
print("Building an epub-like structure")
htmlproc = XHTMLK8Processor(rscnames, k8proc)
usedmap = htmlproc.buildXHTML()
# write out the xhtml svg, and css files
# fileinfo = [skelid|coverpage, dir, name]
fileinfo = []
# first create a cover page if none exists
if CREATE_COVER_PAGE:
cover = CoverProcessor(files, metadata, rscnames)
cover_img = utf8_str(cover.getImageName())
need_to_create_cover_page = False
if cover_img is not None:
if k8resc is None or not k8resc.hasSpine():
part = k8proc.getPart(0)
if part.find(cover_img) == -1:
need_to_create_cover_page = True
else:
if "coverpage" not in k8resc.spine_idrefs:
part = k8proc.getPart(int(k8resc.spine_order[0]))
if part.find(cover_img) == -1:
k8resc.prepend_to_spine("coverpage", "inserted", "no", None)
if k8resc.spine_order[0] == "coverpage":
need_to_create_cover_page = True
if need_to_create_cover_page:
filename = cover.getXHTMLName()
fileinfo.append(["coverpage", 'Text', filename])
guidetext += cover.guide_toxml()
cover.writeXHTML()
n = k8proc.getNumberOfParts()
for i in range(n):
part = k8proc.getPart(i)
[skelnum, dir, filename, beg, end, aidtext] = k8proc.getPartInfo(i)
fileinfo.append([str(skelnum), dir, filename])
fname = os.path.join(files.k8oebps,dir,filename)
with open(pathof(fname),'wb') as f:
f.write(part)
n = k8proc.getNumberOfFlows()
for i in range(1, n):
[ptype, pformat, pdir, filename] = k8proc.getFlowInfo(i)
flowpart = k8proc.getFlow(i)
if pformat == b'file':
fileinfo.append([None, pdir, filename])
fname = os.path.join(files.k8oebps,pdir,filename)
with open(pathof(fname),'wb') as f:
f.write(flowpart)
# create the opf
opf = OPFProcessor(files, metadata.copy(), fileinfo, rscnames, True, mh, usedmap,
pagemapxml=pagemapxml, guidetext=guidetext, k8resc=k8resc, epubver=epubver)
uuid = opf.writeOPF(bool(obfuscate_data))
if opf.hasNCX():
# Create a toc.ncx.
ncx.writeK8NCX(ncx_data, metadata)
if opf.hasNAV():
# Create a navigation document.
nav = NAVProcessor(files)
nav.writeNAV(ncx_data, guidetext, metadata)
# make an epub-like structure of it all
print("Creating an epub-like file")
files.makeEPUB(usedmap, obfuscate_data, uuid)
def processMobi7(mh, metadata, sect, files, rscnames):
global DUMP
global WRITE_RAW_DATA
# An original Mobi
rawML = mh.getRawML()
if DUMP or WRITE_RAW_DATA:
outraw = os.path.join(files.mobi7dir,files.getInputFileBasename() + '.rawml')
with open(pathof(outraw),'wb') as f:
f.write(rawML)
# process the toc ncx
# ncx map keys: name, pos, len, noffs, text, hlvl, kind, pos_fid, parent, child1, childn, num
ncx = ncxExtract(mh, files)
ncx_data = ncx.parseNCX()
ncx.writeNCX(metadata)
positionMap = {}
# if Dictionary build up the positionMap
if mh.isDictionary():
if mh.DictInLanguage():
metadata['DictInLanguage'] = [mh.DictInLanguage()]
if mh.DictOutLanguage():
metadata['DictOutLanguage'] = [mh.DictOutLanguage()]
positionMap = dictSupport(mh, sect).getPositionMap()
# convert the rawml back to Mobi ml
proc = HTMLProcessor(files, metadata, rscnames)
srctext = proc.findAnchors(rawML, ncx_data, positionMap)
srctext, usedmap = proc.insertHREFS()
# write the proper mobi html
fileinfo=[]
# fname = files.getInputFileBasename() + '.html'
fname = 'book.html'
fileinfo.append([None,'', fname])
outhtml = os.path.join(files.mobi7dir, fname)
with open(pathof(outhtml), 'wb') as f:
f.write(srctext)
# extract guidetext from srctext
    guidetext = b''
# no pagemap support for older mobis
# pagemapxml = None
guidematch = re.search(br'''<guide>(.*)</guide>''',srctext,re.IGNORECASE+re.DOTALL)
if guidematch:
guidetext = guidematch.group(1)
        # sometimes the old mobi guide in srctext is horribly written, so clean it up
guidetext = guidetext.replace(b"\r", b"")
guidetext = guidetext.replace(b'<REFERENCE', b'<reference')
guidetext = guidetext.replace(b' HREF=', b' href=')
guidetext = guidetext.replace(b' TITLE=', b' title=')
guidetext = guidetext.replace(b' TYPE=', b' type=')
# reference must be a self-closing tag
# and any href must be replaced with filepos information
ref_tag_pattern = re.compile(br'''(<reference [^>]*>)''', re.IGNORECASE)
guidepieces = ref_tag_pattern.split(guidetext)
for i in range(1,len(guidepieces), 2):
reftag = guidepieces[i]
# remove any href there now to replace with filepos
reftag = re.sub(br'''href\s*=[^'"]*['"][^'"]*['"]''',b'', reftag)
# make sure the reference tag ends properly
if not reftag.endswith(b"/>"):
reftag = reftag[0:-1] + b"/>"
guidepieces[i] = reftag
guidetext = b''.join(guidepieces)
replacetext = br'''href="'''+utf8_str(fileinfo[0][2])+ br'''#filepos\1"'''
guidetext = re.sub(br'''filepos=['"]{0,1}0*(\d+)['"]{0,1}''', replacetext, guidetext)
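        # Illustrative effect of the substitution above: a guide entry such as
        #   <reference type="toc" title="TOC" filepos="0000012345"/>
        # becomes
        #   <reference type="toc" title="TOC" href="book.html#filepos12345"/>
        # (leading zeros stripped, href pointing into the book.html written above).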
guidetext += b'\n'
if 'StartOffset' in metadata:
for value in metadata['StartOffset']:
if int(value) == 0xffffffff:
value = '0'
starting_offset = value
# get guide items from metadata
metaguidetext = b'<reference type="text" href="'+utf8_str(fileinfo[0][2])+b'#filepos'+utf8_str(starting_offset)+b'" />\n'
guidetext += metaguidetext
if isinstance(guidetext, binary_type):
guidetext = guidetext.decode(mh.codec)
# create an OPF
opf = OPFProcessor(files, metadata, fileinfo, rscnames, ncx.isNCX, mh, usedmap, guidetext=guidetext)
opf.writeOPF()
def processUnknownSections(mh, sect, files, K8Boundary):
global DUMP
global TERMINATION_INDICATOR1
global TERMINATION_INDICATOR2
global TERMINATION_INDICATOR3
if DUMP:
print("Unpacking any remaining unknown records")
beg = mh.start
end = sect.num_sections
if beg < K8Boundary:
# then we're processing the first part of a combination file
end = K8Boundary
for i in range(beg, end):
if sect.sectiondescriptions[i] == "":
data = sect.loadSection(i)
type = data[0:4]
if type == TERMINATION_INDICATOR3:
description = "Termination Marker 3 Nulls"
elif type == TERMINATION_INDICATOR2:
description = "Termination Marker 2 Nulls"
elif type == TERMINATION_INDICATOR1:
description = "Termination Marker 1 Null"
elif type == "INDX":
fname = "Unknown%05d_INDX.dat" % i
description = "Unknown INDX section"
if DUMP:
outname= os.path.join(files.outdir, fname)
with open(pathof(outname), 'wb') as f:
f.write(data)
print("Extracting %s: %s from section %d" % (description, fname, i))
description = description + ", extracting as %s" % fname
else:
fname = "unknown%05d.dat" % i
description = "Mysterious Section, first four bytes %s" % describe(data[0:4])
if DUMP:
outname= os.path.join(files.outdir, fname)
with open(pathof(outname), 'wb') as f:
f.write(data)
print("Extracting %s: %s from section %d" % (description, fname, i))
description = description + ", extracting as %s" % fname
sect.setsectiondescription(i, description)
def process_all_mobi_headers(files, apnxfile, sect, mhlst, K8Boundary, k8only=False, epubver='2', use_hd=False):
global DUMP
global WRITE_RAW_DATA
rscnames = []
rsc_ptr = -1
k8resc = None
obfuscate_data = []
for mh in mhlst:
pagemapproc = None
if mh.isK8():
sect.setsectiondescription(mh.start,"KF8 Header")
mhname = os.path.join(files.outdir,"header_K8.dat")
print("Processing K8 section of book...")
elif mh.isPrintReplica():
sect.setsectiondescription(mh.start,"Print Replica Header")
mhname = os.path.join(files.outdir,"header_PR.dat")
print("Processing PrintReplica section of book...")
else:
if mh.version == 0:
sect.setsectiondescription(mh.start, "PalmDoc Header".format(mh.version))
else:
sect.setsectiondescription(mh.start,"Mobipocket {0:d} Header".format(mh.version))
mhname = os.path.join(files.outdir,"header.dat")
print("Processing Mobipocket {0:d} section of book...".format(mh.version))
if DUMP:
# write out raw mobi header data
with open(pathof(mhname), 'wb') as f:
f.write(mh.header)
# process each mobi header
metadata = mh.getMetaData()
mh.describeHeader(DUMP)
if mh.isEncrypted():
raise unpackException('Book is encrypted')
pagemapproc = None
# first handle all of the different resource sections: images, resources, fonts, and etc
# build up a list of image names to use to postprocess the ebook
print("Unpacking images, resources, fonts, etc")
beg = mh.firstresource
end = sect.num_sections
if beg < K8Boundary:
# processing first part of a combination file
end = K8Boundary
cover_offset = int(metadata.get('CoverOffset', ['-1'])[0])
if not CREATE_COVER_PAGE:
cover_offset = None
for i in range(beg, end):
data = sect.loadSection(i)
type = data[0:4]
# handle the basics first
if type in [b"FLIS", b"FCIS", b"FDST", b"DATP"]:
if DUMP:
fname = unicode_str(type) + "%05d" % i
if mh.isK8():
fname += "_K8"
fname += '.dat'
outname= os.path.join(files.outdir, fname)
with open(pathof(outname), 'wb') as f:
f.write(data)
print("Dumping section {0:d} type {1:s} to file {2:s} ".format(i,unicode_str(type),outname))
sect.setsectiondescription(i,"Type {0:s}".format(unicode_str(type)))
rscnames.append(None)
elif type == b"SRCS":
rscnames = processSRCS(i, files, rscnames, sect, data)
elif type == b"PAGE":
rscnames, pagemapproc = processPAGE(i, files, rscnames, sect, data, mh, pagemapproc)
elif type == b"CMET":
rscnames = processCMET(i, files, rscnames, sect, data)
elif type == b"FONT":
rscnames, obfuscate_data, rsc_ptr = processFONT(i, files, rscnames, sect, data, obfuscate_data, beg, rsc_ptr)
elif type == b"CRES":
rscnames, rsc_ptr = processCRES(i, files, rscnames, sect, data, beg, rsc_ptr, use_hd)
elif type == b"CONT":
rscnames = processCONT(i, files, rscnames, sect, data)
elif type == b"kind":
rscnames = processkind(i, files, rscnames, sect, data)
elif type == b'\xa0\xa0\xa0\xa0':
sect.setsectiondescription(i,"Empty_HD_Image/Resource_Placeholder")
rscnames.append(None)
rsc_ptr += 1
elif type == b"RESC":
rscnames, k8resc = processRESC(i, files, rscnames, sect, data, k8resc)
elif data == EOF_RECORD:
sect.setsectiondescription(i,"End Of File")
rscnames.append(None)
elif data[0:8] == b"BOUNDARY":
sect.setsectiondescription(i,"BOUNDARY Marker")
rscnames.append(None)
else:
                # if we reach here it should be an image; otherwise treat it as unknown
rscnames, rsc_ptr = processImage(i, files, rscnames, sect, data, beg, rsc_ptr, cover_offset)
# done unpacking resources
# Print Replica
if mh.isPrintReplica() and not k8only:
processPrintReplica(metadata, files, rscnames, mh)
continue
# KF8 (Mobi 8)
if mh.isK8():
processMobi8(mh, metadata, sect, files, rscnames, pagemapproc, k8resc, obfuscate_data, apnxfile, epubver)
# Old Mobi (Mobi 7)
elif not k8only:
processMobi7(mh, metadata, sect, files, rscnames)
# process any remaining unknown sections of the palm file
processUnknownSections(mh, sect, files, K8Boundary)
return
def unpackBook(infile, outdir, apnxfile=None, epubver='2', use_hd=False, dodump=False, dowriteraw=False, dosplitcombos=False):
global DUMP
global WRITE_RAW_DATA
global SPLIT_COMBO_MOBIS
if DUMP or dodump:
DUMP = True
if WRITE_RAW_DATA or dowriteraw:
WRITE_RAW_DATA = True
if SPLIT_COMBO_MOBIS or dosplitcombos:
SPLIT_COMBO_MOBIS = True
infile = unicode_str(infile)
outdir = unicode_str(outdir)
if apnxfile is not None:
apnxfile = unicode_str(apnxfile)
files = fileNames(infile, outdir)
# process the PalmDoc database header and verify it is a mobi
sect = Sectionizer(infile)
if sect.ident != b'BOOKMOBI' and sect.ident != b'TEXtREAd':
raise unpackException('Invalid file format')
if DUMP:
sect.dumppalmheader()
else:
print("Palm DB type: %s, %d sections." % (sect.ident.decode('utf-8'),sect.num_sections))
# scan sections to see if this is a compound mobi file (K8 format)
# and build a list of all mobi headers to process.
mhlst = []
mh = MobiHeader(sect,0)
# if this is a mobi8-only file hasK8 here will be true
mhlst.append(mh)
K8Boundary = -1
if mh.isK8():
print("Unpacking a KF8 book...")
hasK8 = True
else:
# This is either a Mobipocket 7 or earlier, or a combi M7/KF8
# Find out which
hasK8 = False
for i in range(len(sect.sectionoffsets)-1):
before, after = sect.sectionoffsets[i:i+2]
if (after - before) == 8:
data = sect.loadSection(i)
if data == K8_BOUNDARY:
sect.setsectiondescription(i,"Mobi/KF8 Boundary Section")
mh = MobiHeader(sect,i+1)
hasK8 = True
mhlst.append(mh)
K8Boundary = i
break
if hasK8:
print("Unpacking a Combination M{0:d}/KF8 book...".format(mh.version))
if SPLIT_COMBO_MOBIS:
# if this is a combination mobi7-mobi8 file split them up
mobisplit = mobi_split(infile)
if mobisplit.combo:
outmobi7 = os.path.join(files.outdir, 'mobi7-'+files.getInputFileBasename() + '.mobi')
outmobi8 = os.path.join(files.outdir, 'mobi8-'+files.getInputFileBasename() + '.azw3')
with open(pathof(outmobi7), 'wb') as f:
f.write(mobisplit.getResult7())
with open(pathof(outmobi8), 'wb') as f:
f.write(mobisplit.getResult8())
else:
print("Unpacking a Mobipocket {0:d} book...".format(mh.version))
if hasK8:
files.makeK8Struct()
process_all_mobi_headers(files, apnxfile, sect, mhlst, K8Boundary, False, epubver, use_hd)
if DUMP:
sect.dumpsectionsinfo()
return
def usage(progname):
print("")
print("Description:")
print(" Unpacks an unencrypted Kindle/MobiPocket ebook to html and images")
print(" or an unencrypted Kindle/Print Replica ebook to PDF and images")
print(" into the specified output folder.")
print("Usage:")
print(" %s -r -s -p apnxfile -d -h --epub_version= infile [outdir]" % progname)
print("Options:")
print(" -h print this help message")
print(" -i use HD Images, if present, to overwrite reduced resolution images")
print(" -s split combination mobis into mobi7 and mobi8 ebooks")
print(" -p APNXFILE path to an .apnx file associated with the azw3 input (optional)")
print(" --epub_version= specify epub version to unpack to: 2, 3, A (for automatic) or ")
print(" F (force to fit to epub2 definitions), default is 2")
print(" -d dump headers and other info to output and extra files")
print(" -r write raw data to the output folder")
# def main(argv=unicode_argv()):
# global DUMP
# global WRITE_RAW_DATA
# global SPLIT_COMBO_MOBIS
#
# print("KindleUnpack v0.80")
# print(" Based on initial mobipocket version Copyright © 2009 Charles M. Hannum <[email protected]>")
# print(" Extensive Extensions and Improvements Copyright © 2009-2014 ")
# print(" by: P. Durrant, K. Hendricks, S. Siebert, fandrieu, DiapDealer, nickredding, tkeo.")
# print(" This program is free software: you can redistribute it and/or modify")
# print(" it under the terms of the GNU General Public License as published by")
# print(" the Free Software Foundation, version 3.")
#
# progname = os.path.basename(argv[0])
# try:
# opts, args = getopt.getopt(argv[1:], "dhirsp:", ['epub_version='])
# except getopt.GetoptError as err:
# print(str(err))
# usage(progname)
# sys.exit(2)
#
# if len(args)<1:
# usage(progname)
# sys.exit(2)
#
# apnxfile = None
# epubver = '2'
# use_hd = False
#
# for o, a in opts:
# if o == "-h":
# usage(progname)
# sys.exit(0)
# if o == "-i":
# use_hd = True
# if o == "-d":
# DUMP = True
# if o == "-r":
# WRITE_RAW_DATA = True
# if o == "-s":
# SPLIT_COMBO_MOBIS = True
# if o == "-p":
# apnxfile = a
# if o == "--epub_version":
# epubver = a
#
# if len(args) > 1:
# infile, outdir = args
# else:
# infile = args[0]
# outdir = os.path.splitext(infile)[0]
#
# infileext = os.path.splitext(infile)[1].upper()
# if infileext not in ['.MOBI', '.PRC', '.AZW', '.AZW3', '.AZW4']:
# print("Error: first parameter must be a Kindle/Mobipocket ebook or a Kindle/Print Replica ebook.")
# return 1
#
# try:
# print('Unpacking Book...')
# unpackBook(infile, outdir, apnxfile, epubver, use_hd)
# print('Completed')
#
# except ValueError as e:
# print("Error: %s" % e)
# print(traceback.format_exc())
# return 1
#
# return 0
#
#
# if __name__ == '__main__':
# sys.exit(main())
| robwebset/script.ebooks | resources/lib/kindleunpack/kindleunpack.py | Python | gpl-2.0 | 43,757 |
"""
File: ULAI03.py
Library Call Demonstrated: mcculw.ul.a_in_scan() in Background mode with scan
option mcculw.enums.ScanOptions.BACKGROUND
Purpose: Scans a range of A/D Input Channels and stores
the sample data in an array.
Demonstration: Displays the analog input on up to eight channels.
Other Library Calls: mcculw.ul.win_buf_alloc()
or mcculw.ul.win_buf_alloc_32
mcculw.ul.win_buf_free()
mcculw.ul.get_status()
mcculw.ul.stop_background()
Special Requirements: Device must have an A/D converter.
Analog signals on up to eight input channels.
"""
from __future__ import absolute_import, division, print_function
from builtins import * # @UnusedWildImport
import tkinter as tk
from tkinter import messagebox
from ctypes import cast, POINTER, c_ushort, c_ulong
from mcculw import ul
from mcculw.enums import ScanOptions, Status, FunctionType
from mcculw.ul import ULError
from mcculw.device_info import DaqDeviceInfo
try:
from ui_examples_util import UIExample, show_ul_error
except ImportError:
from .ui_examples_util import UIExample, show_ul_error
class ULAI03(UIExample):
def __init__(self, master=None):
super(ULAI03, self).__init__(master)
# By default, the example detects all available devices and selects the
# first device listed.
# If use_device_detection is set to False, the board_num property needs
# to match the desired board number configured with Instacal.
use_device_detection = True
self.board_num = 0
try:
if use_device_detection:
self.configure_first_detected_device()
self.device_info = DaqDeviceInfo(self.board_num)
self.ai_info = self.device_info.get_ai_info()
if self.ai_info.is_supported and self.ai_info.supports_scan:
self.create_widgets()
else:
self.create_unsupported_widgets()
except ULError:
self.create_unsupported_widgets()
def start_scan(self):
self.low_chan = self.get_low_channel_num()
self.high_chan = self.get_high_channel_num()
self.num_chans = self.high_chan - self.low_chan + 1
if self.low_chan > self.high_chan:
messagebox.showerror(
"Error",
"Low Channel Number must be greater than or equal to High "
"Channel Number")
self.set_ui_idle_state()
return
rate = 100
points_per_channel = 1000
total_count = points_per_channel * self.num_chans
ai_range = self.ai_info.supported_ranges[0]
# Allocate a buffer for the scan
if self.ai_info.resolution <= 16:
# Use the win_buf_alloc method for devices with a resolution <=
# 16
self.memhandle = ul.win_buf_alloc(total_count)
# Convert the memhandle to a ctypes array
# Use the memhandle_as_ctypes_array method for devices with a
# resolution <= 16
self.ctypes_array = cast(self.memhandle, POINTER(c_ushort))
else:
# Use the win_buf_alloc_32 method for devices with a resolution
# > 16
self.memhandle = ul.win_buf_alloc_32(total_count)
# Use the memhandle_as_ctypes_array_32 method for devices with a
# resolution > 16
self.ctypes_array = cast(self.memhandle, POINTER(c_ulong))
# Note: the ctypes array will no longer be valid after
# win_buf_free is called.
# A copy of the buffer can be created using win_buf_to_array
# or win_buf_to_array_32 before the memory is freed. The copy
# can be used at any time.
# Check if the buffer was successfully allocated
if not self.memhandle:
messagebox.showerror("Error", "Failed to allocate memory")
self.set_ui_idle_state()
return
# Create the frames that will hold the data
self.recreate_data_frame()
try:
# Start the scan
ul.a_in_scan(self.board_num, self.low_chan, self.high_chan,
total_count, rate, ai_range, self.memhandle,
ScanOptions.BACKGROUND)
except ULError as e:
show_ul_error(e)
self.set_ui_idle_state()
return
# Start updating the displayed values
self.update_displayed_values()
def update_displayed_values(self):
# Get the status from the device
status, curr_count, curr_index = ul.get_status(self.board_num,
FunctionType.AIFUNCTION)
# Display the status info
self.update_status_labels(status, curr_count, curr_index)
# Display the values
self.display_values(curr_index, curr_count)
# Call this method again until the stop button is pressed
if status == Status.RUNNING:
self.after(100, self.update_displayed_values)
else:
# Free the allocated memory
ul.win_buf_free(self.memhandle)
# Stop the background operation (this is required even if the
# scan completes successfully)
ul.stop_background(self.board_num, FunctionType.AIFUNCTION)
self.set_ui_idle_state()
def update_status_labels(self, status, curr_count, curr_index):
if status == Status.IDLE:
self.status_label["text"] = "Idle"
else:
self.status_label["text"] = "Running"
self.index_label["text"] = str(curr_index)
self.count_label["text"] = str(curr_count)
def display_values(self, curr_index, curr_count):
per_channel_display_count = 10
array = self.ctypes_array
low_chan = self.low_chan
high_chan = self.high_chan
channel_text = []
# Add the headers
for chan_num in range(low_chan, high_chan + 1):
channel_text.append("Channel " + str(chan_num) + "\n")
# If no data has been gathered, don't add data to the labels
if curr_count > 1:
chan_count = high_chan - low_chan + 1
chan_num = low_chan
# curr_index points to the start of the last completed channel
# scan that was transferred between the board and the data
# buffer. Based on this, calculate the first index we want to
# display using subtraction.
first_index = max(curr_index - ((per_channel_display_count - 1)
* chan_count), 0)
last_index = first_index + min(chan_count
* per_channel_display_count,
curr_count)
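            # Worked example (illustrative): with 4 channels, curr_index = 400 and
            # curr_count >= 40, first_index = max(400 - 9*4, 0) = 364 and
            # last_index = 364 + 40 = 404, i.e. the latest 10 scans per channel.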
# Add (up to) the latest 10 values for each channel to the text
for data_index in range(first_index, last_index):
channel_text[chan_num - low_chan] += (str(array[data_index])
+ "\n")
chan_num = low_chan if chan_num == high_chan else chan_num + 1
# Update the labels for each channel
for chan_num in range(low_chan, high_chan + 1):
chan_index = chan_num - low_chan
self.chan_labels[chan_index]["text"] = channel_text[chan_index]
def recreate_data_frame(self):
low_chan = self.low_chan
high_chan = self.high_chan
new_data_frame = tk.Frame(self.inner_data_frame)
self.chan_labels = []
# Add the labels for each channel
for chan_num in range(low_chan, high_chan + 1):
chan_label = tk.Label(new_data_frame, justify=tk.LEFT, padx=3)
chan_label.grid(row=0, column=chan_num - low_chan)
self.chan_labels.append(chan_label)
self.data_frame.destroy()
self.data_frame = new_data_frame
self.data_frame.grid()
def stop(self):
ul.stop_background(self.board_num, FunctionType.AIFUNCTION)
def set_ui_idle_state(self):
self.high_channel_entry["state"] = tk.NORMAL
self.low_channel_entry["state"] = tk.NORMAL
self.start_button["command"] = self.start
self.start_button["text"] = "Start"
def start(self):
self.high_channel_entry["state"] = tk.DISABLED
self.low_channel_entry["state"] = tk.DISABLED
self.start_button["command"] = self.stop
self.start_button["text"] = "Stop"
self.start_scan()
def get_low_channel_num(self):
if self.ai_info.num_chans == 1:
return 0
try:
return int(self.low_channel_entry.get())
except ValueError:
return 0
def get_high_channel_num(self):
if self.ai_info.num_chans == 1:
return 0
try:
return int(self.high_channel_entry.get())
except ValueError:
return 0
def validate_channel_entry(self, p):
if p == '':
return True
try:
value = int(p)
if value < 0 or value > self.ai_info.num_chans - 1:
return False
except ValueError:
return False
return True
def create_widgets(self):
'''Create the tkinter UI'''
self.device_label = tk.Label(self)
self.device_label.pack(fill=tk.NONE, anchor=tk.NW)
self.device_label["text"] = ('Board Number ' + str(self.board_num)
+ ": " + self.device_info.product_name
+ " (" + self.device_info.unique_id + ")")
main_frame = tk.Frame(self)
main_frame.pack(fill=tk.X, anchor=tk.NW)
curr_row = 0
if self.ai_info.num_chans > 1:
channel_vcmd = self.register(self.validate_channel_entry)
low_channel_entry_label = tk.Label(main_frame)
low_channel_entry_label["text"] = "Low Channel Number:"
low_channel_entry_label.grid(
row=curr_row, column=0, sticky=tk.W)
self.low_channel_entry = tk.Spinbox(
main_frame, from_=0,
to=max(self.ai_info.num_chans - 1, 0),
validate='key', validatecommand=(channel_vcmd, '%P'))
self.low_channel_entry.grid(
row=curr_row, column=1, sticky=tk.W)
curr_row += 1
high_channel_entry_label = tk.Label(main_frame)
high_channel_entry_label["text"] = "High Channel Number:"
high_channel_entry_label.grid(
row=curr_row, column=0, sticky=tk.W)
self.high_channel_entry = tk.Spinbox(
main_frame, from_=0, validate='key',
to=max(self.ai_info.num_chans - 1, 0),
validatecommand=(channel_vcmd, '%P'))
self.high_channel_entry.grid(
row=curr_row, column=1, sticky=tk.W)
initial_value = min(self.ai_info.num_chans - 1, 3)
self.high_channel_entry.delete(0, tk.END)
self.high_channel_entry.insert(0, str(initial_value))
curr_row += 1
self.results_group = tk.LabelFrame(
self, text="Results", padx=3, pady=3)
self.results_group.pack(fill=tk.X, anchor=tk.NW, padx=3, pady=3)
self.results_group.grid_columnconfigure(1, weight=1)
curr_row = 0
status_left_label = tk.Label(self.results_group)
status_left_label["text"] = "Status:"
status_left_label.grid(row=curr_row, column=0, sticky=tk.W)
self.status_label = tk.Label(self.results_group)
self.status_label["text"] = "Idle"
self.status_label.grid(row=curr_row, column=1, sticky=tk.W)
curr_row += 1
index_left_label = tk.Label(self.results_group)
index_left_label["text"] = "Index:"
index_left_label.grid(row=curr_row, column=0, sticky=tk.W)
self.index_label = tk.Label(self.results_group)
self.index_label["text"] = "-1"
self.index_label.grid(row=curr_row, column=1, sticky=tk.W)
curr_row += 1
count_left_label = tk.Label(self.results_group)
count_left_label["text"] = "Count:"
count_left_label.grid(row=curr_row, column=0, sticky=tk.W)
self.count_label = tk.Label(self.results_group)
self.count_label["text"] = "0"
self.count_label.grid(row=curr_row, column=1, sticky=tk.W)
curr_row += 1
self.inner_data_frame = tk.Frame(self.results_group)
self.inner_data_frame.grid(
row=curr_row, column=0, columnspan=2, sticky=tk.W)
self.data_frame = tk.Frame(self.inner_data_frame)
self.data_frame.grid()
button_frame = tk.Frame(self)
button_frame.pack(fill=tk.X, side=tk.RIGHT, anchor=tk.SE)
self.start_button = tk.Button(button_frame)
self.start_button["text"] = "Start"
self.start_button["command"] = self.start
self.start_button.grid(row=0, column=0, padx=3, pady=3)
self.quit_button = tk.Button(button_frame)
self.quit_button["text"] = "Quit"
self.quit_button["command"] = self.master.destroy
self.quit_button.grid(row=0, column=1, padx=3, pady=3)
if __name__ == "__main__":
# Start the example
ULAI03(master=tk.Tk()).mainloop()
| mccdaq/mcculw | examples/ui/ULAI03.py | Python | mit | 13,607 |
__author__ = 'Guanhua, Joms'
from numpy import exp, dot, array, shape, sum
from scipy.io import loadmat
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('train_path', help='Path to training database')
arguments = parser.parse_args()
roaddata = loadmat(arguments.train_path)
class ProbNeuralNetwork(object):
def __init__(self, roaddata):
self._training_data = roaddata['training_data']
self._dev_data = roaddata['dev_data']
self._test_data = roaddata['test_data']
self._training_target = roaddata['training_target']
self._dev_target = roaddata['dev_target']
self._test_target = roaddata['test_target']
self._feature_dim = roaddata['feature_dim']
self._training_dim = roaddata['training_dim']
self._dev_dim = roaddata['dev_dim']
self._test_dim = roaddata['test_dim']
road_data = []
non_road_data = []
for i in range(self._training_dim):
if self._training_target[i] == 1:
road_data.append(self._training_data[i].tolist())
else:
non_road_data.append(self._training_data[i].tolist())
road_data = array(road_data)
non_road_data = array(non_road_data)
self._road_data = road_data.T
self._non_road_data = non_road_data.T
self._road_data_dim = shape(road_data)[0]
self._non_road_data_dim = shape(non_road_data)[0]
def predict(self, new_point, smoothing):
Class_1 = self.activation(new_point, self._road_data, smoothing)/self._road_data_dim
Class_0 = self.activation(new_point, self._non_road_data, smoothing)/self._non_road_data_dim
if Class_1 >= Class_0:
Class = 1
else:
Class = 0
return Class
def activation(self, new_point, data, smoothing):
return sum(exp((dot(new_point, data)-1)/(smoothing**2))/self._training_dim)
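# Summary of the model above (my reading, not part of the original source): each
# class score is a sum of kernels exp((x . x_i - 1) / smoothing**2) over that
# class's training vectors, scaled by 1/training_dim inside activation(), then
# divided by the class size in predict(); the larger score wins, with ties going
# to the road class (1).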
prob_nn = ProbNeuralNetwork(roaddata)
acc = 0.0
road_acc = 0.0
nonroad_acc = 0.0
road_num = 0
nonroad_num = 0
list_non = []
list_road = []
res = 0.0
# smoothing value is critical
smoothing = 2.9
for i in range(roaddata['test_dim']):
result = prob_nn.predict(roaddata['test_data'][i], smoothing)
if roaddata['test_target'][i] == 1:
road_num += 1
else:
nonroad_num += 1
if result == roaddata['test_target'][i]:
acc += 1
if roaddata['test_target'][i] == 1:
road_acc += 1
else:
nonroad_acc += 1
acc = acc/roaddata['test_dim']*100
acc = acc[0][0].item()
road_acc = road_acc/road_num*100
nonroad_acc = nonroad_acc/nonroad_num*100
avg_acc = (nonroad_acc+road_acc)/2
print('smoothing value is', smoothing)
print('overall acc is %.2f' % acc)
print('acc for road is %.2f' % road_acc)
print('acc for non road is %.2f' % nonroad_acc)
print('avg acc is %.2f ' % avg_acc)
| archonren/project | algorithms/pnn.py | Python | mit | 2,871 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
from ..lib.decorators import json
from . import api
@api.route("/test", methods=["GET"])
@json
def test():
return {}
@api.route("/auth/register", methods=["GET"])
def auth_register():
return {}
@api.route("/auth/exist", methods=["get"])
| ee-book/api | api/v1/users.py | Python | apache-2.0 | 416 |
import argparse
import sys
import tempestmail.cmd
from tempestmail.tempest_mail import TempestMail
class TempestMailCmd(tempestmail.cmd.TempestMailApp):
def parse_arguments(self):
parser = argparse.ArgumentParser(description='tempest-mail')
parser.add_argument('-c', dest='config',
default='/etc/tempest-mail/tempest-mail.yaml',
help='Path to config file')
parser.add_argument('-l', dest='logconfig',
help='Path to log config file')
parser.add_argument('-p', dest='pidfile',
help='Path to pid file',
default='/var/run/tempest-mail/tempest-mail.pid')
parser.add_argument('--version', dest='version',
help='Show version')
parser.add_argument('--downstream', dest='downstream',
action='store_true',
help='Logs are downstream')
parser.add_argument('--upstream', dest='upstream',
action='store_true',
help='Logs are upstream',
default=True)
self.args = parser.parse_args()
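    # Example invocation (illustrative, based on the options defined above):
    #   tempest-mail -c /etc/tempest-mail/tempest-mail.yaml --downstream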
def main(self):
self.setup_logging()
self.mailcmd = TempestMail(self.args.config,
self.args.upstream,
self.args.downstream)
config = self.mailcmd.loadConfig()
self.mailcmd.setConfig(config)
self.mailcmd.checkJobs()
def main():
tmc = TempestMailCmd()
tmc.parse_arguments()
tmc.setup_logging()
return tmc.main()
if __name__ == '__main__':
sys.exit(main())
| arxcruz/tempest-tool | tempestmail/cmd/tempestmailcmd.py | Python | gpl-3.0 | 1,740 |
#! /usr/bin/env python
import glob
import os
import re
__author__ = 'Peipei YI'
dataset = '~/working/dataset/snap/'
dataset = os.path.expanduser(dataset)
tflabel = '~/working/TF_label_release/'
tflabel = os.path.expanduser(tflabel)
output = '/tmp/csppyi/'
re_k = re.compile('const int K = [\d]+;')
def gen_cmd(k):
script_path = tflabel + 'tflabel_k' + str(k) + '.sh'
new_k = 'const int K = ' + str(k) + ';'
tflabel_path = output + 'tflabel_out/tflabel_k' + str(k) + '/'
tflabel_log_path = output + 'tflabel_out/tflabel_k' + str(k) + '_log/'
# make dirs for tFlabel output
if not os.path.exists(tflabel_path):
os.makedirs(tflabel_path)
if not os.path.exists(tflabel_log_path):
os.makedirs(tflabel_log_path)
# update pre_check.cpp with new assignment for k
# cpp = open(cpp_path, 'r').read()
# cpp = re.sub(re_k, new_k, cpp, count=1)
# open(cpp_path, 'w').write(cpp)
# generate shell script
with open(script_path, 'wb+') as f:
cmd = '#! /bin/bash\n'
# compile
cmd += 'g++ -o '+ tflabel + 'pre_processing/pre_check.exe ' + tflabel + 'pre_processing/pre_check.cpp -O3\n'
# index
for dname in sorted(glob.glob(dataset + '/*.clean')):
filename = os.path.basename(dname)
cmd += tflabel + 'pre_processing/pre_check.exe ' + dataset + filename + ' -i ' + tflabel_path + filename + ' &> ' + tflabel_log_path + filename + '.log\n'
f.write(cmd)
if __name__ == '__main__':
for i in range(0, 10):
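        # i.e. one script per K value: 1, 2, 4, ..., 512 (powers of two)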
gen_cmd(2**i)
| yipeipei/peppy | _exp/reach/gen_tflabel_cmd.py | Python | mit | 1,561 |
"""
Contains utilities used during testing
"""
import os
import stat
import shutil
def remove_tree(tree):
"""
reset the permission of a file and directory tree and remove it
"""
os.chmod(tree, 0o777)
shutil.rmtree(tree)
def remove_file(file_name):
"""
reset the permission of a file and remove it
"""
os.chmod(file_name, 0o777)
os.remove(file_name)
def create_file(file_name):
"""
create an file
"""
return open(file_name, "w").close()
def create_directory(directory_name):
"""
create an directory
"""
os.makedirs(directory_name)
class ChangeDirectory:
# pylint: disable=too-few-public-methods
"""
Context manager for changing the current working directory
"""
def __init__(self, new_path):
self.new_path = os.path.expanduser(new_path)
self.saved_path = os.getcwd()
def __enter__(self):
os.chdir(self.new_path)
def __exit__(self, etype, value, traceback):
os.chdir(self.saved_path)
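# Example usage of ChangeDirectory (illustrative):
#     with ChangeDirectory("build"):
#         create_file("inside.txt")   # created relative to build/
#     # the previous working directory is restored on exit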
def create_tree(tree):
"""
create an file and directory tree
"""
for branch in tree:
if isinstance(branch, str):
create_file(branch)
elif isinstance(branch, dict):
for directory, file_objs in branch.items():
create_directory(directory)
with ChangeDirectory(directory):
create_tree(file_objs)
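# Example (illustrative): create_tree(["a.txt", {"subdir": ["b.txt"]}]) creates
# a.txt plus subdir/b.txt relative to the current working directory.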
def remove_read_permission(path):
"""
change users permissions to a path to write only
"""
mode = os.stat(path)[stat.ST_MODE]
os.chmod(path, mode & ~stat.S_IRUSR & ~stat.S_IRGRP & ~stat.S_IROTH)
def add_read_permission(path):
"""
change users permissions to a path to write only
"""
mode = os.stat(path)[stat.ST_MODE]
os.chmod(path, mode | stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH)
def remove_write_permission(path):
"""
change users permissions to a path to read only
"""
mode = os.stat(path)[stat.ST_MODE]
os.chmod(path, mode & ~stat.S_IWUSR & ~stat.S_IWGRP & ~stat.S_IWOTH)
def remove_execute_permission(path):
"""
    remove execute permission from a path
"""
mode = os.stat(path)[stat.ST_MODE]
os.chmod(path, mode & ~stat.S_IXUSR & ~stat.S_IXGRP & ~stat.S_IXOTH)
| arecarn/dploy | tests/utils.py | Python | mit | 2,296 |
class my_song(object):
def __init__(self, lyrics):
self.lyrics = lyrics
def awesome_song(self):
for line in self.lyrics:
print line
euphoria = my_song(["Why, why can't this moment last forevermore?",
"Tonight, tonight eternity's an open door",
"No, don't ever stop doing the things you do",
"Don't go, in every breath I take I'm breathing you"])
chorus = my_song(["Euphoria",
"Forever, 'till the end of time",
"From now on, only you and I",
"We're going u-u-u-u-u-u-up",
"Euphoria",
"An everlasting piece of art",
"A beating love within my heart",
"We're going u-u-u-u-u-u-up"])
euphoria.awesome_song()
chorus.awesome_song() | CodeCatz/litterbox | Pija/LearnPythontheHardWay/ex40_1.py | Python | mit | 682 |
#!/usr/bin/env python
# Software License Agreement (BSD License)
#
# Copyright (c) 2008, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Revision $Id$
## Simple talker demo, adapted here to publish geometry_msgs/Vector3 scanner
## commands to the 'c1' topic
import rospy
import csv
import sys
#from std_msgs.msg import Float64
from geometry_msgs.msg import Vector3
from std_msgs.msg import String
def talker():
pub = rospy.Publisher('c1', Vector3, queue_size=10)
rospy.init_node('talker', anonymous=True)
rate = rospy.Rate(10) # 10hz
while not rospy.is_shutdown():
print ('Waiting for the scanner command (GoHome, Start, Stop): ');
name = raw_input()
corrector = Vector3()
corrector.x = 0.9
corrector.y = 0.98
corrector.z = 0.0
if name == 'GoHome':
corrector.z = 1.0
if name == 'Start':
corrector.z = 2.0
if name == 'Stop':
corrector.z = 3.0
#corrector2 = 1.2
#hello_str = "hello world %s" % rospy.get_time()
#rospy.loginfo(hello_str)
pub.publish(corrector)
#pub2.publish(corrector2)
rate.sleep()
if __name__ == '__main__':
try:
talker()
except rospy.ROSInterruptException:
pass
| Shedino/RoverHighLevel | corrector_pkg/scripts/talker (backup).py | Python | cc0-1.0 | 2,693 |
'''
Created on Aug 14, 2015
@author: vkmguy
'''
from utility import DBConnectivity
from classes.hotel_detail import Hotel
def arrange_hotel(list_of_hotels):
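    # Summary (my reading of the code below): rank the hotels by how often they
    # appear in hotel_booking, most-booked first, using repeated selection of the
    # current maximum; hotels with no bookings keep a baseline score of 1.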
try:
con=DBConnectivity.create_connection()
cur=DBConnectivity.create_cursor(con)
hotel_list={}
final_list=[]
for i in range(0,len(list_of_hotels)):
flag=0
hotelid=list_of_hotels[i].get_hotelid()
cur.execute("select hotelid,count(hotelid)from hotel_booking where hotelid=:hid group by hotelid",{"hid":hotelid})
for row in cur:
flag=1
if(row[0]==hotelid):
hotel_list.update({list_of_hotels[i]:(row[1]+1)})
else:
hotel_list.update({list_of_hotels[i]:1})
if(flag==0):
hotel_list.update({list_of_hotels[i]:1})
if(hotel_list=={}):
return list_of_hotels
max1=0
l=len(hotel_list)
hotel=Hotel()
while(l!=0):
max1=0
for key,value in hotel_list.items():
if(max1<value):
max1=value
hotel=key
final_list.append(hotel)
for key,value in hotel_list.items():
if(key.get_hotelid()==hotel.get_hotelid()):
hotel_list.update({hotel:-1})
l=l-1;
return final_list
except Exception as e:
print(e)
finally:
cur.close()
con.close()
| VIkramx89/Flights-and-Hotels | database/priortize.py | Python | epl-1.0 | 1,519 |
from . import interfaces
import dbus.service
import dbus.mainloop.glib
import dbus.exceptions
import threading
import signal
import tuned.logs
import tuned.consts as consts
from inspect import ismethod
from tuned.utils.polkit import polkit
from gi.repository import GLib
from types import FunctionType
try:
# Python3 version
# getfullargspec is not present in Python2, so when we drop P2 support
# replace "getargspec(func)" in code with "getfullargspec(func).args"
from inspect import getfullargspec
def getargspec(func):
return getfullargspec(func)
except ImportError:
# Python2 version, drop after support stops
from inspect import getargspec
log = tuned.logs.get()
class DBusExporter(interfaces.ExporterInterface):
"""
Export method calls through DBus Interface.
We take a method to be exported and create a simple wrapper function
to call it. This is required as we need the original function to be
bound to the original object instance. While the wrapper will be bound
to an object we dynamically construct.
"""
def __init__(self, bus_name, interface_name, object_name):
dbus.mainloop.glib.DBusGMainLoop(set_as_default=True)
self._dbus_object_cls = None
self._dbus_object = None
self._dbus_methods = {}
self._signals = set()
self._bus_name = bus_name
self._interface_name = interface_name
self._object_name = object_name
self._thread = None
self._bus_object = None
self._polkit = polkit()
# dirty hack that fixes KeyboardInterrupt handling
# the hack is needed because PyGObject / GTK+-3 developers are morons
signal_handler = signal.getsignal(signal.SIGINT)
self._main_loop = GLib.MainLoop()
signal.signal(signal.SIGINT, signal_handler)
@property
def bus_name(self):
return self._bus_name
@property
def interface_name(self):
return self._interface_name
@property
def object_name(self):
return self._object_name
def running(self):
return self._thread is not None
def _prepare_for_dbus(self, method, wrapper):
source = """def {name}({args}):
return wrapper({args})
""".format(name=method.__name__, args=', '.join(getargspec(method.__func__).args))
code = compile(source, '<decorator-gen-%d>' % len(self._dbus_methods), 'exec')
# https://docs.python.org/3.9/library/inspect.html
# co_consts - tuple of constants used in the bytecode
# example:
# compile("e=2\ndef f(x):\n return x*2\n", "X", 'exec').co_consts
# (2, <code object f at 0x7f8c60c65330, file "X", line 2>, None)
# Because we have only one object in code (our function), we can use code.co_consts[0]
func = FunctionType(code.co_consts[0], locals(), method.__name__)
return func
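    # Illustrative sketch (hypothetical method name, not part of the original
    # code): for a bound method declared as `def stop(self, caller)` the source
    # built above would be
    #
    #   def stop(self, caller):
    #       return wrapper(self, caller)
    #
    # i.e. a pass-through with the original argument names spelled out, while
    # the `wrapper` closure still ends up calling the original bound method.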
def export(self, method, in_signature, out_signature):
if not ismethod(method):
raise Exception("Only bound methods can be exported.")
method_name = method.__name__
if method_name in self._dbus_methods:
raise Exception("Method with this name is already exported.")
def wrapper(owner, *args, **kwargs):
action_id = consts.NAMESPACE + "." + method.__name__
caller = args[-1]
log.debug("checking authorization for for action '%s' requested by caller '%s'" % (action_id, caller))
ret = self._polkit.check_authorization(caller, action_id)
if ret == 1:
log.debug("action '%s' requested by caller '%s' was successfully authorized by polkit" % (action_id, caller))
elif ret == 2:
log.warn("polkit error, but action '%s' requested by caller '%s' was successfully authorized by fallback method" % (action_id, caller))
elif ret == 0:
log.info("action '%s' requested by caller '%s' wasn't authorized, ignoring the request" % (action_id, caller))
args[-1] = ""
elif ret == -1:
log.warn("polkit error and action '%s' requested by caller '%s' wasn't authorized by fallback method, ignoring the request" % (action_id, caller))
args[-1] = ""
else:
log.error("polkit error and unable to use fallback method to authorize action '%s' requested by caller '%s', ignoring the request" % (action_id, caller))
args[-1] = ""
return method(*args, **kwargs)
wrapper = self._prepare_for_dbus(method, wrapper)
wrapper = dbus.service.method(self._interface_name, in_signature, out_signature, sender_keyword = "caller")(wrapper)
self._dbus_methods[method_name] = wrapper
def signal(self, method, out_signature):
if not ismethod(method):
raise Exception("Only bound methods can be exported.")
method_name = method.__name__
if method_name in self._dbus_methods:
raise Exception("Method with this name is already exported.")
def wrapper(owner, *args, **kwargs):
return method(*args, **kwargs)
wrapper = self._prepare_for_dbus(method, wrapper)
wrapper = dbus.service.signal(self._interface_name, out_signature)(wrapper)
self._dbus_methods[method_name] = wrapper
self._signals.add(method_name)
def send_signal(self, signal, *args, **kwargs):
err = False
        if signal not in self._signals or self._bus_object is None:
err = True
try:
method = getattr(self._bus_object, signal)
except AttributeError:
err = True
if err:
raise Exception("Signal '%s' doesn't exist." % signal)
else:
method(*args, **kwargs)
def _construct_dbus_object_class(self):
if self._dbus_object_cls is not None:
raise Exception("The exporter class was already build.")
unique_name = "DBusExporter_%d" % id(self)
cls = type(unique_name, (dbus.service.Object,), self._dbus_methods)
self._dbus_object_cls = cls
def start(self):
if self.running():
return
if self._dbus_object_cls is None:
self._construct_dbus_object_class()
self.stop()
bus = dbus.SystemBus()
bus_name = dbus.service.BusName(self._bus_name, bus)
self._bus_object = self._dbus_object_cls(bus, self._object_name, bus_name)
self._thread = threading.Thread(target=self._thread_code)
self._thread.start()
def stop(self):
if self._thread is not None and self._thread.is_alive():
self._main_loop.quit()
self._thread.join()
self._thread = None
def _thread_code(self):
self._main_loop.run()
del self._bus_object
self._bus_object = None
| redhat-performance/tuned | tuned/exports/dbus_exporter.py | Python | gpl-2.0 | 6,086 |
"""
Authentication module that uses Spacewalk's auth system.
Any org_admin or kickstart_admin can get in.
Copyright 2007-2008, Red Hat, Inc
Michael DeHaan <[email protected]>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301 USA
"""
import distutils.sysconfig
import sys
import xmlrpclib
plib = distutils.sysconfig.get_python_lib()
mod_path="%s/cobbler" % plib
sys.path.insert(0, mod_path)
def register():
"""
The mandatory cobbler module registration hook.
"""
return "authn"
def authenticate(api_handle,username,password):
"""
Validate a username/password combo, returning True/False
This will pass the username and password back to Spacewalk
to see if this authentication request is valid.
"""
spacewalk_url = api_handle.settings().spacewalk_url
client = xmlrpclib.Server(spacewalk_url, verbose=0)
valid = client.auth.checkAuthToken(username,password)
if valid is None:
return False
return (valid == 1)
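# Configuration sketch (file location assumed, not part of this module): the
# module is typically enabled in /etc/cobbler/modules.conf, e.g.
#
#   [authentication]
#   module = authn_spacewalk
#
# while the Spacewalk address comes from the `spacewalk_url` setting read above
# through api_handle.settings().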
| javiplx/cobbler-old_jobs | cobbler/modules/authn_spacewalk.py | Python | gpl-2.0 | 1,635 |
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2010 Doug Blank <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# $Id: $
from libwebconnect import *
from gramps.gen.const import GRAMPS_LOCALE as glocale
try:
_trans = glocale.get_addon_translator(__file__)
except ValueError:
_trans = glocale.translation
_ = _trans.gettext
# Format: [[nav_type, id, name, url_pattern], ...]
WEBSITES = [
["Person", "Find-A-Grave", _("Find A Grave"), "http://www.findagrave.com/cgi-bin/fg.cgi?page=gsr&GSfn=%(given)s&GSmn=%(middle)s&GSln=%(surname)s&GSby=%(birth)s&GSbyrel=in&GSdy=%(death)s&GSdyrel=in&GScntry=0&GSst=0&GSgrid=&df=all&GSob=b"],
["Person", "FamilySearch", _("FamilySearch.org"), "https://familysearch.org/search/record/results?count=20&query=%%2Bgivenname%%3A%(given)s~ %%2Bsurname%%3A%(surname)s~ %%2Bbirth_year%%3A%(birth)s-%(birth)s~ %%2Bdeath_year%%3A%(death)s-%(death)s~"],
["Person", "US-Google", _("US Google"), '''http://www.google.com/#hl=en&q="%(surname)s,+%(given)s"'''],
["Person", "GenCircles", "GenCircle", "http://www.gencircles.com/globaltree/gosearch?f=%(surname)s&l=%(given)s&by=%(birth)s&ba=0&bp=&fa=&dy=%(death)s&da=0&mo=&dp=&sp=&t=Marriage&oy=&oa=0&op=&g.x=6&g.y=12"],
["Person", "german-immigrants.com", _("German Immigrants"), "http://www.german-immigrants.com/tng/search.php?mylastname=%(surname)s&myfirstname=%(given)s&mybool=AND&offset=0&search=Search"],
["Person", "Worldconnect-Rootsweb", "WorldConnect", "http://worldconnect.rootsweb.ancestry.com/cgi-bin/igm.cgi?surname=%(surname)s&given=%(given)s"],
["Person", "SSDI-Rootsweb", "Social Security Death Index", "http://ssdi.rootsweb.ancestry.com/cgi-bin/newssdi?sn=%(surname)s&fn=%(given)s&nt=exact"],
["Person", "CaliforniaDeathIndex", "California Death Index", "http://vitals.rootsweb.ancestry.com/ca/death/search.cgi?surname=%(surname)s&given=%(given)s"],
["Person", "SiteSearch", _("Free Rootsweb Site Search"), '''http://sitesearch.rootsweb.ancestry.com/cgi-bin/search?words="%(surname)s+%(given)s"'''],
["Person", "newspapers.nla.gov.au", "Australia / Newspaper Family Notices", "http://trove.nla.gov.au/newspaper/result?q=%(given)s+%(surname)s&exactPhrase=&anyWords=¬Words=&requestHandler=&dateFrom=&dateTo=&l-advcategory=Family+Notices&sortby="], # Australian
["Person", "Geneanet", "Geneanet", "http://search.geneanet.org/result.php?lang=en&name=%(surname)s"],
["Person", "Geneanet-Favrejhas", "Geneanet, Favrejhas", "http://gw1.geneanet.org/index.php3?b=favrejhas&m=NG&n=%(surname)s&t=N&x=0&y=0"], # French
["Person", "Roglo", "Roglo", "http://roglo.eu/roglo?m=NG&n=%(given)s+%(surname)s&t=PN"],
["Person", "Pow-mia", "US POW/MIA", "http://userdb.rootsweb.ancestry.com/pow_mia/cgi-bin/pow_mia.cgi?surname=%(surname)s&fname=%(given)s"],
["Person", "Disnorge", "DIS-Norge", "http://www.disnorge.no/gravminner/global1.php?ordalle=%(given)s+%(surname)s"], # Norway
["Person", "Mocavo", _("Mocavo"), '''http://www.mocavo.com/search?q="%(surname)s,+%(given)s"'''],
["Person", "Hathi Trust Digital Library", _("Hathi Trust Digital Library"), "http://babel.hathitrust.org/cgi/ls?q1=%(surname)s+%(given)s+&lmt=ft&a=srchls"],
["Person", "Open Library", _("Open Library"), "http://openlibrary.org/search?q=%(surname)s, %(given)s"],
]
def load_on_reg(dbstate, uistate, pdata):
# do things at time of load
# now return functions that take:
# dbstate, uistate, nav_type, handle
# and that returns a function that takes a widget
return lambda nav_type: \
make_search_functions(nav_type, WEBSITES)
| sam-m888/addons-source | USWebConnectPack/USWebPack.py | Python | gpl-2.0 | 4,295 |
#coding=utf-8
import random
class StatisRouter(object):
"""
A router to control all database operations on models in the
auth application.
"""
def db_for_read(self, model, **hints):
"""
        Attempts to read statis models go to the statis database.
"""
if model._meta.app_label == 'statis':
return 'statis'
return None
def db_for_write(self, model, **hints):
"""
        Attempts to write statis models go to the statis database.
"""
if model._meta.app_label == 'statis':
return 'statis'
return None
def allow_relation(self, obj1, obj2, **hints):
"""
        Allow relations if a model in the statis app is involved.
"""
if obj1._meta.app_label == 'statis' or\
obj2._meta.app_label == 'statis':
return True
return None
# def allow_migrate(self, db, model):
# """
# Make sure the auth app only appears in the 'auth_db'
# database.
# """
# if db == 'auth_db':
# return model._meta.app_label == 'auth'
# elif model._meta.app_label == 'auth':
# return False
# return None
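# Illustrative settings sketch (dotted paths assumed from this file's location,
# not part of the module): the routers take effect once listed in settings.py,
# e.g.
#
#   DATABASE_ROUTERS = ['web.dbrouter.router.StatisRouter',
#                       'web.dbrouter.router.MasterSlaveRouter']
#
# Django consults them in order, so 'statis' models are pinned to the 'statis'
# database before the remaining reads are spread across the slave pool.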
class MasterSlaveRouter(object):
def db_for_read(self, model, **hints):
"""
Reads go to a randomly-chosen slave.
"""
return random.choice(['slave1', 'slave2'])
def db_for_write(self, model, **hints):
"""
Writes always go to master.
"""
return 'master'
def allow_relation(self, obj1, obj2, **hints):
"""
Relations between objects are allowed if both objects are
in the master/slave pool.
"""
db_list = ('master', 'slave1', 'slave2')
if obj1._state.db in db_list and obj2._state.db in db_list:
return True
return None
def allow_migrate(self, db, model):
"""
All non-auth models end up in this pool.
"""
return True | edisonlz/fruit | web_project/web/dbrouter/router.py | Python | apache-2.0 | 1,986 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.utils.timezone
import django_extensions.db.fields
class Migration(migrations.Migration):
dependencies = [
('instance', '0002_auto_20150530_1255'),
]
operations = [
migrations.CreateModel(
name='InstanceLogEntry',
fields=[
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True, serialize=False)),
('created', django_extensions.db.fields.CreationDateTimeField(verbose_name='created', default=django.utils.timezone.now, editable=False, blank=True)),
('modified', django_extensions.db.fields.ModificationDateTimeField(verbose_name='modified', default=django.utils.timezone.now, editable=False, blank=True)),
('text', models.TextField()),
('level', models.CharField(max_length=5, db_index=True, default='info', choices=[('debug', 'Debug'), ('info', 'Info'), ('warn', 'Warning'), ('error', 'Error'), ('exception', 'Exception')])),
('instance', models.ForeignKey(related_name='logentry_set', to='instance.OpenEdXInstance')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='ServerLogEntry',
fields=[
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True, serialize=False)),
('created', django_extensions.db.fields.CreationDateTimeField(verbose_name='created', default=django.utils.timezone.now, editable=False, blank=True)),
('modified', django_extensions.db.fields.ModificationDateTimeField(verbose_name='modified', default=django.utils.timezone.now, editable=False, blank=True)),
('text', models.TextField()),
('level', models.CharField(max_length=5, db_index=True, default='info', choices=[('debug', 'Debug'), ('info', 'Info'), ('warn', 'Warning'), ('error', 'Error'), ('exception', 'Exception')])),
('server', models.ForeignKey(related_name='logentry_set', to='instance.OpenStackServer')),
],
options={
'abstract': False,
},
),
migrations.RemoveField(
model_name='logentry',
name='instance',
),
migrations.DeleteModel(
name='LogEntry',
),
]
| brousch/opencraft | instance/migrations/0003_auto_20150531_1100.py | Python | agpl-3.0 | 2,499 |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import datetime
import unittest
from unittest import mock
from airflow.models.dag import DAG
from airflow.providers.microsoft.azure.sensors.wasb import WasbBlobSensor, WasbPrefixSensor
class TestWasbBlobSensor(unittest.TestCase):
_config = {
'container_name': 'container',
'blob_name': 'blob',
'wasb_conn_id': 'conn_id',
'timeout': 100,
}
def setUp(self):
args = {'owner': 'airflow', 'start_date': datetime.datetime(2017, 1, 1)}
self.dag = DAG('test_dag_id', default_args=args)
def test_init(self):
sensor = WasbBlobSensor(task_id='wasb_sensor_1', dag=self.dag, **self._config)
assert sensor.container_name == self._config['container_name']
assert sensor.blob_name == self._config['blob_name']
assert sensor.wasb_conn_id == self._config['wasb_conn_id']
assert sensor.check_options == {}
assert sensor.timeout == self._config['timeout']
sensor = WasbBlobSensor(
task_id='wasb_sensor_2', dag=self.dag, check_options={'timeout': 2}, **self._config
)
assert sensor.check_options == {'timeout': 2}
@mock.patch('airflow.providers.microsoft.azure.sensors.wasb.WasbHook', autospec=True)
def test_poke(self, mock_hook):
mock_instance = mock_hook.return_value
sensor = WasbBlobSensor(
task_id='wasb_sensor', dag=self.dag, check_options={'timeout': 2}, **self._config
)
sensor.poke(None)
mock_instance.check_for_blob.assert_called_once_with('container', 'blob', timeout=2)
class TestWasbPrefixSensor(unittest.TestCase):
_config = {
'container_name': 'container',
'prefix': 'prefix',
'wasb_conn_id': 'conn_id',
'timeout': 100,
}
def setUp(self):
args = {'owner': 'airflow', 'start_date': datetime.datetime(2017, 1, 1)}
self.dag = DAG('test_dag_id', default_args=args)
def test_init(self):
sensor = WasbPrefixSensor(task_id='wasb_sensor_1', dag=self.dag, **self._config)
assert sensor.container_name == self._config['container_name']
assert sensor.prefix == self._config['prefix']
assert sensor.wasb_conn_id == self._config['wasb_conn_id']
assert sensor.check_options == {}
assert sensor.timeout == self._config['timeout']
sensor = WasbPrefixSensor(
task_id='wasb_sensor_2', dag=self.dag, check_options={'timeout': 2}, **self._config
)
assert sensor.check_options == {'timeout': 2}
@mock.patch('airflow.providers.microsoft.azure.sensors.wasb.WasbHook', autospec=True)
def test_poke(self, mock_hook):
mock_instance = mock_hook.return_value
sensor = WasbPrefixSensor(
task_id='wasb_sensor', dag=self.dag, check_options={'timeout': 2}, **self._config
)
sensor.poke(None)
mock_instance.check_for_prefix.assert_called_once_with('container', 'prefix', timeout=2)
| apache/incubator-airflow | tests/providers/microsoft/azure/sensors/test_wasb.py | Python | apache-2.0 | 3,763 |
#!/usr/bin/env python
"""
It is used to test the tornado web framework. This can be used:
-change of an underlying library such as ThreadPool
-change of the Tornado configuration (running more Tornado processes)
-Tornado scalability of a certain machine
"""
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
import time
import httplib
class Transaction(object):
def __init__(self):
self.custom_timers = {}
self.url = "/DIRAC/s:DIRAC-Certification/g:dteam_user/ExampleApp/getSelectionData"
self.conn = httplib.HTTPConnection("lhcb-cert-dirac.cern.ch")
def run(self):
# print len(datasets)
start_time = time.time()
self.conn.request("GET", self.url)
r1 = self.conn.getresponse()
if r1.status != 200:
print(r1.status, r1.reason)
_ = r1.read()
end_time = time.time()
self.custom_timers["Tornado_ResponseTime"] = end_time - start_time
if __name__ == "__main__":
trans = Transaction()
trans.run()
print(trans.custom_timers)
| ic-hep/DIRAC | tests/Performance/webserver/test_scripts/web.py | Python | gpl-3.0 | 1,103 |
"""
Specific overrides to the base prod settings to make development easier.
"""
from .aws import * # pylint: disable=wildcard-import, unused-wildcard-import
# Don't use S3 in devstack, fall back to filesystem
del DEFAULT_FILE_STORAGE
MEDIA_ROOT = "/edx/var/edxapp/uploads"
DEBUG = True
USE_I18N = True
TEMPLATE_DEBUG = True
SITE_NAME = 'localhost:8000'
PLATFORM_NAME = ENV_TOKENS.get('PLATFORM_NAME', 'Devstack')
# By default don't use a worker, execute tasks as if they were local functions
CELERY_ALWAYS_EAGER = True
################################ LOGGERS ######################################
import logging
# Disable noisy loggers
for pkg_name in ['track.contexts', 'track.middleware', 'dd.dogapi']:
logging.getLogger(pkg_name).setLevel(logging.CRITICAL)
################################ EMAIL ########################################
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
FEATURES['ENABLE_INSTRUCTOR_EMAIL'] = True # Enable email for all Studio courses
FEATURES['REQUIRE_COURSE_EMAIL_AUTH'] = False # Give all courses email (don't require django-admin perms)
########################## ANALYTICS TESTING ########################
ANALYTICS_SERVER_URL = "http://127.0.0.1:9000/"
ANALYTICS_API_KEY = ""
# Set this to the dashboard URL in order to display the link from the
# dashboard to the Analytics Dashboard.
ANALYTICS_DASHBOARD_URL = None
################################ DEBUG TOOLBAR ################################
INSTALLED_APPS += ('debug_toolbar', 'debug_toolbar_mongo')
MIDDLEWARE_CLASSES += (
'django_comment_client.utils.QueryCountDebugMiddleware',
'debug_toolbar.middleware.DebugToolbarMiddleware',
)
INTERNAL_IPS = ('127.0.0.1',)
DEBUG_TOOLBAR_PANELS = (
'debug_toolbar.panels.versions.VersionsPanel',
'debug_toolbar.panels.timer.TimerPanel',
'debug_toolbar.panels.settings.SettingsPanel',
'debug_toolbar.panels.headers.HeadersPanel',
'debug_toolbar.panels.request.RequestPanel',
'debug_toolbar.panels.sql.SQLPanel',
'debug_toolbar.panels.signals.SignalsPanel',
'debug_toolbar.panels.logging.LoggingPanel',
'debug_toolbar_mongo.panel.MongoDebugPanel',
# ProfilingPanel has been intentionally removed for default devstack.py
# runtimes for performance reasons. If you wish to re-enable it in your
# local development environment, please create a new settings file
# that imports and extends devstack.py.
)
DEBUG_TOOLBAR_CONFIG = {
'SHOW_TOOLBAR_CALLBACK': 'lms.envs.devstack.should_show_debug_toolbar'
}
def should_show_debug_toolbar(_):
return True # We always want the toolbar on devstack regardless of IP, auth, etc.
########################### PIPELINE #################################
# # Skip RequireJS optimizer in development
STATICFILES_STORAGE = 'pipeline.storage.PipelineCachedStorage'
# Whether to run django-require in debug mode.
REQUIRE_DEBUG = DEBUG
PIPELINE_SASS_ARGUMENTS = '--debug-info --require {proj_dir}/static/sass/bourbon/lib/bourbon.rb'.format(proj_dir=PROJECT_ROOT)
########################### VERIFIED CERTIFICATES #################################
FEATURES['AUTOMATIC_VERIFY_STUDENT_IDENTITY_FOR_TESTING'] = True
FEATURES['ENABLE_PAYMENT_FAKE'] = True
CC_PROCESSOR_NAME = 'CyberSource2'
CC_PROCESSOR = {
'CyberSource2': {
"PURCHASE_ENDPOINT": '/shoppingcart/payment_fake/',
"SECRET_KEY": 'abcd123',
"ACCESS_KEY": 'abcd123',
"PROFILE_ID": 'edx',
}
}
########################### External REST APIs #################################
FEATURES['ENABLE_OAUTH2_PROVIDER'] = True
OAUTH_OIDC_ISSUER = 'http://127.0.0.1:8000/oauth2'
FEATURES['ENABLE_MOBILE_REST_API'] = True
FEATURES['ENABLE_VIDEO_ABSTRACTION_LAYER_API'] = True
########################## SECURITY #######################
FEATURES['ENFORCE_PASSWORD_POLICY'] = False
FEATURES['ENABLE_MAX_FAILED_LOGIN_ATTEMPTS'] = False
FEATURES['SQUELCH_PII_IN_LOGS'] = False
FEATURES['PREVENT_CONCURRENT_LOGINS'] = False
FEATURES['ADVANCED_SECURITY'] = False
PASSWORD_MIN_LENGTH = None
PASSWORD_COMPLEXITY = {}
########################### Milestones #################################
FEATURES['MILESTONES_APP'] = True
########################### Entrance Exams #################################
FEATURES['ENTRANCE_EXAMS'] = True
################################ COURSE LICENSES ################################
FEATURES['LICENSING'] = True
########################## Courseware Search #######################
FEATURES['ENABLE_COURSEWARE_SEARCH'] = False
SEARCH_ENGINE = "search.elastic.ElasticSearchEngine"
########################## Dashboard Search #######################
FEATURES['ENABLE_DASHBOARD_SEARCH'] = True
########################## Certificates Web/HTML View #######################
FEATURES['CERTIFICATES_HTML_VIEW'] = True
########################## Course Discovery #######################
from django.utils.translation import ugettext as _
LANGUAGE_MAP = {'terms': {lang: display for lang, display in ALL_LANGUAGES}, 'name': _('Language')}
COURSE_DISCOVERY_MEANINGS = {
'org': {
'name': _('Organization'),
},
'modes': {
'name': _('Course Type'),
'terms': {
'honor': _('Honor'),
'verified': _('Verified'),
},
},
'language': LANGUAGE_MAP,
}
FEATURES['ENABLE_COURSE_DISCOVERY'] = False
FEATURES['COURSES_ARE_BROWSEABLE'] = True
HOMEPAGE_COURSE_MAX = 9
# Software secure fake page feature flag
FEATURES['ENABLE_SOFTWARE_SECURE_FAKE'] = True
# Setting for the testing of Software Secure Result Callback
VERIFY_STUDENT["SOFTWARE_SECURE"] = {
"API_ACCESS_KEY": "BBBBBBBBBBBBBBBBBBBB",
"API_SECRET_KEY": "CCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC",
}
# Skip enrollment start date filtering
SEARCH_SKIP_ENROLLMENT_START_DATE_FILTERING = True
########################## Shopping cart ##########################
FEATURES['ENABLE_SHOPPING_CART'] = True
FEATURES['STORE_BILLING_INFO'] = True
FEATURES['ENABLE_PAID_COURSE_REGISTRATION'] = True
FEATURES['ENABLE_COSMETIC_DISPLAY_PRICE'] = True
########################## Third Party Auth #######################
if FEATURES.get('ENABLE_THIRD_PARTY_AUTH') and 'third_party_auth.dummy.DummyBackend' not in AUTHENTICATION_BACKENDS:
AUTHENTICATION_BACKENDS = ['third_party_auth.dummy.DummyBackend'] + list(AUTHENTICATION_BACKENDS)
############## ECOMMERCE API CONFIGURATION SETTINGS ###############
ECOMMERCE_PUBLIC_URL_ROOT = "http://localhost:8002"
###################### Cross-domain requests ######################
FEATURES['ENABLE_CORS_HEADERS'] = True
CORS_ALLOW_CREDENTIALS = True
CORS_ORIGIN_WHITELIST = ()
CORS_ORIGIN_ALLOW_ALL = True
#####################################################################
# See if the developer has any local overrides.
try:
from .private import * # pylint: disable=wildcard-import
except ImportError:
pass
#####################################################################
# Lastly, run any migrations, if needed.
MODULESTORE = convert_module_store_setting_if_needed(MODULESTORE)
SECRET_KEY = '85920908f28904ed733fe576320db18cabd7b6cd'
| nagyistoce/edx-platform | lms/envs/devstack.py | Python | agpl-3.0 | 7,127 |
from django.conf.urls import patterns, include, url
from django.conf import settings
from catmaid.views import *
import catmaid
import vncbrowser
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
from adminplus.sites import AdminSitePlus
admin.site = AdminSitePlus()
admin.autodiscover()
# CATMAID
urlpatterns = patterns('',
url(r'^', include('catmaid.urls')),
)
# Neuron Catalog
urlpatterns += patterns('',
url(r'^vncbrowser/', include('vncbrowser.urls')),
)
# Admin site
urlpatterns += patterns('',
url(r'^admin/', include(admin.site.urls))
)
if settings.DEBUG:
urlpatterns += patterns('',
(r'^static/(?P<path>.*)$', 'django.views.static.serve', {'document_root': settings.STATIC_ROOT}),
(r'^%s(?P<path>.*)$' % settings.MEDIA_URL.replace(settings.CATMAID_URL, ''),
'django.views.static.serve', {'document_root': settings.MEDIA_ROOT}),
)
| htem/CATMAID | django/projects/mysite/urls.py | Python | agpl-3.0 | 929 |
import matplotlib.pyplot as plt
import numpy as np
execfile('ex5.py')
geo = geo_f.polarExtrude()
t = np.linspace(0.,1.,15+2)[1:-1]
geo.refine(list_t=[t,None])
geo.plotMesh()
plt.show()
| ratnania/pigasus | doc/manual/include/approximation/ex6.py | Python | mit | 206 |
"""update_index_on_discount_policy_discount_code_base.
Revision ID: 36f458047cfd
Revises: 3a585b8d5f8d
Create Date: 2017-03-16 17:04:54.849590
"""
# revision identifiers, used by Alembic.
revision = '36f458047cfd'
down_revision = '3a585b8d5f8d'
from alembic import op
def upgrade():
op.drop_constraint(
'discount_policy_discount_code_base_key', 'discount_policy', type_='unique'
)
op.create_unique_constraint(
'discount_policy_organization_id_discount_code_base_key',
'discount_policy',
['organization_id', 'discount_code_base'],
)
def downgrade():
op.drop_constraint(
'discount_policy_organization_id_discount_code_base_key',
'discount_policy',
type_='unique',
)
op.create_unique_constraint(
'discount_policy_discount_code_base_key',
'discount_policy',
['discount_code_base'],
)
| hasgeek/boxoffice | migrations/versions/36f458047cfd_update_index_on_discount_title.py | Python | agpl-3.0 | 900 |
#!/usr/bin/env python2.5
import sys, struct, commands, getopt, os.path
from pylab import *
from decoder import *
startRule = 20
split = 50
binDir = '../../bin'
resultsDir=''
iniTest = False
try:
opts, args = getopt.gnu_getopt(sys.argv[1:], "hpi",
['resultsDir=',
'dbDir=',
'trainData=',
'devData=',
'testData=',
'output='])
except getopt.GetoptError, exc:
print("ERROR: " + exc.msg)
usage()
sys.exit(2)
for o, a in opts:
if o == "-h":
usage()
sys.exit()
elif o == "-i":
iniTest = True
elif o == "--resultsDir":
resultsDir = a
elif o == "--dbDir":
dbDir = a
elif o == "--trainData":
trainData = a
elif o == "--testData":
testData = a
elif o == "--devData":
devData = a
elif o == "--output":
output = a
def decode(data, db, inPickle, nRules = 0, iniTest = False):
outSem = 'graph.sem'
dcd = Decoder.readDecoderPickle(inPickle)
print dcd.trgCond
try:
dcd.loadDB(db)
dcd.loadData(data)
if iniTest:
dcd.loadTbedData(data+'.ini')
except IOError:
return 0, 0.0, 0.0, 0.0, 0.0
dcd.decode(nRules)
dcd.writeOutput(outSem)
o = commands.getoutput('%s/cuedSemScore.pl -d %s %s' % (binDir, outSem, data))
o = o.split()
return len(dcd.bestRules), o[0], o[1], o[2], o[3]
def decodeSet(data, dbDir, iniTest = False):
inPickle = os.path.join(resultsDir, 'rules.pckl-decoder')
i = startRule
iMax = startRule + 20
nRules = []
acc = []
prec = []
rec = []
fm = []
print data
while i<iMax:
iMax, a, p, r, f = decode(data, dbDir, inPickle, i, iniTest)
try:
acc.append(float(a))
prec.append(float(p))
rec.append(float(r))
fm.append(float(f))
except ValueError:
acc.append(0.0)
prec.append(0.0)
rec.append(0.0)
fm.append(0.0)
nRules.append(i)
print i, iMax, a, (p, r), f
if i+1 == iMax:
break
i += int(iMax/split)
if i > iMax:
i = iMax-1
return acc, prec, rec, fm, nRules
def findMax(data):
max = 0
iMax = 0
for i in range(len(data)):
if data[i]> max:
iMax = i
max = data[i]
return iMax
settingsFN = os.path.join(resultsDir,'settings')
f = file(settingsFN, 'r')
settings = ''
for s in f.readlines():
if s.startswith('TBEDP_TRN_DATA_FILE'):
settings += s+'\n'
if s.startswith('TBEDP_TRG_COND'):
settings += s.replace('{', '').replace('}','').replace("':", '=').replace("'",'')+'\n'
f.close()
outGraph = output+'.eps'
trainCleanAcc, trainCleanPrec, trainCleanRec, trainCleanF, nRules1 = decodeSet(trainData, dbDir, iniTest)
devCleanAcc, devCleanPrec, devCleanRec, devCleanF, nRules2 = decodeSet(devData, dbDir, iniTest)
testCleanAcc, testCleanPrec, testCleanRec, testCleanF, nRules3 = decodeSet(testData, dbDir,iniTest)
fig = figure(figsize=(11.7, 8.3))
title(output+ ' : ' + trainData)
xlabel('nRules - number of used rules')
ylabel('Acc [%], F[%]')
plot(nRules1, trainCleanAcc, "g-.")
plot(nRules1, trainCleanF, "g-")
plot(nRules2, devCleanAcc, "b-.")
plot(nRules2, devCleanF, "b-")
plot(nRules3, testCleanAcc, "r-.")
plot(nRules3, testCleanF, "r-")
legend(("train data - Accurracy",
"train data - Item F-masure",
" dev data - clean - Accurracy",
" dev data - clean - Item F-masure",
" test data - clean - Accurracy",
" test data - clean - Item F-masure"),
loc = "lower right")
grid(True)
i = findMax(devCleanF)
plot([nRules2[i]], [devCleanF[i]], 'bs-')
annotate('Best performance on the dev set.', (nRules2[i], devCleanF[i]),
(int(nRules2[-1]/2), devCleanF[i]-7),
arrowprops=dict(facecolor='black', shrink=0.05, width=1),
fontsize=14)
xlim(xmin=nRules2[0]-2)
text(int(nRules2[-1]/2.5), devCleanF[i]-9, 'Dev data: nRules=%d Acc=%.2f P=%.2f R=%.2f F=%.2f' % (nRules2[i], devCleanAcc[i], devCleanPrec[i], devCleanRec[i], devCleanF[i]), fontsize=12)
text(int(nRules2[-1]/2.5), devCleanF[i]-11, 'Test data: nRules=%d Acc=%.2f P=%.2f R=%.2f F=%.2f' % (nRules2[i], testCleanAcc[i], testCleanPrec[i], testCleanRec[i], testCleanF[i]), fontsize=12)
text(nRules2[0], devCleanF[i]-17, settings)
savefig(outGraph)
print commands.getoutput("epstopdf %s" % (outGraph))
print commands.getoutput("rm -f %s" % (outGraph))
| jurcicek/tbed-parser | src/graph.py | Python | gpl-2.0 | 4,675 |
import collections
import importlib
import inspect
import numpy as np
import pandas as pd
import string
from . import poly
from .tree import Node, Operator
Context = collections.namedtuple('Context', 'context index')
def to_list(l):
return '[' + ', '.join(map(str, l)) + ']'
def to_rexpr(root):
if isinstance(root, str):
return root
if isinstance(root, int):
return str(root)
if isinstance(root, float):
return str(root)
if root.type is Operator('+'):
return ' + '.join(map(to_rexpr, root.args))
if root.type is Operator('-'):
return ' - '.join(map(to_rexpr, root.args))
if root.type is Operator('*'):
return ' * '.join(map(to_rexpr, root.args))
if root.type is Operator('/'):
return ' / '.join(map(to_rexpr, root.args))
if root.type is Operator('I'):
return 'I(%s)' % to_rexpr(root.args[0])
if root.type is Operator('in'):
return '%s' % root.args[0]
if root.type is Operator('sel'):
return '%s[%d]' % (to_rexpr(root.args[0]), root.args[1])
if root.type is Operator('poly'):
return 'poly(%s, %d)' % (to_rexpr(root.args[0]), root.args[1])
if root.type is Operator('log'):
return 'log(%s)' % to_rexpr(root.args[0])
if root.type is Operator('exp'):
return 'exp(%s)' % to_rexpr(root.args[0])
if root.type is Operator('scale'):
return 'rescale(%s, newrange = c(%d, %d))' % (to_rexpr(root.args[0]),
root.args[1], root.args[2])
raise ValueError("unexpected node type: %s" % repr(root.type))
def to_repr(root):
if isinstance(root, str):
return root
if isinstance(root, int):
return "%d" % root
if isinstance(root, float):
return "%f" % root
if root.type is Operator('+'):
return '(+ ' + ' '.join(map(to_repr, root.args)) + ')'
if root.type is Operator('-'):
return '(- ' + ' '.join(map(to_repr, root.args)) + ')'
if root.type is Operator('*'):
return '(* ' + ' '.join(map(to_repr, root.args)) + ')'
if root.type is Operator('/'):
return '(/ ' + ' '.join(map(to_repr, root.args)) + ')'
if root.type is Operator(':'):
return '(* ' + ' '.join(map(to_repr, root.args)) + ')'
if root.type is Operator('=='):
return '(== ' + ' '.join(map(to_repr, root.args)) + ')'
if root.type is Operator('in'):
return '%s' % root.args[0]
if root.type is Operator('I'):
return to_repr(root.args[0])
if root.type is Operator('var'):
return to_repr(root.args[1])
if root.type is Operator('list'):
return '[' + ', '.join(map(str, root.args)) + ']'
if root.type is Operator('sel'):
expr = to_repr(root.args[0])
return '(%s[%d])' % (expr, root.args[1])
#return '%s[%d]' % (expr, root.args[1])
#return '(%s[:, %d])' % (expr, root.args[1])
if root.type is Operator('poly'):
assert len(root.args) == 4, "norm2 and alpha not set"
return '(%s ** %d)' % (to_repr(root.args[0]), root.args[1])
if root.type is Operator('poly_fit'):
return 'poly.ortho_poly_fit(%s, %d)' % (to_repr(root.args[0]), root.args[1])
if root.type is Operator('log'):
return '(log(%s))' % to_repr(root.args[0])
if root.type is Operator('exp'):
return '(exp(%s))' % to_repr(root.args[0])
if root.type is Operator('scale'):
if len(root.args) == 3:
return '(scale(%s, %d, %d))' % (to_repr(root.args[0]),
root.args[1], root.args[2])
if len(root.args) == 5:
return '(scale(%s, %d, %d, %f, %f))' % (to_repr(root.args[0]),
root.args[1], root.args[2],
root.args[3], root.args[4])
raise ValueError("unexpected number of arguments for scale: %s" %
', '.join(map(str, root.args)))
if root.type is Operator('pow'):
return '(pow(%s, %s))' % (to_repr(root.args[0]),
to_repr(root.args[1]))
if root.type is Operator('max'):
return '(max(%s, %s))' % (to_repr(root.args[0]),
to_repr(root.args[1]))
if root.type is Operator('min'):
return '(min(%s, %s))' % (to_repr(root.args[0]),
to_repr(root.args[1]))
if root.type is Operator('clip'):
return '(clip(%s, %s, %s))' % (to_repr(root.args[0]),
to_repr(root.args[1]),
to_repr(root.args[2]))
if root.type is Operator('inv_logit'):
expr = to_repr(root.args[0])
return '(exp(%s) / (1 + exp(%s)))' % (expr, expr)
raise ValueError("unexpected node type: %s" % repr(root.type))
def to_expr(root, ctx=None):
recurse = lambda x: to_expr(x, ctx)
guvec = lambda: ctx is not None and ctx.context == 'guvec'
jit = lambda: ctx is not None and ctx.context == 'jit'
if isinstance(root, str):
return root
if isinstance(root, int):
return "np.float32(%d)" % root
if isinstance(root, float):
return "np.float32(%f)" % root
if root.type is Operator('+'):
return '(' + ' + '.join(map(recurse, root.args)) + ')'
if root.type is Operator('-'):
return '(' + ' - '.join(map(recurse, root.args)) + ')'
if root.type is Operator('*'):
return '(' + ' * '.join(map(recurse, root.args)) + ')'
if root.type is Operator('/'):
return '(' + ' / '.join(map(recurse, root.args)) + ')'
if root.type is Operator(':'):
return '(' + ' * '.join(map(recurse, root.args)) + ')'
if root.type is Operator('=='):
return '(' + ' == '.join(map(recurse, root.args)) + ')'
if root.type is Operator('in'):
if guvec():
return '%s[0]' % root.args[0]
elif jit():
return '%s[%s]' % (root.args[0], ctx.index)
else:
return '%s' % root.args[0]
if root.type is Operator('I'):
return recurse(root.args[0])
if root.type is Operator('var'):
return 'var_%d' % hash(root.args[0])
if root.type is Operator('list'):
return '[' + ', '.join(map(str, root.args)) + ']'
if root.type is Operator('sel'):
expr = recurse(root.args[0])
if isinstance(root.args[0], str):
import pdb; pdb.set_trace()
if guvec():
return '(%s[%d])' % (expr, root.args[1])
elif jit():
if isinstance(root.args[0], Node) and \
root.args[0].type is Operator('in'):
# NOTE: This is a horrible kludge. Naturally a select with an
# input would translate into
# var_333096001[idx][3]
# unfortunately numba is really slow at compiling this. Compile
# time drop from ~4 min to ~40 sec if I use
# var_333096001[idx, 3]
return '%s[%s, %d]' % (to_expr(root.args[0]), ctx.index, root.args[1])
else:
return '%s[%d]' % (expr, root.args[1])
else:
return '(%s[:, %d])' % (expr, root.args[1])
if root.type is Operator('poly'):
assert len(root.args) == 4, "norm2 and alpha not set"
return '(poly.ortho_poly_predict(%s, %s, %s, %d))' % (recurse(root.args[0]),
recurse(root.args[2]),
recurse(root.args[3]),
root.args[1])
if root.type is Operator('poly_fit'):
return 'poly.ortho_poly_fit(%s, %d)' % (recurse(root.args[0]), root.args[1])
if root.type is Operator('log'):
return '(ma.log(%s))' % recurse(root.args[0])
if root.type is Operator('exp'):
return '(ma.exp(%s))' % recurse(root.args[0])
if root.type is Operator('scale'):
if len(root.args) == 3:
return '(poly.scale(%s, %d, %d))' % (recurse(root.args[0]),
root.args[1], root.args[2])
if len(root.args) == 5:
return '(poly.scale(%s, %d, %d, %f, %f))' % (recurse(root.args[0]),
root.args[1], root.args[2],
root.args[3], root.args[4])
raise ValueError("unexpected number of arguments for scale: %s" %
', '.join(map(str, root.args)))
if root.type is Operator('pow'):
return '(np.power(%s, %s))' % (recurse(root.args[0]),
recurse(root.args[1]))
if root.type is Operator('max'):
return '(np.maximum(%s, %s))' % (recurse(root.args[0]),
recurse(root.args[1]))
if root.type is Operator('min'):
return '(np.minimum(%s, %s))' % (recurse(root.args[0]),
recurse(root.args[1]))
if root.type is Operator('clip'):
return '(np.clip(%s, %s, %s))' % (recurse(root.args[0]),
recurse(root.args[1]),
recurse(root.args[2]))
if root.type is Operator('inv_logit'):
expr = recurse(root.args[0])
return '(np.exp(%s) / (1 + np.exp(%s)))' % (expr, expr)
raise ValueError("unexpected node type: %s" % repr(root.type))
def make_inputs(root):
def replace(node):
if isinstance(node, str):
return Node(Operator('in'), (node, 'float32[:]'))
if isinstance(node, Node) and node.type is Operator('in'):
raise StopIteration
return node
return replace(root) if isinstance(root, str) else root.transform(replace)
def find_syms(root, ctx=None):
lsyms = collections.OrderedDict()
if isinstance(root, Node):
for node in root.walk():
if isinstance(node, Node) and node.type == Operator('var'):
name = 'var_%d' % node.args[0]
if name not in lsyms:
lsyms[name] = to_expr(node.args[1], ctx)
return lsyms
def find_inputs(root):
return tuple(find_input_types(root).keys())
def find_input_types(root):
types = collections.OrderedDict()
assert isinstance(root, Node), 'node should be a Node'
for node in root.walk():
if (isinstance(node, Node) and node.type is Operator('in') and
node.args[0] not in types):
types[node.args[0]] = node.args[1:3]
return types
def find_nonvector(root):
letters = list(string.ascii_lowercase)
nonvector = (Operator('poly'), )
nodes = collections.OrderedDict()
def replace(node):
if not isinstance(node, Node):
return node
if node.type is Operator('var') and node.args[1].type in nonvector:
nodes[node.args[0]] = node.args[1]
if node.args[1].type is Operator('poly'):
t = 'float32[:, :]'
else:
t = 'float32[:]'
return Node(Operator('in'), ("var_%d" % node.args[0], t))
if node.type in nonvector:
nodes[hash(node)] = node
if node.type is Operator('poly'):
t = 'float32[:, :]'
else:
t = 'float32[:]'
#return "var_%d" % hash(node)
return Node(Operator('in'), ("var_%d" % hash(node), t))
return node
root.transform(replace)
  ## Walk the tree again to verify there are no non-vectorizable nodes left
for node in root.walk():
if isinstance(node, Node):
assert not node.type in nonvector, 'non-vectorizable operator left'
return nodes
def evalr(root, stab):
if isinstance(stab, pd.DataFrame):
lsyms = dict()
for col in stab.columns:
lsyms[col] = stab[col]
else:
lsyms = stab
for name, expr in find_syms(root).items():
if name not in lsyms:
lsyms[name] = eval(to_expr(expr), None, lsyms)
return eval(to_expr(root), None, lsyms)
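# Usage sketch (hypothetical column names, not part of the original module):
# given a Node tree `root` built by the r2py parser, the expression can be
# evaluated directly against a DataFrame whose columns match the input names:
#
#   df = pd.DataFrame({'hpd': np.arange(4.0), 'unSub': np.ones(4)})
#   yhat = evalr(root, df)
#
# Each `in` leaf is looked up as a column and each `var` node is evaluated once
# and cached in the local symbol table before the final expression is run.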
def to_py(root, fname):
lsyms = find_syms(root)
decls = ["%s = %s" % (name, lsyms[name]) for name in lsyms.keys()]
inputs = find_inputs(root)
prelude = '''
import numpy as np
import numpy.ma as ma
import projections.r2py.poly as poly
'''
expr = 'return ' + to_expr(root)
iodecls = ['%s' % v for v in sorted(set(inputs))]
fdecl = "def %s(%s):" % (fname, ', '.join(iodecls))
fun1 = fdecl + "\n " + ";".join(decls) + "\n " + expr
fdecl = "def %s_st(stab):" % fname
iodecls = ["stab['%s']" % v for v in sorted(set(inputs))]
fun2 = fdecl + "\n return %s(%s)" % (fname, ", ".join(iodecls))
fun3 = "def func_name(): return '%s'" % fname
return prelude + "\n\n\n" + fun1 + "\n\n\n" + fun2 + "\n\n\n" + fun3 + "\n\n"
def to_pyx(root, fname):
lsyms = find_syms(root)
decls = ["cdef np.ndarray %s = %s" % (name, lsyms[name])
for name in lsyms.keys()]
inputs = find_inputs(root)
prelude = '''
cimport cython
import projections.r2py.poly as poly
cimport numpy as np
import numpy.ma as ma
DTYPE = np.float
ctypedef np.float_t DTYPE_t
'''
expr = 'return ' + to_expr(root)
iodecls = ['np.ndarray[DTYPE_t, ndim=1] %s not None' % v
for v in sorted(set(inputs))]
fdecl = ("@cython.boundscheck(False)\n" +
"def %s(%s):" % (fname, ', '.join(iodecls)))
fun1 = fdecl + "\n " + ";".join(decls + [expr])
fun2 = "def func_name(): return '%s'" % fname
return prelude + "\n\n\n" + fun1 + "\n\n\n" + fun2 + "\n\n"
# rreplace('(), (), ()', ',', ' ->', 1)
def rreplace(src, what, repl, num=1):
return repl.join(src.rsplit(what, num))
def to_numba(root, fname, out_name, child=False, ctx=None):
inputs = find_inputs(root)
poly_src = inspect.getsource(poly.ortho_poly_predict)
impts = '''
from numba import jit, guvectorize, float32, int64
import numpy as np
import numpy.ma as ma
import projections.r2py.poly as poly
{poly}
'''.format(poly=poly_src)
## *****
## NOTE: This operation modifies the expression tree.
## *****
io_types = find_input_types(root)
ios = sorted(io_types.keys())
nonvector = find_nonvector(root)
params = ["df['%s']" % v for v in sorted(inputs)]
if nonvector:
vec_fun = to_numba(root, '_' + fname, out_name, child=True)
inner_inputs = sorted(find_inputs(root))
stmts = ["var_%d = %s" % (name, to_expr(nonvector[name]))
for name in nonvector.keys()]
body = '''
{vec_fun}
def {fname}({iodecls}):
{stmts}
return _{fname}({inputs})
'''.format(vec_fun = vec_fun,
fname = fname,
iodecls = ', '.join(ios),
stmts = '\n '.join(stmts),
inputs = ', '.join(inner_inputs))
else:
lsyms = find_syms(root, Context('jit', 'idx'))
stmts = ["%s = %s" % (name, lsyms[name]) for name in lsyms.keys()]
expr = to_expr(root, Context('jit', 'idx'))
nb_types = ', '.join([io_types[x][0] for x in sorted(io_types)])
body = '''
@jit(#[float32[:]({nb_types})],
cache=True, nopython=True, nogil=True)
def {fname}({iodecls}):
res = np.empty({first_in}.size, dtype=np.float32)
for idx in np.arange({first_in}.size):
{stmts}
res[idx] = {expr}
return res
'''.format(nb_types = nb_types,
fname = fname,
iodecls = ', '.join(ios),
first_in = inputs[0],
stmts = "\n ".join(stmts),
expr = expr)
if child:
return body
code = '''
{prelude}
{body}
def {fname}_st(df):
return {fname}({params})
def inputs():
return {in_list}
def output():
return "{out_name}"
def func_name():
return "{fname}"
'''.format(prelude = impts if not child else '',
body = body,
fname = fname,
params = ', '.join(params),
in_list = sorted(inputs),
out_name = out_name)
return code
| ricardog/raster-project | projections/r2py/reval.py | Python | apache-2.0 | 15,063 |
# ctx.py
# Copyright (c) 2013-2020 Pablo Acosta-Serafini
# See LICENSE for details
# pylint: disable=C0111,E1129,R0205,R0903,W0105,W1113
# Standard library imports
from __future__ import print_function
import os
import platform
import shutil
import tempfile
import time
import types
# PyPI imports
import decorator
if os.environ.get("APPVEYOR", None): # pragma: no cover
tempfile.tempdir = os.environ["CITMP"]
###
# Context managers
###
@decorator.contextmanager
def ignored(*exceptions):
"""
Execute commands and selectively ignore exceptions.
Inspired by `"Transforming Code into Beautiful, Idiomatic Python"
<https://pyvideo.org/video/1780/
transforming-code-into-beautiful-idiomatic-pytho>`_ talk at PyCon US
2013 by Raymond Hettinger.
:param exceptions: Exception type(s) to ignore
:type exceptions: Exception object, i.e. RuntimeError, OSError, etc.
For example:
.. =[=cog
.. import os, sys, pmisc, docs.support
.. fname = sys.modules['docs.support'].__file__
.. mdir = os.path.realpath(os.path.dirname(fname))
.. pmisc.incfile('pmisc_example_1.py', cog.out, '1, 6-', mdir)
.. =]=
.. code-block:: python
# pmisc_example_1.py
from __future__ import print_function
import os, pmisc
def ignored_example():
fname = 'somefile.tmp'
open(fname, 'w').close()
print('File {0} exists? {1}'.format(
fname, os.path.isfile(fname)
))
with pmisc.ignored(OSError):
os.remove(fname)
print('File {0} exists? {1}'.format(
fname, os.path.isfile(fname)
))
with pmisc.ignored(OSError):
os.remove(fname)
print('No exception trying to remove a file that does not exists')
try:
with pmisc.ignored(RuntimeError):
os.remove(fname)
except:
print('Got an exception')
.. =[=end=]=
.. code-block:: python
>>> import docs.support.pmisc_example_1
>>> docs.support.pmisc_example_1.ignored_example()
File somefile.tmp exists? True
File somefile.tmp exists? False
No exception trying to remove a file that does not exists
Got an exception
"""
try:
yield
except exceptions:
pass
class Timer(object):
r"""
Time profile of code blocks.
The profiling is done by calculating elapsed time between the context
manager entry and exit time points. Inspired by `Huy Nguyen's blog
<http://pythonic.zoomquiet.top/data/20170602154836/index.html>`_.
:param verbose: Flag that indicates whether the elapsed time is printed
upon exit (True) or not (False)
:type verbose: boolean
:returns: :py:class:`pmisc.Timer`
:raises RuntimeError: Argument \`verbose\` is not valid
For example:
.. =[=cog
.. import os, sys, pmisc, docs.support
.. fname = sys.modules['docs.support'].__file__
.. mdir = os.path.realpath(os.path.dirname(fname))
.. pmisc.incfile('pmisc_example_2.py', cog.out, '1, 6-', mdir)
.. =]=
.. code-block:: python
# pmisc_example_2.py
from __future__ import print_function
import pmisc
def timer(num_tries, fpointer):
with pmisc.Timer() as tobj:
for _ in range(num_tries):
fpointer()
print('Time per call: {0} seconds'.format(
tobj.elapsed_time/(2.0*num_tries)
))
def sample_func():
count = 0
for num in range(0, count):
count += num
.. =[=end=]=
.. code-block:: python
>>> from docs.support.pmisc_example_2 import *
>>> timer(100, sample_func) #doctest: +ELLIPSIS
Time per call: ... seconds
"""
def __init__(self, verbose=False): # noqa
if not isinstance(verbose, bool):
raise RuntimeError("Argument `verbose` is not valid")
self._tstart = None
self._tstop = None
self._elapsed_time = None
self._verbose = verbose
def __enter__(self): # noqa
self._tstart = time.time()
return self
def __exit__(self, exc_type, exc_value, exc_tb): # noqa
self._tstop = time.time()
# time.time() returns time in seconds since the epoch
self._elapsed_time = 1000.0 * (self._tstop - self._tstart)
if self._verbose:
print("Elapsed time: {time}[msec]".format(time=self._elapsed_time))
        return exc_type is None
def _get_elapsed_time(self):
return self._elapsed_time
elapsed_time = property(_get_elapsed_time, doc="Elapsed time")
"""
Returns elapsed time (in milliseconds) between context manager entry and
exit time points
:rtype: float
"""
class TmpDir(object):
r"""
Create a temporary (sub)directory.
:param dpath: Directory under which temporary (sub)directory is to
be created. If None the (sub)directory is created
under the default user's/system temporary directory
:type dpath: string or None
:returns: temporary directory absolute path
:raises RuntimeError: Argument \`dpath\` is not valid
.. warning:: The file name returned uses the forward slash (``/``) as
the path separator regardless of the platform. This avoids
`problems <https://pythonconquerstheuniverse.wordpress.com/2008/06/04/
gotcha-%E2%80%94-backslashes-in-windows-filenames/>`_ with
escape sequences or mistaken Unicode character encodings (``\\user``
for example). Many functions in the os module of the standard library (
`os.path.normpath()
<https://docs.python.org/3/library/os.path.html#os.path.normpath>`_ and
others) can change this path separator to the operating system path
separator if needed
"""
def __init__(self, dpath=None): # noqa
if (dpath is not None) and (
(not isinstance(dpath, str))
or (isinstance(dpath, str) and not os.path.isdir(dpath))
):
raise RuntimeError("Argument `dpath` is not valid")
self._dpath = os.path.abspath(dpath) if (dpath is not None) else dpath
self._dname = None
def __enter__(self): # noqa
dname = tempfile.mkdtemp(dir=self._dpath)
if platform.system().lower() == "windows": # pragma: no cover
dname = dname.replace(os.sep, "/")
self._dname = dname
return self._dname
def __exit__(self, exc_type, exc_value, exc_tb): # noqa
with ignored(OSError):
shutil.rmtree(self._dname)
        return exc_type is None
class TmpFile(object):
r"""
Creates a temporary file that is deleted at context manager exit.
The context manager can optionally set up hooks for a provided function to
write data to the created temporary file.
:param fpointer: Pointer to a function that writes data to file.
If the argument is not None the function pointed to
receives exactly one argument, a file-like object
:type fpointer: function object or None
:param args: Positional arguments for pointer function
:type args: any
:param kwargs: Keyword arguments for pointer function
:type kwargs: any
:returns: temporary file name
:raises RuntimeError: Argument \`fpointer\` is not valid
.. warning:: The file name returned uses the forward slash (``/``) as
the path separator regardless of the platform. This avoids
`problems <https://pythonconquerstheuniverse.wordpress.com/2008/06/04/
gotcha-%E2%80%94-backslashes-in-windows-filenames/>`_ with
escape sequences or mistaken Unicode character encodings (``\\user``
for example). Many functions in the os module of the standard library (
`os.path.normpath()
<https://docs.python.org/3/library/os.path.html#os.path.normpath>`_ and
others) can change this path separator to the operating system path
separator if needed
For example:
.. =[=cog
.. import os, sys, pmisc, docs.support
.. fname = sys.modules['docs.support'].__file__
.. mdir = os.path.realpath(os.path.dirname(fname))
.. pmisc.incfile('pmisc_example_3.py', cog.out, '1, 6-', mdir)
.. =]=
.. code-block:: python
# pmisc_example_3.py
from __future__ import print_function
import pmisc
def write_data(file_handle):
file_handle.write('Hello world!')
def show_tmpfile():
with pmisc.TmpFile(write_data) as fname:
with open(fname, 'r') as fobj:
lines = fobj.readlines()
print('\n'.join(lines))
.. =[=end=]=
.. code-block:: python
>>> from docs.support.pmisc_example_3 import *
>>> show_tmpfile()
Hello world!
"""
def __init__(self, fpointer=None, *args, **kwargs): # noqa
if (
fpointer
and (not isinstance(fpointer, types.FunctionType))
and (not isinstance(fpointer, types.LambdaType))
):
raise RuntimeError("Argument `fpointer` is not valid")
self._fname = None
self._fpointer = fpointer
self._args = args
self._kwargs = kwargs
def __enter__(self): # noqa
fdesc, fname = tempfile.mkstemp()
# fdesc is an OS-level file descriptor, see problems if this
# is not properly closed in this post:
# https://www.logilab.org/blogentry/17873
os.close(fdesc)
if platform.system().lower() == "windows": # pragma: no cover
fname = fname.replace(os.sep, "/")
self._fname = fname
if self._fpointer:
with open(self._fname, "w") as fobj:
self._fpointer(fobj, *self._args, **self._kwargs)
return self._fname
def __exit__(self, exc_type, exc_value, exc_tb): # noqa
with ignored(OSError):
os.remove(self._fname)
        return exc_type is None
| pmacosta/pmisc | pmisc/ctx.py | Python | mit | 10,205 |
from collections import OrderedDict
import csv
import logging
from tempfile import NamedTemporaryFile
import MySQLdb
from airflow.hooks import HiveCliHook, MySqlHook
from airflow.models import BaseOperator
from airflow.utils import apply_defaults
class MySqlToHiveTransfer(BaseOperator):
"""
Moves data from MySql to Hive. The operator runs your query against
MySQL, stores the file locally before loading it into a Hive table.
If the ``create`` or ``recreate`` arguments are set to ``True``,
a ``CREATE TABLE`` and ``DROP TABLE`` statements are generated.
Hive data types are inferred from the cursor's metadata.
Note that the table generated in Hive uses ``STORED AS textfile``
which isn't the most efficient serialization format. If a
large amount of data is loaded and/or if the table gets
queried considerably, you may want to use this operator only to
stage the data into a temporary table before loading it into its
final destination using a ``HiveOperator``.
:param sql: SQL query to execute against the MySQL database
:type sql: str
:param hive_table: target Hive table, use dot notation to target a
specific database
:type hive_table: str
:param create: whether to create the table if it doesn't exist
:type create: bool
:param recreate: whether to drop and recreate the table at every
execution
:type recreate: bool
:param partition: target partition as a dict of partition columns
and values
:type partition: dict
:param delimiter: field delimiter in the file
:type delimiter: str
:param mysql_conn_id: source mysql connection
:type mysql_conn_id: str
    :param hive_cli_conn_id: destination hive connection
    :type hive_cli_conn_id: str
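
    Example (illustrative values; assumes existing ``mysql_default`` and
    ``hive_cli_default`` connections and a ``dag`` object)::

        stage_users = MySqlToHiveTransfer(
            task_id='stage_users',
            sql='SELECT * FROM users',
            hive_table='staging.users',
            partition={'ds': '2015-01-01'},
            mysql_conn_id='mysql_default',
            hive_cli_conn_id='hive_cli_default',
            dag=dag)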
"""
template_fields = ('sql', 'partition', 'hive_table')
template_ext = ('.sql',)
ui_color = '#a0e08c'
@apply_defaults
def __init__(
self,
sql,
hive_table,
create=True,
recreate=False,
partition=None,
delimiter=chr(1),
mysql_conn_id='mysql_default',
hive_cli_conn_id='hive_cli_default',
*args, **kwargs):
super(MySqlToHiveTransfer, self).__init__(*args, **kwargs)
self.sql = sql
self.hive_table = hive_table
self.partition = partition
self.create = create
self.recreate = recreate
self.delimiter = delimiter
self.mysql_conn_id = mysql_conn_id
self.hive_cli_conn_id = hive_cli_conn_id
self.partition = partition or {}
@classmethod
def type_map(cls, mysql_type):
t = MySQLdb.constants.FIELD_TYPE
d = {
t.BIT: 'INT',
t.DECIMAL: 'DOUBLE',
t.DOUBLE: 'DOUBLE',
t.FLOAT: 'DOUBLE',
t.INT24: 'INT',
t.LONG: 'INT',
t.LONGLONG: 'BIGINT',
t.SHORT: 'INT',
t.YEAR: 'INT',
}
return d[mysql_type] if mysql_type in d else 'STRING'
def execute(self, context):
hive = HiveCliHook(hive_cli_conn_id=self.hive_cli_conn_id)
mysql = MySqlHook(mysql_conn_id=self.mysql_conn_id)
logging.info("Dumping MySQL query results to local file")
conn = mysql.get_conn()
cursor = conn.cursor()
cursor.execute(self.sql)
with NamedTemporaryFile("w") as f:
csv_writer = csv.writer(f, delimiter=self.delimiter)
field_dict = OrderedDict()
for field in cursor.description:
field_dict[field[0]] = self.type_map(field[1])
csv_writer.writerows(cursor)
f.flush()
cursor.close()
conn.close()
logging.info("Loading file into Hive")
hive.load_file(
f.name,
self.hive_table,
field_dict=field_dict,
create=self.create,
partition=self.partition,
delimiter=self.delimiter,
recreate=self.recreate)
| smarden1/airflow | airflow/operators/mysql_to_hive.py | Python | apache-2.0 | 4,095 |
from cx_Freeze import setup, Executable
# Dependencies are automatically detected, but it might need
# fine tuning.
buildOptions = dict(packages = ["pyglet", "polytanks", "codecs", "encodings", "selectors"],
excludes = ["tkinter", "PyQt5", "PIL", "setuptools"]
, include_files="assets")
import sys
base = 'Win32GUI' if sys.platform=='win32' else None
executables = [
Executable('main.py', base=base, targetName = 'cliente.exe')
]
setup(name='polytanks-cliente',
version = '1.0',
      description = 'Polytanks client',
options = dict(build_exe = buildOptions),
executables = executables)
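# Build sketch (assumed invocation, not part of the original script): running
#
#   python client_setup.py build
#
# produces a build/exe.<platform>-<python-version>/ directory containing
# cliente.exe together with the copied "assets" folder.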
| dotoscat/Polytank-ASIR | client_setup.py | Python | agpl-3.0 | 626 |
# -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
import logging
from lxml import etree
from superdesk.metadata.item import ITEM_TYPE, CONTENT_TYPE, FORMATS, FORMAT
from superdesk.etree import parse_html
from superdesk.text_utils import get_text
from superdesk.publish import registered_transmitters
formatters = []
logger = logging.getLogger(__name__)
class FormatterRegistry(type):
"""Registry metaclass for formatters."""
def __init__(cls, name, bases, attrs):
"""Register sub-classes of Formatter class when defined."""
super(FormatterRegistry, cls).__init__(name, bases, attrs)
if name != "Formatter":
formatters.append(cls)
class Formatter(metaclass=FormatterRegistry):
"""Base Formatter class for all types of Formatters like News ML 1.2, News ML G2, NITF, etc."""
def __init__(self):
self.can_preview = False
self.can_export = False
self.destination = None
self.subscriber = None
def format(self, article, subscriber, codes=None):
"""Formats the article and returns the transformed string"""
raise NotImplementedError()
def export(self, article, subscriber, codes=None):
"""Formats the article and returns the output string for export"""
raise NotImplementedError()
def can_format(self, format_type, article):
"""Test if formatter can format for given article."""
raise NotImplementedError()
def append_body_footer(self, article):
"""
Checks if the article has any Public Service Announcements and if available appends each of them to the body.
:return: body with public service announcements.
"""
try:
article["body_html"] = article["body_html"].replace("<br>", "<br/>")
except KeyError:
pass
body = ""
if article[ITEM_TYPE] in [CONTENT_TYPE.TEXT, CONTENT_TYPE.PREFORMATTED]:
body = article.get("body_html", "")
elif article[ITEM_TYPE] in [CONTENT_TYPE.AUDIO, CONTENT_TYPE.PICTURE, CONTENT_TYPE.VIDEO]:
body = article.get("description", "")
if body and article.get(FORMAT, "") == FORMATS.PRESERVED:
body = body.replace("\n", "\r\n").replace("\r\r", "\r")
parsed = parse_html(body, content="html")
for br in parsed.xpath("//br"):
br.tail = "\r\n" + br.tail if br.tail else "\r\n"
etree.strip_elements(parsed, "br", with_tail=False)
body = etree.tostring(parsed, encoding="unicode")
if body and article.get("body_footer"):
footer = article.get("body_footer")
if article.get(FORMAT, "") == FORMATS.PRESERVED:
body = "{}\r\n{}".format(body, get_text(footer))
else:
body = "{}{}".format(body, footer)
return body
def append_legal(self, article, truncate=False):
"""
Checks if the article has the legal flag on and adds 'Legal:' to the slugline
:param article: article having the slugline
:param truncate: truncates the slugline to 24 characters
:return: updated slugline
"""
slugline = article.get("slugline", "") or ""
if article.get("flags", {}).get("marked_for_legal", False):
slugline = "{}: {}".format("Legal", slugline)
if truncate:
slugline = slugline[:24]
return slugline
def map_html_to_xml(self, element, html):
"""
Map the html text tags to xml
:param etree.Element element: The xml element to populate
:param str html: the html to parse the text from
:return:
"""
root = parse_html(html, content="html")
        # if there are no p tags, just br tags
if not len(root.xpath("//p")) and len(root.xpath("//br")):
para = etree.SubElement(element, "p")
for br in root.xpath("//br"):
etree.SubElement(para, "br").text = br.text
for p in root.xpath("//p"):
para = etree.SubElement(element, "p")
if len(p.xpath(".//br")) > 0:
for br in p.xpath(".//br"):
etree.SubElement(para, "br").text = br.text
para.text = etree.tostring(p, encoding="unicode", method="text")
        # there are neither p tags nor br tags
if len(list(element)) == 0:
etree.SubElement(element, "p").text = etree.tostring(root, encoding="unicode", method="text")
def set_destination(self, destination=None, subscriber=None):
self.destination = destination
self.subscriber = subscriber
def get_formatter(format_type, article):
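    """Return an instance of the first registered formatter that can format the article, or None."""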
for formatter_cls in formatters:
formatter_instance = formatter_cls()
if formatter_instance.can_format(format_type, article):
return formatter_instance
def get_all_formatters():
"""Return all formatters registered."""
return [formatter_cls() for formatter_cls in formatters]
from .nitf_formatter import NITFFormatter # NOQA
from .ninjs_formatter import NINJSFormatter, NINJS2Formatter # NOQA
from .newsml_1_2_formatter import NewsML12Formatter # NOQA
from .newsml_g2_formatter import NewsMLG2Formatter # NOQA
from .email_formatter import EmailFormatter # NOQA
from .ninjs_newsroom_formatter import NewsroomNinjsFormatter # NOQA
from .idml_formatter import IDMLFormatter # NOQA
from .ninjs_ftp_formatter import FTPNinjsFormatter # NOQA
from .imatrics import IMatricsFormatter # NOQA
| petrjasek/superdesk-core | superdesk/publish/formatters/__init__.py | Python | agpl-3.0 | 5,788 |
#!/usr/bin/env python
# ************** <auto-copyright.pl BEGIN do not edit this line> **************
#
# TweekQt is (C) Copyright 2003-2011 by Patrick Hartling
#
# This application is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of
# the License, or (at your option) any later version.
#
# This application is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this application; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# *************** <auto-copyright.pl END do not edit this line> ***************
import sys
from os import getenv
from os import path
from qt import QApplication, QMimeSourceFactory, QStringList
from pytweek import tweekframe, prefs
class EnvError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
tweek_base_dir = getenv("TWEEK_BASE_DIR")
if not tweek_base_dir:
raise EnvError, "TWEEK_BASE_DIR environment variable not set"
# Construct the path to the images that will be loaded by the GUI.
image_dir = path.join(tweek_base_dir, "share", "tweek", "python", "images")
# Set up PyQt.
app = QApplication(sys.argv)
# XXX: Is there a cleaner way to define a QStringList from Python?
QMimeSourceFactory.defaultFactory().setFilePath(QStringList(image_dir))
# Load the user's preferences file. If it does not exist, it will be created.
prefs = prefs.GlobalPreferences("/home/patrick/.tweekrc")
prefs.parse()
frame = tweekframe.TweekFrame()
# XXX: This is a hack because I am lame.
frame.globalPrefs = prefs
frame.subjMgrList = []
frame.show()
app.exec_loop()
| godbyk/vrjuggler-upstream-old | modules/tweek/python/tweek-qt.py | Python | lgpl-2.1 | 2,014 |
# -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright (C) 2016-2017 Canonical Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
from testtools.matchers import (
DirExists,
FileExists,
Not
)
import integration_tests
class LibraryPrecedenceTestCase(integration_tests.TestCase):
def test_snapped_library_takes_precedence_over_system(self):
self.run_snapcraft('stage', 'fake-curl-library')
self.run_snapcraft(['prime', 'main'])
# Verify that, while the binary was primed, no library was pulled in.
self.assertThat(
os.path.join(self.prime_dir, 'bin', 'main'), FileExists())
self.assertThat(
os.path.join(self.prime_dir, 'lib'), Not(DirExists()))
self.assertThat(
os.path.join(self.prime_dir, 'usr'), Not(DirExists()))
# Prime the rest of the way.
self.run_snapcraft('prime')
# Now verify the lib we got was the one from the snap, not from the
# system.
self.assertThat(
os.path.join(self.prime_dir, 'lib', 'libcurl.so'),
FileExists())
self.assertThat(
os.path.join(self.prime_dir, 'usr'), Not(DirExists()))
| josepht/snapcraft | integration_tests/test_library_precedence.py | Python | gpl-3.0 | 1,749 |
import os
from os.path import abspath, dirname, join
from setuptools import setup
__version__ = None
exec(open("ripe/atlas/tools/version.py").read())
# Allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
# Get proper long description for package
current_dir = dirname(abspath(__file__))
description = open(join(current_dir, "README.rst")).read()
changes = open(join(current_dir, "CHANGES.rst")).read()
long_description = '\n\n'.join([description, changes])
setup(
name="ripe.atlas.tools",
version=__version__,
packages=["ripe", "ripe.atlas", "ripe.atlas.tools"],
namespace_packages=["ripe", "ripe.atlas"],
include_package_data=True,
license="GPLv3",
description="The official command line client for RIPE Atlas",
long_description=long_description,
url="https://github.com/RIPE-NCC/ripe-atlas-tools",
download_url="https://github.com/RIPE-NCC/ripe-atlas-tools",
author="The RIPE Atlas team",
author_email="[email protected]",
maintainer="The RIPE Atlas team",
maintainer_email="[email protected]",
install_requires=[
"IPy",
"python-dateutil",
"requests>=2.7.0",
"ripe.atlas.cousteau>=1.4,<2",
"ripe.atlas.sagan>=1.2,<2",
"tzlocal",
"pyyaml",
"pyOpenSSL>=0.13",
],
tests_require=[
"nose",
"coverage",
"mock==3.0.5",
],
extras_require={
"doc": ["sphinx", "sphinx_rtd_theme"],
"fast": ["ujson"],
},
test_suite="nose.collector",
scripts=[
"scripts/aping",
"scripts/atraceroute",
"scripts/adig",
"scripts/asslcert",
"scripts/ahttp",
"scripts/antp",
"scripts/ripe-atlas",
],
keywords=['RIPE', 'RIPE NCC', 'RIPE Atlas', 'Command Line'],
classifiers=[
"License :: OSI Approved :: GNU General Public License v3 (GPLv3)",
"Operating System :: POSIX",
"Operating System :: Unix",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: Implementation :: CPython",
"Programming Language :: Python :: Implementation :: PyPy",
"Topic :: Internet :: WWW/HTTP",
],
)
| robert-kisteleki/ripe-atlas-tools | setup.py | Python | gpl-3.0 | 2,443 |
"""Exercises for eager loading.
Derived from mailing list-reported problems and trac tickets.
These are generally very old 0.1-era tests and at some point should
be cleaned up and modernized.
"""
import datetime
import sqlalchemy as sa
from sqlalchemy import testing
from sqlalchemy import Integer, String, ForeignKey
from sqlalchemy.testing.schema import Table, Column
from sqlalchemy.orm import mapper, relationship, backref, create_session
from sqlalchemy.testing import eq_
from sqlalchemy.testing import fixtures
class EagerTest(fixtures.MappedTest):
run_deletes = None
run_inserts = "once"
run_setup_mappers = "once"
@classmethod
def define_tables(cls, metadata):
if testing.db.dialect.supports_native_boolean:
false = 'false'
else:
false = "0"
cls.other['false'] = false
Table('owners', metadata ,
Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
Column('data', String(30)))
Table('categories', metadata,
Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
Column('name', String(20)))
Table('tests', metadata ,
Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
Column('owner_id', Integer, ForeignKey('owners.id'),
nullable=False),
Column('category_id', Integer, ForeignKey('categories.id'),
nullable=False))
Table('options', metadata ,
Column('test_id', Integer, ForeignKey('tests.id'), primary_key=True),
Column('owner_id', Integer, ForeignKey('owners.id'), primary_key=True),
Column('someoption', sa.Boolean, server_default=false, nullable=False))
@classmethod
def setup_classes(cls):
class Owner(cls.Basic):
pass
class Category(cls.Basic):
pass
class Thing(cls.Basic):
pass
class Option(cls.Basic):
pass
@classmethod
def setup_mappers(cls):
Category, owners, Option, tests, Thing, Owner, options, categories = (cls.classes.Category,
cls.tables.owners,
cls.classes.Option,
cls.tables.tests,
cls.classes.Thing,
cls.classes.Owner,
cls.tables.options,
cls.tables.categories)
mapper(Owner, owners)
mapper(Category, categories)
mapper(Option, options, properties=dict(
owner=relationship(Owner),
test=relationship(Thing)))
mapper(Thing, tests, properties=dict(
owner=relationship(Owner, backref='tests'),
category=relationship(Category),
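            # owner_option joins on the composite (test_id, owner_id) pair so a
            # Thing resolves to at most one Option for its owner (uselist=False).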
owner_option=relationship(Option,
primaryjoin=sa.and_(tests.c.id == options.c.test_id,
tests.c.owner_id == options.c.owner_id),
foreign_keys=[options.c.test_id, options.c.owner_id],
uselist=False)))
@classmethod
def insert_data(cls):
Owner, Category, Option, Thing = (cls.classes.Owner,
cls.classes.Category,
cls.classes.Option,
cls.classes.Thing)
session = create_session()
o = Owner()
c = Category(name='Some Category')
session.add_all((
Thing(owner=o, category=c),
Thing(owner=o, category=c, owner_option=Option(someoption=True)),
Thing(owner=o, category=c, owner_option=Option())))
session.flush()
def test_noorm(self):
"""test the control case"""
tests, options, categories = (self.tables.tests,
self.tables.options,
self.tables.categories)
# I want to display a list of tests owned by owner 1
# if someoption is false or he hasn't specified it yet (null)
# but not if he set it to true (example someoption is for hiding)
# desired output for owner 1
# test_id, cat_name
# 1 'Some Category'
# 3 "
        # the correct query, written without the ORM
print "Obtaining correct results without orm"
result = sa.select(
[tests.c.id,categories.c.name],
sa.and_(tests.c.owner_id == 1,
sa.or_(options.c.someoption==None,
options.c.someoption==False)),
order_by=[tests.c.id],
from_obj=[tests.join(categories).outerjoin(options, sa.and_(
tests.c.id == options.c.test_id,
tests.c.owner_id == options.c.owner_id))]
).execute().fetchall()
eq_(result, [(1, u'Some Category'), (3, u'Some Category')])
def test_withoutjoinedload(self):
Thing, tests, options = (self.classes.Thing,
self.tables.tests,
self.tables.options)
s = create_session()
l = (s.query(Thing).
select_from(tests.outerjoin(options,
sa.and_(tests.c.id == options.c.test_id,
tests.c.owner_id ==
options.c.owner_id))).
filter(sa.and_(tests.c.owner_id==1,
sa.or_(options.c.someoption==None,
options.c.someoption==False))))
result = ["%d %s" % ( t.id,t.category.name ) for t in l]
eq_(result, [u'1 Some Category', u'3 Some Category'])
def test_withjoinedload(self):
"""
        Test that a joinedload locates the correct "from" clause to which to
        attach, when presented with a query that already has a complicated
        from clause.
"""
Thing, tests, options = (self.classes.Thing,
self.tables.tests,
self.tables.options)
s = create_session()
q=s.query(Thing).options(sa.orm.joinedload('category'))
l=(q.select_from(tests.outerjoin(options,
sa.and_(tests.c.id ==
options.c.test_id,
tests.c.owner_id ==
options.c.owner_id))).
filter(sa.and_(tests.c.owner_id == 1,
sa.or_(options.c.someoption==None,
options.c.someoption==False))))
result = ["%d %s" % ( t.id,t.category.name ) for t in l]
eq_(result, [u'1 Some Category', u'3 Some Category'])
def test_dslish(self):
"""test the same as withjoinedload except using generative"""
Thing, tests, options = (self.classes.Thing,
self.tables.tests,
self.tables.options)
s = create_session()
q = s.query(Thing).options(sa.orm.joinedload('category'))
l = q.filter (
sa.and_(tests.c.owner_id == 1,
sa.or_(options.c.someoption == None,
options.c.someoption == False))
).outerjoin('owner_option')
result = ["%d %s" % ( t.id,t.category.name ) for t in l]
eq_(result, [u'1 Some Category', u'3 Some Category'])
@testing.crashes('sybase', 'FIXME: unknown, verify not fails_on')
def test_without_outerjoin_literal(self):
Thing, tests, false = (self.classes.Thing,
self.tables.tests,
self.other.false)
s = create_session()
q = s.query(Thing).options(sa.orm.joinedload('category'))
l = (q.filter(
(tests.c.owner_id==1) &
('options.someoption is null or options.someoption=%s' % false)).
join('owner_option'))
result = ["%d %s" % ( t.id,t.category.name ) for t in l]
eq_(result, [u'3 Some Category'])
def test_withoutouterjoin(self):
Thing, tests, options = (self.classes.Thing,
self.tables.tests,
self.tables.options)
s = create_session()
q = s.query(Thing).options(sa.orm.joinedload('category'))
l = q.filter(
(tests.c.owner_id==1) &
((options.c.someoption==None) | (options.c.someoption==False))
).join('owner_option')
result = ["%d %s" % ( t.id,t.category.name ) for t in l]
eq_(result, [u'3 Some Category'])
class EagerTest2(fixtures.MappedTest):
@classmethod
def define_tables(cls, metadata):
Table('left', metadata,
Column('id', Integer, ForeignKey('middle.id'), primary_key=True),
Column('data', String(50), primary_key=True))
Table('middle', metadata,
Column('id', Integer, primary_key = True, test_needs_autoincrement=True),
Column('data', String(50)))
Table('right', metadata,
Column('id', Integer, ForeignKey('middle.id'), primary_key=True),
Column('data', String(50), primary_key=True))
@classmethod
def setup_classes(cls):
class Left(cls.Basic):
def __init__(self, data):
self.data = data
class Middle(cls.Basic):
def __init__(self, data):
self.data = data
class Right(cls.Basic):
def __init__(self, data):
self.data = data
@classmethod
def setup_mappers(cls):
Right, Middle, middle, right, left, Left = (cls.classes.Right,
cls.classes.Middle,
cls.tables.middle,
cls.tables.right,
cls.tables.left,
cls.classes.Left)
# set up bi-directional eager loads
mapper(Left, left)
mapper(Right, right)
mapper(Middle, middle, properties=dict(
left=relationship(Left,
lazy='joined',
backref=backref('middle',lazy='joined')),
right=relationship(Right,
lazy='joined',
backref=backref('middle', lazy='joined')))),
@testing.fails_on('maxdb', 'FIXME: unknown')
def test_eager_terminate(self):
"""Eager query generation does not include the same mapper's table twice.
        Or, that bi-directional eager loads don't include each other in eager
query generation.
"""
Middle, Right, Left = (self.classes.Middle,
self.classes.Right,
self.classes.Left)
p = Middle('m1')
p.left.append(Left('l1'))
p.right.append(Right('r1'))
session = create_session()
session.add(p)
session.flush()
session.expunge_all()
obj = session.query(Left).filter_by(data='l1').one()
class EagerTest3(fixtures.MappedTest):
"""Eager loading combined with nested SELECT statements, functions, and aggregates."""
@classmethod
def define_tables(cls, metadata):
Table('datas', metadata,
Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
Column('a', Integer, nullable=False))
Table('foo', metadata,
Column('data_id', Integer, ForeignKey('datas.id'),primary_key=True),
Column('bar', Integer))
Table('stats', metadata,
Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
Column('data_id', Integer, ForeignKey('datas.id')),
Column('somedata', Integer, nullable=False ))
@classmethod
def setup_classes(cls):
class Data(cls.Basic):
pass
class Foo(cls.Basic):
pass
class Stat(cls.Basic):
pass
@testing.fails_on('maxdb', 'FIXME: unknown')
def test_nesting_with_functions(self):
Stat, Foo, stats, foo, Data, datas = (self.classes.Stat,
self.classes.Foo,
self.tables.stats,
self.tables.foo,
self.classes.Data,
self.tables.datas)
mapper(Data, datas)
mapper(Foo, foo, properties={
'data': relationship(Data,backref=backref('foo',uselist=False))})
mapper(Stat, stats, properties={
'data':relationship(Data)})
session = create_session()
data = [Data(a=x) for x in range(5)]
session.add_all(data)
session.add_all((
Stat(data=data[0], somedata=1),
Stat(data=data[1], somedata=2),
Stat(data=data[2], somedata=3),
Stat(data=data[3], somedata=4),
Stat(data=data[4], somedata=5),
Stat(data=data[0], somedata=6),
Stat(data=data[1], somedata=7),
Stat(data=data[2], somedata=8),
Stat(data=data[3], somedata=9),
Stat(data=data[4], somedata=10)))
session.flush()
arb_data = sa.select(
[stats.c.data_id, sa.func.max(stats.c.somedata).label('max')],
stats.c.data_id <= 5,
group_by=[stats.c.data_id])
arb_result = arb_data.execute().fetchall()
# order the result list descending based on 'max'
arb_result.sort(key = lambda a: a['max'], reverse=True)
# extract just the "data_id" from it
arb_result = [row['data_id'] for row in arb_result]
arb_data = arb_data.alias('arb')
# now query for Data objects using that above select, adding the
# "order by max desc" separately
q = (session.query(Data).
options(sa.orm.joinedload('foo')).
select_from(datas.join(arb_data, arb_data.c.data_id == datas.c.id)).
order_by(sa.desc(arb_data.c.max)).
limit(10))
# extract "data_id" from the list of result objects
verify_result = [d.id for d in q]
eq_(verify_result, arb_result)
class EagerTest4(fixtures.MappedTest):
@classmethod
def define_tables(cls, metadata):
Table('departments', metadata,
Column('department_id', Integer, primary_key=True, test_needs_autoincrement=True),
Column('name', String(50)))
Table('employees', metadata,
Column('person_id', Integer, primary_key=True, test_needs_autoincrement=True),
Column('name', String(50)),
Column('department_id', Integer,
ForeignKey('departments.department_id')))
@classmethod
def setup_classes(cls):
class Department(cls.Basic):
pass
class Employee(cls.Basic):
pass
@testing.fails_on('maxdb', 'FIXME: unknown')
def test_basic(self):
Department, Employee, employees, departments = (self.classes.Department,
self.classes.Employee,
self.tables.employees,
self.tables.departments)
mapper(Employee, employees)
mapper(Department, departments, properties=dict(
employees=relationship(Employee,
lazy='joined',
backref='department')))
d1 = Department(name='One')
for e in 'Jim', 'Jack', 'John', 'Susan':
d1.employees.append(Employee(name=e))
d2 = Department(name='Two')
for e in 'Joe', 'Bob', 'Mary', 'Wally':
d2.employees.append(Employee(name=e))
sess = create_session()
sess.add_all((d1, d2))
sess.flush()
q = (sess.query(Department).
join('employees').
filter(Employee.name.startswith('J')).
distinct().
order_by(sa.desc(Department.name)))
eq_(q.count(), 2)
assert q[0] is d2
class EagerTest5(fixtures.MappedTest):
"""Construction of AliasedClauses for the same eager load property but different parent mappers, due to inheritance."""
@classmethod
def define_tables(cls, metadata):
Table('base', metadata,
Column('uid', String(30), primary_key=True),
Column('x', String(30)))
Table('derived', metadata,
Column('uid', String(30), ForeignKey('base.uid'), primary_key=True),
Column('y', String(30)))
Table('derivedII', metadata,
Column('uid', String(30), ForeignKey('base.uid'), primary_key=True),
Column('z', String(30)))
Table('comments', metadata,
Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
Column('uid', String(30), ForeignKey('base.uid')),
Column('comment', String(30)))
@classmethod
def setup_classes(cls):
class Base(cls.Basic):
def __init__(self, uid, x):
self.uid = uid
self.x = x
class Derived(Base):
def __init__(self, uid, x, y):
self.uid = uid
self.x = x
self.y = y
class DerivedII(Base):
def __init__(self, uid, x, z):
self.uid = uid
self.x = x
self.z = z
class Comment(cls.Basic):
def __init__(self, uid, comment):
self.uid = uid
self.comment = comment
def test_basic(self):
Comment, Derived, derived, comments, DerivedII, Base, base, derivedII = (self.classes.Comment,
self.classes.Derived,
self.tables.derived,
self.tables.comments,
self.classes.DerivedII,
self.classes.Base,
self.tables.base,
self.tables.derivedII)
commentMapper = mapper(Comment, comments)
baseMapper = mapper(Base, base, properties=dict(
comments=relationship(Comment, lazy='joined',
cascade='all, delete-orphan')))
mapper(Derived, derived, inherits=baseMapper)
mapper(DerivedII, derivedII, inherits=baseMapper)
sess = create_session()
d = Derived('uid1', 'x', 'y')
d.comments = [Comment('uid1', 'comment')]
d2 = DerivedII('uid2', 'xx', 'z')
d2.comments = [Comment('uid2', 'comment')]
sess.add_all((d, d2))
sess.flush()
sess.expunge_all()
# this eager load sets up an AliasedClauses for the "comment"
# relationship, then stores it in clauses_by_lead_mapper[mapper for
# Derived]
d = sess.query(Derived).get('uid1')
sess.expunge_all()
assert len([c for c in d.comments]) == 1
# this eager load sets up an AliasedClauses for the "comment"
# relationship, and should store it in clauses_by_lead_mapper[mapper
        # for DerivedII]. the bug was that the previous AliasedClauses creation
# prevented this population from occurring.
d2 = sess.query(DerivedII).get('uid2')
sess.expunge_all()
        # object is not in the session; therefore the lazy load can't trigger
        # here, so the eager load had to succeed
assert len([c for c in d2.comments]) == 1
class EagerTest6(fixtures.MappedTest):
@classmethod
def define_tables(cls, metadata):
Table('design_types', metadata,
Column('design_type_id', Integer, primary_key=True, test_needs_autoincrement=True))
Table('design', metadata,
Column('design_id', Integer, primary_key=True, test_needs_autoincrement=True),
Column('design_type_id', Integer,
ForeignKey('design_types.design_type_id')))
Table('parts', metadata,
Column('part_id', Integer, primary_key=True, test_needs_autoincrement=True),
Column('design_id', Integer, ForeignKey('design.design_id')),
Column('design_type_id', Integer,
ForeignKey('design_types.design_type_id')))
Table('inherited_part', metadata,
Column('ip_id', Integer, primary_key=True, test_needs_autoincrement=True),
Column('part_id', Integer, ForeignKey('parts.part_id')),
Column('design_id', Integer, ForeignKey('design.design_id')))
@classmethod
def setup_classes(cls):
class Part(cls.Basic):
pass
class Design(cls.Basic):
pass
class DesignType(cls.Basic):
pass
class InheritedPart(cls.Basic):
pass
def test_one(self):
Part, inherited_part, design_types, DesignType, parts, design, Design, InheritedPart = (self.classes.Part,
self.tables.inherited_part,
self.tables.design_types,
self.classes.DesignType,
self.tables.parts,
self.tables.design,
self.classes.Design,
self.classes.InheritedPart)
p_m = mapper(Part, parts)
mapper(InheritedPart, inherited_part, properties=dict(
part=relationship(Part, lazy='joined')))
d_m = mapper(Design, design, properties=dict(
inheritedParts=relationship(InheritedPart,
cascade="all, delete-orphan",
backref="design")))
mapper(DesignType, design_types)
d_m.add_property(
"type", relationship(DesignType, lazy='joined', backref="designs"))
p_m.add_property(
"design", relationship(
Design, lazy='joined',
backref=backref("parts", cascade="all, delete-orphan")))
d = Design()
sess = create_session()
sess.add(d)
sess.flush()
sess.expunge_all()
x = sess.query(Design).get(1)
x.inheritedParts
class EagerTest7(fixtures.MappedTest):
@classmethod
def define_tables(cls, metadata):
Table('companies', metadata,
Column('company_id', Integer, primary_key=True, test_needs_autoincrement=True),
Column('company_name', String(40)))
Table('addresses', metadata,
Column('address_id', Integer, primary_key=True,test_needs_autoincrement=True),
Column('company_id', Integer, ForeignKey("companies.company_id")),
Column('address', String(40)))
Table('phone_numbers', metadata,
Column('phone_id', Integer, primary_key=True,test_needs_autoincrement=True),
Column('address_id', Integer, ForeignKey('addresses.address_id')),
Column('type', String(20)),
Column('number', String(10)))
Table('invoices', metadata,
Column('invoice_id', Integer, primary_key=True, test_needs_autoincrement=True),
Column('company_id', Integer, ForeignKey("companies.company_id")),
Column('date', sa.DateTime))
@classmethod
def setup_classes(cls):
class Company(cls.Comparable):
pass
class Address(cls.Comparable):
pass
class Phone(cls.Comparable):
pass
class Invoice(cls.Comparable):
pass
def test_load_m2o_attached_to_o2(self):
"""
        Tests eager load of a many-to-one attached to a one-to-many. This
testcase illustrated the bug, which is that when the single Company is
loaded, no further processing of the rows occurred in order to load
the Company's second Address object.
"""
addresses, invoices, Company, companies, Invoice, Address = (self.tables.addresses,
self.tables.invoices,
self.classes.Company,
self.tables.companies,
self.classes.Invoice,
self.classes.Address)
mapper(Address, addresses)
mapper(Company, companies, properties={
'addresses' : relationship(Address, lazy='joined')})
mapper(Invoice, invoices, properties={
'company': relationship(Company, lazy='joined')})
a1 = Address(address='a1 address')
a2 = Address(address='a2 address')
c1 = Company(company_name='company 1', addresses=[a1, a2])
i1 = Invoice(date=datetime.datetime.now(), company=c1)
session = create_session()
session.add(i1)
session.flush()
company_id = c1.company_id
invoice_id = i1.invoice_id
session.expunge_all()
c = session.query(Company).get(company_id)
session.expunge_all()
i = session.query(Invoice).get(invoice_id)
def go():
eq_(c, i.company)
eq_(c.addresses, i.company.addresses)
self.assert_sql_count(testing.db, go, 0)
class EagerTest8(fixtures.MappedTest):
@classmethod
def define_tables(cls, metadata):
Table('prj', metadata,
Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
Column('created', sa.DateTime ),
Column('title', sa.Unicode(100)))
Table('task', metadata,
Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
Column('status_id', Integer,
ForeignKey('task_status.id'), nullable=False),
Column('title', sa.Unicode(100)),
Column('task_type_id', Integer ,
ForeignKey('task_type.id'), nullable=False),
Column('prj_id', Integer , ForeignKey('prj.id'), nullable=False))
Table('task_status', metadata,
Column('id', Integer, primary_key=True, test_needs_autoincrement=True))
Table('task_type', metadata,
Column('id', Integer, primary_key=True, test_needs_autoincrement=True))
Table('msg', metadata,
Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
Column('posted', sa.DateTime, index=True,),
Column('type_id', Integer, ForeignKey('msg_type.id')),
Column('task_id', Integer, ForeignKey('task.id')))
Table('msg_type', metadata,
Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
Column('name', sa.Unicode(20)),
Column('display_name', sa.Unicode(20)))
@classmethod
def fixtures(cls):
return dict(
prj=(('id',),
(1,)),
task_status=(('id',),
(1,)),
task_type=(('id',),
(1,),),
task=(('title', 'task_type_id', 'status_id', 'prj_id'),
(u'task 1', 1, 1, 1)))
@classmethod
def setup_classes(cls):
class Task_Type(cls.Comparable):
pass
class Joined(cls.Comparable):
pass
@testing.fails_on('maxdb', 'FIXME: unknown')
def test_nested_joins(self):
task, Task_Type, Joined, prj, task_type, msg = (self.tables.task,
self.classes.Task_Type,
self.classes.Joined,
self.tables.prj,
self.tables.task_type,
self.tables.msg)
# this is testing some subtle column resolution stuff,
# concerning corresponding_column() being extremely accurate
# as well as how mapper sets up its column properties
mapper(Task_Type, task_type)
tsk_cnt_join = sa.outerjoin(prj, task, task.c.prj_id==prj.c.id)
j = sa.outerjoin(task, msg, task.c.id==msg.c.task_id)
jj = sa.select([ task.c.id.label('task_id'),
sa.func.count(msg.c.id).label('props_cnt')],
from_obj=[j],
group_by=[task.c.id]).alias('prop_c_s')
jjj = sa.join(task, jj, task.c.id == jj.c.task_id)
mapper(Joined, jjj, properties=dict(
type=relationship(Task_Type, lazy='joined')))
session = create_session()
eq_(session.query(Joined).limit(10).offset(0).one(),
Joined(id=1, title=u'task 1', props_cnt=0))
class EagerTest9(fixtures.MappedTest):
"""Test the usage of query options to eagerly load specific paths.
This relies upon the 'path' construct used by PropertyOption to relate
LoaderStrategies to specific paths, as well as the path state maintained
throughout the query setup/mapper instances process.
"""
@classmethod
def define_tables(cls, metadata):
Table('accounts', metadata,
Column('account_id', Integer, primary_key=True, test_needs_autoincrement=True),
Column('name', String(40)))
Table('transactions', metadata,
Column('transaction_id', Integer, primary_key=True, test_needs_autoincrement=True),
Column('name', String(40)))
Table('entries', metadata,
Column('entry_id', Integer, primary_key=True, test_needs_autoincrement=True),
Column('name', String(40)),
Column('account_id', Integer,
ForeignKey('accounts.account_id')),
Column('transaction_id', Integer,
ForeignKey('transactions.transaction_id')))
@classmethod
def setup_classes(cls):
class Account(cls.Basic):
pass
class Transaction(cls.Basic):
pass
class Entry(cls.Basic):
pass
@classmethod
def setup_mappers(cls):
Account, Transaction, transactions, accounts, entries, Entry = (cls.classes.Account,
cls.classes.Transaction,
cls.tables.transactions,
cls.tables.accounts,
cls.tables.entries,
cls.classes.Entry)
mapper(Account, accounts)
mapper(Transaction, transactions)
mapper(Entry, entries, properties=dict(
account=relationship(Account,
uselist=False,
backref=backref('entries', lazy='select',
order_by=entries.c.entry_id)),
transaction=relationship(Transaction,
uselist=False,
backref=backref('entries', lazy='joined',
order_by=entries.c.entry_id))))
@testing.fails_on('maxdb', 'FIXME: unknown')
def test_joinedload_on_path(self):
Entry, Account, Transaction = (self.classes.Entry,
self.classes.Account,
self.classes.Transaction)
session = create_session()
tx1 = Transaction(name='tx1')
tx2 = Transaction(name='tx2')
acc1 = Account(name='acc1')
ent11 = Entry(name='ent11', account=acc1, transaction=tx1)
ent12 = Entry(name='ent12', account=acc1, transaction=tx2)
acc2 = Account(name='acc2')
ent21 = Entry(name='ent21', account=acc2, transaction=tx1)
ent22 = Entry(name='ent22', account=acc2, transaction=tx2)
session.add(acc1)
session.flush()
session.expunge_all()
def go():
# load just the first Account. eager loading will actually load
# all objects saved thus far, but will not eagerly load the
# "accounts" off the immediate "entries"; only the "accounts" off
# the entries->transaction->entries
acc = (session.query(Account).
options(sa.orm.joinedload_all('entries.transaction.entries.account')).
order_by(Account.account_id)).first()
# no sql occurs
eq_(acc.name, 'acc1')
eq_(acc.entries[0].transaction.entries[0].account.name, 'acc1')
eq_(acc.entries[0].transaction.entries[1].account.name, 'acc2')
# lazyload triggers but no sql occurs because many-to-one uses
# cached query.get()
for e in acc.entries:
assert e.account is acc
self.assert_sql_count(testing.db, go, 1)
| rclmenezes/sqlalchemy | test/orm/test_assorted_eager.py | Python | mit | 33,140 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2011 Nick Hall
# Copyright (C) 2011 Rob G. Healey <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#-------------------------------------------------------------------------
#
# Python modules
#
#-------------------------------------------------------------------------
import os
#-------------------------------------------------------------------------
#
# GNOME modules
#
#-------------------------------------------------------------------------
from gi.repository import Gtk
from gi.repository import GExiv2
#-------------------------------------------------------------------------
#
# Gramps modules
#
#-------------------------------------------------------------------------
from gramps.gui.listmodel import ListModel
from gramps.gen.const import GRAMPS_LOCALE as glocale
_ = glocale.translation.gettext
from gramps.gen.utils.place import conv_lat_lon
from fractions import Fraction
from gramps.gen.lib import Date
from gramps.gen.datehandler import displayer
from datetime import datetime
def format_datetime(datestring):
"""
Convert an exif timestamp into a string for display, using the
standard Gramps date format.
"""
try:
timestamp = datetime.strptime(datestring, '%Y:%m:%d %H:%M:%S')
except ValueError:
return _('Invalid format')
date_part = Date()
date_part.set_yr_mon_day(timestamp.year, timestamp.month, timestamp.day)
date_str = displayer.display(date_part)
time_str = _('%(hr)02d:%(min)02d:%(sec)02d') % {'hr': timestamp.hour,
'min': timestamp.minute,
'sec': timestamp.second}
return _('%(date)s %(time)s') % {'date': date_str, 'time': time_str}
def format_gps(raw_dms, nsew):
"""
Convert raw degrees, minutes, seconds and a direction
reference into a string for display.
"""
value = 0.0
divisor = 1.0
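    # raw_dms holds degrees, minutes and seconds as rational "num/den" strings;
    # accumulate value = degrees + minutes/60 + seconds/3600.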
for val in raw_dms.split(' '):
try:
num = float(val.split('/')[0]) / float(val.split('/')[1])
except (ValueError, IndexError):
value = None
break
value += num / divisor
divisor *= 60
if nsew == 'N':
result = conv_lat_lon(str(value), '0', 'DEG')[0]
elif nsew == 'S':
result = conv_lat_lon('-' + str(value), '0', 'DEG')[0]
elif nsew == 'E':
result = conv_lat_lon('0', str(value), 'DEG')[1]
elif nsew == 'W':
result = conv_lat_lon('0', '-' + str(value), 'DEG')[1]
else:
result = None
return result if result is not None else _('Invalid format')
DESCRIPTION = _('Description')
IMAGE = _('Image')
CAMERA = _('Camera')
GPS = _('GPS')
ADVANCED = _('Advanced')
TAGS = [(DESCRIPTION, 'Exif.Image.ImageDescription', None, None),
(DESCRIPTION, 'Exif.Image.Artist', None, None),
(DESCRIPTION, 'Exif.Image.Copyright', None, None),
(DESCRIPTION, 'Exif.Photo.DateTimeOriginal', None, format_datetime),
(DESCRIPTION, 'Exif.Photo.DateTimeDigitized', None, format_datetime),
(DESCRIPTION, 'Exif.Image.DateTime', None, format_datetime),
(DESCRIPTION, 'Exif.Image.TimeZoneOffset', None, None),
(DESCRIPTION, 'Exif.Image.XPSubject', None, None),
(DESCRIPTION, 'Exif.Image.XPComment', None, None),
(DESCRIPTION, 'Exif.Image.XPKeywords', None, None),
(DESCRIPTION, 'Exif.Image.Rating', None, None),
(IMAGE, 'Exif.Image.DocumentName', None, None),
(IMAGE, 'Exif.Photo.PixelXDimension', None, None),
(IMAGE, 'Exif.Photo.PixelYDimension', None, None),
(IMAGE, 'Exif.Image.XResolution', 'Exif.Image.ResolutionUnit', None),
(IMAGE, 'Exif.Image.YResolution', 'Exif.Image.ResolutionUnit', None),
(IMAGE, 'Exif.Image.Orientation', None, None),
(IMAGE, 'Exif.Photo.ColorSpace', None, None),
(IMAGE, 'Exif.Image.YCbCrPositioning', None, None),
(IMAGE, 'Exif.Photo.ComponentsConfiguration', None, None),
(IMAGE, 'Exif.Image.Compression', None, None),
(IMAGE, 'Exif.Photo.CompressedBitsPerPixel', None, None),
(IMAGE, 'Exif.Image.PhotometricInterpretation', None, None),
(CAMERA, 'Exif.Image.Make', None, None),
(CAMERA, 'Exif.Image.Model', None, None),
(CAMERA, 'Exif.Photo.FNumber', None, None),
(CAMERA, 'Exif.Photo.ExposureTime', None, None),
(CAMERA, 'Exif.Photo.ISOSpeedRatings', None, None),
(CAMERA, 'Exif.Photo.FocalLength', None, None),
(CAMERA, 'Exif.Photo.FocalLengthIn35mmFilm', None, None),
(CAMERA, 'Exif.Photo.MaxApertureValue', None, None),
(CAMERA, 'Exif.Photo.MeteringMode', None, None),
(CAMERA, 'Exif.Photo.ExposureProgram', None, None),
(CAMERA, 'Exif.Photo.ExposureBiasValue', None, None),
(CAMERA, 'Exif.Photo.Flash', None, None),
(CAMERA, 'Exif.Image.FlashEnergy', None, None),
(CAMERA, 'Exif.Image.SelfTimerMode', None, None),
(CAMERA, 'Exif.Image.SubjectDistance', None, None),
(CAMERA, 'Exif.Photo.Contrast', None, None),
(CAMERA, 'Exif.Photo.LightSource', None, None),
(CAMERA, 'Exif.Photo.Saturation', None, None),
(CAMERA, 'Exif.Photo.Sharpness', None, None),
(CAMERA, 'Exif.Photo.WhiteBalance', None, None),
(CAMERA, 'Exif.Photo.DigitalZoomRatio', None, None),
(GPS, 'Exif.GPSInfo.GPSLatitude',
'Exif.GPSInfo.GPSLatitudeRef', format_gps),
(GPS, 'Exif.GPSInfo.GPSLongitude',
'Exif.GPSInfo.GPSLongitudeRef', format_gps),
(GPS, 'Exif.GPSInfo.GPSAltitude',
'Exif.GPSInfo.GPSAltitudeRef', None),
(GPS, 'Exif.GPSInfo.GPSTimeStamp', None, None),
(GPS, 'Exif.GPSInfo.GPSSatellites', None, None),
(ADVANCED, 'Exif.Image.Software', None, None),
(ADVANCED, 'Exif.Photo.ImageUniqueID', None, None),
(ADVANCED, 'Exif.Image.CameraSerialNumber', None, None),
(ADVANCED, 'Exif.Photo.ExifVersion', None, None),
(ADVANCED, 'Exif.Photo.FlashpixVersion', None, None),
(ADVANCED, 'Exif.Image.ExifTag', None, None),
(ADVANCED, 'Exif.Image.GPSTag', None, None),
(ADVANCED, 'Exif.Image.BatteryLevel', None, None)]
class MetadataView(Gtk.TreeView):
def __init__(self):
Gtk.TreeView.__init__(self)
self.sections = {}
titles = [(_('Key'), 1, 235),
(_('Value'), 2, 325)]
self.model = ListModel(self, titles, list_mode="tree")
def display_exif_tags(self, full_path):
"""
Display the exif tags.
"""
self.sections = {}
self.model.clear()
if not os.path.exists(full_path):
return False
retval = False
with open(full_path, 'rb') as fd:
try:
                metadata = GExiv2.Metadata(full_path)
get_human = metadata.get_tag_interpreted_string
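                # Each TAGS entry is (section, key, optional secondary key,
                # optional formatter); only keys present in the file's EXIF
                # data are displayed.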
for section, key, key2, func in TAGS:
if not key in metadata.get_exif_tags():
continue
if func is not None:
if key2 is None:
human_value = func(metadata[key])
else:
if key2 in metadata.get_exif_tags():
human_value = func(metadata[key], metadata[key2])
else:
human_value = func(metadata[key], None)
else:
human_value = get_human(key)
if key2 in metadata.get_exif_tags():
human_value += ' ' + get_human(key2)
label = metadata.get_tag_label(key)
node = self.__add_section(section)
if human_value is None:
human_value = ''
self.model.add((label, human_value), node=node)
self.model.tree.expand_all()
retval = self.model.count > 0
except:
pass
return retval
def __add_section(self, section):
"""
Add the section heading node to the model.
"""
if section not in self.sections:
node = self.model.add([section, ''])
self.sections[section] = node
else:
node = self.sections[section]
return node
def get_has_data(self, full_path):
"""
Return True if the gramplet has data, else return False.
"""
if not os.path.exists(full_path):
return False
with open(full_path, 'rb') as fd:
retval = False
try:
                metadata = GExiv2.Metadata(full_path)
                for tag in TAGS:
                    if tag[1] in metadata.get_exif_tags():
retval = True
break
except:
pass
return retval
| pmghalvorsen/gramps_branch | gramps/plugins/lib/libmetadata.py | Python | gpl-2.0 | 9,740 |
import random, presto, math, Numeric
def copyorb(old, new):
"""
copyorb(old, new):
Copy an orbitparams variable from 'old' to 'new'.
"""
new.p = old.p
new.e = old.e
new.x = old.x
new.w = old.w
new.t = old.t
new.wd = old.wd
new.pd = old.pd
return new
def fake_mspsr(companion='None', psrp=None, orbp=None,
orbx=None, orbe=None, orbw=None, orbt=None):
"""
fake_mspsr(companion='None'):
Generate random pulsar parameters.
Returns a psrparams structure.
Keyword Arguments:
companion -- the companion type ('WD', 'NS', 'BH', default = None)
Note: All of the following default to random values.
psrp -- pulsar period in sec.
orbp -- orbital period in sec.
orbx -- projected orbital semi-major axis in lt-sec.
orbe -- orbital eccentricity.
orbw -- argument of periapsis in degrees.
orbt -- time since last periapsis passage in sec.
"""
global mpsr, mc, mwd, mns, mbh
psr = presto.psrparams()
psr.jname = 'Fake PSR'
psr.bname = 'Fake PSR'
psr.ntype = 0; psr.ra2000 = 0.0; psr.dec2000 = 0.0
psr.dm = 0.0; psr.dist = 0.0
psr.pd = 0.0; psr.pdd = 0.0
psr.fd = 0.0; psr.fdd = 0.0
if not psrp: psr.p = random.uniform(0.002, 0.07)
else: psr.p = psrp
if psr.p < 0.03: psr.ntype = psr.ntype | 16
psr.f = 1.0 / psr.p
psr.fwhm = random.uniform(0.05, 0.5) * psr.p * 1000.0
psr.timepoch = 0.0
if companion=='WD':
mwd = random.gauss(0.25, 0.10)
mc = mwd;
if not orbe: psr.orb.e = random.expovariate(80.0)
else: psr.orb.e = orbe
ctype = 'WD'
psr.ntype = psr.ntype | 8
elif companion=='NS':
mns = random.gauss(1.35, 0.04)
mc = mns;
if not orbe: psr.orb.e = abs(random.uniform(0.1, 0.8))
else: psr.orb.e = orbe
ctype = 'NS'
psr.ntype = psr.ntype | 8
elif companion=='BH':
mbh = random.gauss(6.0, 1.0)
mc = mbh;
if not orbe: psr.orb.e = abs(random.uniform(0.1, 0.8))
else: psr.orb.e = orbe
ctype = 'BH'
psr.ntype = psr.ntype | 8
else:
psr.orb.p = 0.0; psr.orb.x = 0.0; psr.orb.e = 0.0;
psr.orb.w = 0.0; psr.orb.t = 0.0; psr.orb.pd = 0.0;
psr.orb.wd = 0.0;
mc = 0.0; ctype = 'None'
if ctype!='None':
# The following is from Thorsett and Chakrabarty, 1988.
mpsr = random.gauss(1.35, 0.04)
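        # Draw an isotropic inclination (uniform in cos i) and form the binary
        # mass function f(m) = (mc * sin i)**3 / (mc + mpsr)**2 in solar masses.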
inc = math.acos(random.uniform(0.0, 1.0))
mf = (mc * math.sin(inc))**3 / (mc + mpsr)**2
if not orbp: psr.orb.p = random.uniform(20.0, 600.0) * 60.0
else: psr.orb.p = orbp
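        # Kepler's third law gives the projected semi-major axis in lt-sec:
        # x = (f(m) * P**2 * G*Msun / (4*pi**2*c**3))**(1/3); the constant
        # below is G*Msun / (4*pi**2*c**3) ~= 1.2476e-07 in these units.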
if not orbx: psr.orb.x = (mf * psr.orb.p**2
* 1.24764143192e-07)**(1.0 / 3.0)
else: psr.orb.x = orbx
if not orbw: psr.orb.w = random.uniform(0.0, 360.0)
else: psr.orb.w = orbw
if not orbt: psr.orb.t = random.uniform(0.0, psr.orb.p)
else: psr.orb.t = orbt
return psr
| pscholz/presto | python/orbitstuff.py | Python | gpl-2.0 | 3,069 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Initiative'
db.create_table(u'multilingual_initiatives_initiative', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('logo', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['filer.File'], null=True, blank=True)),
('start_date', self.gf('django.db.models.fields.DateField')(null=True, blank=True)),
('end_date', self.gf('django.db.models.fields.DateField')(null=True, blank=True)),
('description', self.gf('django.db.models.fields.CharField')(max_length=160, blank=True)),
('website', self.gf('django.db.models.fields.URLField')(max_length=200, blank=True)),
('phone', self.gf('django.db.models.fields.CharField')(max_length=128, blank=True)),
('organization', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['multilingual_orgs.Organization'], null=True, blank=True)),
))
db.send_create_signal(u'multilingual_initiatives', ['Initiative'])
# Adding model 'InitiativePluginModel'
db.create_table(u'cmsplugin_initiativepluginmodel', (
(u'cmsplugin_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['cms.CMSPlugin'], unique=True, primary_key=True)),
('display_type', self.gf('django.db.models.fields.CharField')(max_length=256)),
('initiative', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['multilingual_initiatives.Initiative'])),
))
db.send_create_signal(u'multilingual_initiatives', ['InitiativePluginModel'])
# Adding model 'InitiativeTranslation'
db.create_table(u'multilingual_initiatives_initiativetranslation', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('title', self.gf('django.db.models.fields.CharField')(max_length=2000)),
('is_published', self.gf('django.db.models.fields.BooleanField')(default=False)),
('language', self.gf('django.db.models.fields.CharField')(max_length=16)),
('initiative', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['multilingual_initiatives.Initiative'])),
))
db.send_create_signal(u'multilingual_initiatives', ['InitiativeTranslation'])
def backwards(self, orm):
# Deleting model 'Initiative'
db.delete_table(u'multilingual_initiatives_initiative')
# Deleting model 'InitiativePluginModel'
db.delete_table(u'cmsplugin_initiativepluginmodel')
# Deleting model 'InitiativeTranslation'
db.delete_table(u'multilingual_initiatives_initiativetranslation')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'cms.cmsplugin': {
'Meta': {'object_name': 'CMSPlugin'},
'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.CMSPlugin']", 'null': 'True', 'blank': 'True'}),
'placeholder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
'plugin_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'cms.placeholder': {
'Meta': {'object_name': 'Placeholder'},
'default_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slot': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'filer.file': {
'Meta': {'object_name': 'File'},
'_file_size': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'file': ('django.db.models.fields.files.FileField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'folder': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'all_files'", 'null': 'True', 'to': "orm['filer.Folder']"}),
'has_all_mandatory_data': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'original_filename': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'owned_files'", 'null': 'True', 'to': u"orm['auth.User']"}),
'polymorphic_ctype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'polymorphic_filer.file_set'", 'null': 'True', 'to': u"orm['contenttypes.ContentType']"}),
'sha1': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '40', 'blank': 'True'}),
'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
'filer.folder': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('parent', 'name'),)", 'object_name': 'Folder'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'filer_owned_folders'", 'null': 'True', 'to': u"orm['auth.User']"}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['filer.Folder']"}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
u'multilingual_initiatives.initiative': {
'Meta': {'object_name': 'Initiative'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '160', 'blank': 'True'}),
'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'logo': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['filer.File']", 'null': 'True', 'blank': 'True'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['multilingual_orgs.Organization']", 'null': 'True', 'blank': 'True'}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'start_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
},
u'multilingual_initiatives.initiativepluginmodel': {
'Meta': {'object_name': 'InitiativePluginModel', 'db_table': "u'cmsplugin_initiativepluginmodel'", '_ormbases': ['cms.CMSPlugin']},
u'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
'display_type': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'initiative': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['multilingual_initiatives.Initiative']"})
},
u'multilingual_initiatives.initiativetranslation': {
'Meta': {'object_name': 'InitiativeTranslation'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'initiative': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['multilingual_initiatives.Initiative']"}),
'is_published': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '2000'})
},
u'multilingual_orgs.organization': {
'Meta': {'object_name': 'Organization'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'logo': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['filer.File']", 'null': 'True', 'blank': 'True'}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
}
}
complete_apps = ['multilingual_initiatives'] | bitmazk/django-multilingual-initiatives | multilingual_initiatives/migrations/0001_initial.py | Python | mit | 13,860 |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
Assorted functions for working with ISBNs.
"""
# TODO: is the error-handling logic correct?
__docformat__ = 'restructuredtext en'
### IMPORTS ###
### CONSTANTS & DEFINES ###
### IMPLEMENTATION ###
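# A hedged illustration of the kind of helper this module is meant to collect
# (not part of the original skeleton): ISBN-13 check-digit validation, where
# digits are weighted alternately 1 and 3 and the total must be divisible by 10.
def is_valid_isbn13 (isbn):
    """
    Return True if `isbn` is a well-formed ISBN-13 string.

    Non-digit separators such as hyphens and spaces are ignored; anything
    other than thirteen digits fails.

    For example::

        >>> is_valid_isbn13 ('978-0-306-40615-7')
        True
        >>> is_valid_isbn13 ('978-0-306-40615-8')
        False

    """
    digits = [int (c) for c in isbn if c.isdigit()]
    if len (digits) != 13:
        return False
    # weight digits 1, 3, 1, 3, ... and check the total modulo 10
    total = sum ((3 if i % 2 else 1) * d for i, d in enumerate (digits))
    return total % 10 == 0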
### TEST & DEBUG ###
def _doctest ():
import doctest
doctest.testmod()
### MAIN ###
if __name__ == '__main__':
_doctest()
### END ######################################################################
| agapow/biblio.idscheme | biblio/idscheme/isbnutils.py | Python | bsd-3-clause | 462 |
# -*- coding: utf-8 -*-
#
# MITprof documentation build configuration file, created by
# sphinx-quickstart on Tue Jan 16 00:38:59 2018.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffixes as a list of strings:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'MITprof'
copyright = u'2018, Gael Forget'
author = u'Gael Forget'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'0.1.0'
# The full version, including alpha/beta/rc tags.
release = u'0.1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# These patterns also affect html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
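# A hedged illustration (the keys below are standard alabaster options; the
# values are placeholders, not part of this project's configuration):
#
# html_theme_options = {
#     'description': 'MITprof documentation',
#     'fixed_sidebar': True,
# }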
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
html_sidebars = {
'**': [
'relations.html', # needs 'show_related': True theme option to display
'searchbox.html',
]
}
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'MITprofdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'MITprof.tex', u'MITprof Documentation',
u'Gael Forget', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'mitprof', u'MITprof Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'MITprof', u'MITprof Documentation',
author, 'MITprof', 'One line description of project.',
'Miscellaneous'),
]
| gaelforget/MITprof | docs/conf.py | Python | mit | 5,065 |
# -*- coding: UTF-8 -*-
#! python3 # noqa E265
"""Usage from the repo root folder:
:Example:
.. code-block:: python
# for whole test
python -m unittest tests.test_translator
# for specific
python -m unittest tests.test_translator.TestIsogeoTranslator.test_translation_fr
"""
# #############################################################################
# ########## Libraries #############
# ##################################
# Standard library
import logging
import unittest
from os import environ
from pathlib import Path
from sys import exit
from time import sleep
# 3rd party
from dotenv import load_dotenv
import urllib3
# module target
from isogeo_pysdk import Isogeo, IsogeoTranslator
# #############################################################################
# ######## Globals #################
# ##################################
if Path("dev.env").exists():
load_dotenv("dev.env", override=True)
# fixtures
METADATA_TEST_FIXTURE_UUID = environ.get("ISOGEO_FIXTURES_METADATA_COMPLETE")
li_contacts_fr = (
"Auteur",
"Point de contact",
"Administrateur",
"Distributeur",
"Créateur",
"Propriétaire",
"Analyste principal",
"Responsable du traitement",
"Éditeur (publication)",
"Fournisseur",
"Utilisateur",
)
li_contacts_en = (
"Author",
"Point of contact",
"Custodian",
"Distributor",
"Originator",
"Owner",
"Principal investigator",
"Processor",
"Publisher",
"Resource provider",
"User",
)
# #############################################################################
# ########## Classes ###############
# ##################################
class TestIsogeoTranslator(unittest.TestCase):
"""Test translation of specific words wihtin Isogeo API."""
# -- Standard methods --------------------------------------------------------
@classmethod
def setUpClass(cls):
"""Executed when module is loaded before any test."""
# checks
if not environ.get("ISOGEO_API_GROUP_CLIENT_ID") or not environ.get(
"ISOGEO_API_GROUP_CLIENT_SECRET"
):
logging.critical("No API credentials set as env variables.")
exit()
else:
pass
# class vars and attributes
cls.li_fixtures_to_delete = []
# ignore warnings related to the QA self-signed cert
if environ.get("ISOGEO_PLATFORM").lower() == "qa":
urllib3.disable_warnings()
# API connection
cls.isogeo = Isogeo(
auth_mode="group",
client_id=environ.get("ISOGEO_API_GROUP_CLIENT_ID"),
client_secret=environ.get("ISOGEO_API_GROUP_CLIENT_SECRET"),
auto_refresh_url="{}/oauth/token".format(environ.get("ISOGEO_ID_URL")),
platform=environ.get("ISOGEO_PLATFORM", "qa"),
)
# getting a token
cls.isogeo.connect()
# get contacts on a metadata
cls.fixture_metadata_existing = cls.isogeo.metadata.get(
metadata_id=METADATA_TEST_FIXTURE_UUID, include=("contacts",)
)
cls.li_contacts_roles = [
i.get("role") for i in cls.fixture_metadata_existing.contacts
]
# standard methods
def setUp(self):
"""Executed before each test."""
def tearDown(self):
"""Executed after each test."""
sleep(0.5)
@classmethod
def tearDownClass(cls):
"""Executed after the last test."""
# close sessions
cls.isogeo.close()
# -- TESTS ---------------------------------------------------------
# tests
def test_translation_fr(self):
"""Some French translations get from the subclass."""
tr = IsogeoTranslator("FR")
# contacts roles
for role in self.li_contacts_roles:
self.assertIn(tr.tr("roles", role), li_contacts_fr)
def test_translation_en(self):
"""Some English translations get from the subclass."""
tr = IsogeoTranslator("EN")
# contacts roles
for role in self.li_contacts_roles:
self.assertIn(tr.tr("roles", role), li_contacts_en)
def test_translation_bad(self):
"""Test translation of inexistent word.."""
tr = IsogeoTranslator("EN")
with self.assertRaises(ValueError):
for role in self.li_contacts_roles:
self.assertIn(tr.tr("BAD_SUBDOMAIN", role), li_contacts_en)
# #############################################################################
# ######## Standalone ##############
# ##################################
if __name__ == "__main__":
unittest.main()
| Guts/isogeo-api-py-minsdk | tests/test_translator.py | Python | gpl-3.0 | 4,673 |
from gloss import models, exceptions, utils
from gloss.conf import settings
from gloss.external_api import post_message_for_identifier
from gloss import message_type
"""
the information source is a data api, that by default gets
from the database if possible, otherwise it queries an external
service
"""
# TODO, we can just inherit this now...
if getattr(settings, "MOCK_EXTERNAL_API", None):
post_message_for_identifier = utils.import_from_string(
settings.MOCK_EXTERNAL_API
)
def get_information_source():
if hasattr(settings, "INFORMATION_SOURCE"):
info_cls = utils.import_from_string(settings.INFORMATION_SOURCE)
return info_cls()
else:
return InformationSource()
class InformationSource(object):
def check_or_fetch_patient(self, session, issuing_source, identifier):
""" checks if a patient exists locally, and if not
queries upstream if possible
"""
patient_already_exists = models.Patient.query_from_identifier(
identifier, issuing_source, session
).count()
if patient_already_exists:
return True
if not patient_already_exists:
if settings.USE_EXTERNAL_LOOKUP:
post_message_for_identifier(identifier)
return True
return False
def result_information(self, issuing_source, identifier):
with models.session_scope() as session:
if self.check_or_fetch_patient(
session, issuing_source, identifier
):
messages = models.Result.to_messages(
identifier, issuing_source, session
)
return message_type.construct_message_container(
messages, identifier, issuing_source
)
else:
raise exceptions.PatientNotFound(
"We can't find any patients with that identifier {}".format(
identifier
)
)
def patient_information(self, issuing_source, identifier):
with models.session_scope() as session:
if self.check_or_fetch_patient(
session, issuing_source, identifier
):
return models.patient_to_message_container(
identifier, issuing_source, session
)
else:
raise exceptions.PatientNotFound(
"We can't find any patients with that identifier {}".format(
identifier
)
)
| openhealthcare/gloss | gloss/information_source.py | Python | gpl-3.0 | 2,638 |
import socket
import nntplib
import time
import unittest
try:
import threading
except ImportError:
threading = None
from unittest import TestCase
from test import test_support
HOST = test_support.HOST
def server(evt, serv, evil=False):
serv.listen(5)
try:
conn, addr = serv.accept()
except socket.timeout:
pass
else:
if evil:
conn.send("1 I'm too long response" * 3000 + "\n")
else:
conn.send("1 I'm OK response\n")
conn.close()
finally:
serv.close()
evt.set()
class BaseServerTest(TestCase):
def setUp(self):
self.evt = threading.Event()
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.settimeout(3)
self.port = test_support.bind_port(self.sock)
threading.Thread(
target=server,
args=(self.evt, self.sock, self.evil)).start()
time.sleep(.1)
def tearDown(self):
self.evt.wait()
@unittest.skipUnless(threading, 'threading required')
class ServerTests(BaseServerTest):
evil = False
def test_basic_connect(self):
nntp = nntplib.NNTP('localhost', self.port)
nntp.sock.close()
@unittest.skipUnless(threading, 'threading required')
class EvilServerTests(BaseServerTest):
evil = True
def test_too_long_line(self):
self.assertRaises(nntplib.NNTPDataError,
nntplib.NNTP, 'localhost', self.port)
def test_main(verbose=None):
test_support.run_unittest(EvilServerTests)
test_support.run_unittest(ServerTests)
if __name__ == '__main__':
test_main()
| j5shi/Thruster | pylibs/test/test_nntplib.py | Python | gpl-2.0 | 1,725 |
#coding=utf-8
import subprocess
# Execute a shell command; only one command can be executed per call
def executeCommand(command):
process = subprocess.Popen(command, stdout=subprocess.PIPE, shell=True)
output = process.communicate()[0].strip()
print output
# Test
executeCommand("echo 1 && echo 2")
# Open the startup folder
executeCommand("explorer \"%UserProfile%\AppData\Roaming\Microsoft\Windows\Start Menu\Programs\Startup\"")
| Ju2ender/Python-E | x/ShellUtil.py | Python | mit | 430 |
from django.shortcuts import render
import adlt.helppages.services as helppages_services
def index(request):
parts = helppages_services.render('index')
return render(request, 'helppages/help_page.html', dict(
parts,
active_topmenu_item='help-index',
))
def help_page(request, path):
parts = helppages_services.render(path)
return render(request, 'helppages/help_page.html', dict(
parts,
active_topmenu_item='help-index',
))
| sirex/atviriduomenys.lt | adlt/helppages/views.py | Python | agpl-3.0 | 485 |
# Author: Mr_Orange <[email protected]>
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
import re
import datetime
import urlparse
import sickbeard
import generic
from sickbeard.common import Quality
from sickbeard import logger
from sickbeard import tvcache
from sickbeard import db
from sickbeard import classes
from sickbeard import helpers
from sickbeard import show_name_helpers
from sickbeard.exceptions import ex
from sickbeard import clients
from lib import requests
from lib.requests import exceptions
from sickbeard.helpers import sanitizeSceneName
class TorrentDayProvider(generic.TorrentProvider):
def __init__(self):
generic.TorrentProvider.__init__(self, "TorrentDay")
self.supportsBacklog = True
self.enabled = False
self._uid = None
self._hash = None
self.username = None
self.password = None
self.ratio = None
self.freeleech = False
self.minseed = None
self.minleech = None
self.cache = TorrentDayCache(self)
self.urls = {'base_url': 'https://torrentday.eu',
'login': 'https://torrentday.eu/torrents/',
'search': 'https://torrentday.eu/V3/API/API.php',
'download': 'https://torrentday.eu/download.php/%s/%s'
}
self.url = self.urls['base_url']
self.cookies = None
self.categories = {'Season': {'c14': 1}, 'Episode': {'c2': 1, 'c26': 1, 'c7': 1, 'c24': 1},
'RSS': {'c2': 1, 'c26': 1, 'c7': 1, 'c24': 1, 'c14': 1}}
def isEnabled(self):
return self.enabled
def imageName(self):
return 'torrentday.png'
def getQuality(self, item, anime=False):
quality = Quality.sceneQuality(item[0], anime)
return quality
def _doLogin(self):
if any(requests.utils.dict_from_cookiejar(self.session.cookies).values()):
return True
if self._uid and self._hash:
requests.utils.add_dict_to_cookiejar(self.session.cookies, self.cookies)
else:
login_params = {'username': self.username,
'password': self.password,
'submit.x': 0,
'submit.y': 0
}
if not self.session:
self.session = requests.Session()
try:
response = self.session.post(self.urls['login'], data=login_params, timeout=30, verify=False)
except (requests.exceptions.ConnectionError, requests.exceptions.HTTPError), e:
logger.log(u'Unable to connect to ' + self.name + ' provider: ' + ex(e), logger.ERROR)
return False
if re.search('You tried too often', response.text):
                logger.log(u'Too many login attempts for ' + self.name + u", can't retrieve any data", logger.ERROR)
return False
if response.status_code == 401:
logger.log(u'Invalid username or password for ' + self.name + ', Check your settings!', logger.ERROR)
return False
try:
if requests.utils.dict_from_cookiejar(self.session.cookies)['uid'] and requests.utils.dict_from_cookiejar(self.session.cookies)['pass']:
self._uid = requests.utils.dict_from_cookiejar(self.session.cookies)['uid']
self._hash = requests.utils.dict_from_cookiejar(self.session.cookies)['pass']
self.cookies = {'uid': self._uid,
'pass': self._hash
}
return True
except:
pass
logger.log(u'Unable to obtain cookie for TorrentDay', logger.ERROR)
return False
def _get_season_search_strings(self, ep_obj):
search_string = {'Season': []}
for show_name in set(show_name_helpers.allPossibleShowNames(self.show)):
if ep_obj.show.air_by_date or ep_obj.show.sports:
ep_string = show_name + ' ' + str(ep_obj.airdate).split('-')[0]
elif ep_obj.show.anime:
ep_string = show_name + ' ' + "%d" % ep_obj.scene_absolute_number
search_string['Season'].append(ep_string)
else:
ep_string = show_name + ' S%02d' % int(ep_obj.scene_season) #1) showName SXX
search_string['Season'].append(ep_string)
return [search_string]
def _get_episode_search_strings(self, ep_obj, add_string=''):
search_string = {'Episode': []}
if not ep_obj:
return []
if self.show.air_by_date:
for show_name in set(show_name_helpers.allPossibleShowNames(self.show)):
ep_string = sanitizeSceneName(show_name) + ' ' + \
str(ep_obj.airdate).replace('-', '|')
search_string['Episode'].append(ep_string)
elif self.show.sports:
for show_name in set(show_name_helpers.allPossibleShowNames(self.show)):
ep_string = sanitizeSceneName(show_name) + ' ' + \
str(ep_obj.airdate).replace('-', '|') + '|' + \
ep_obj.airdate.strftime('%b')
search_string['Episode'].append(ep_string)
elif self.show.anime:
for show_name in set(show_name_helpers.allPossibleShowNames(self.show)):
ep_string = sanitizeSceneName(show_name) + ' ' + \
"%i" % int(ep_obj.scene_absolute_number)
search_string['Episode'].append(ep_string)
else:
for show_name in set(show_name_helpers.allPossibleShowNames(self.show)):
ep_string = show_name_helpers.sanitizeSceneName(show_name) + ' ' + \
sickbeard.config.naming_ep_type[2] % {'seasonnumber': ep_obj.scene_season,
'episodenumber': ep_obj.scene_episode} + ' %s' % add_string
search_string['Episode'].append(re.sub('\s+', ' ', ep_string))
return [search_string]
def _doSearch(self, search_params, search_mode='eponly', epcount=0, age=0):
results = []
items = {'Season': [], 'Episode': [], 'RSS': []}
freeleech = '&free=on' if self.freeleech else ''
if not self._doLogin():
return results
for mode in search_params.keys():
for search_string in search_params[mode]:
logger.log(u"Search string: " + search_string, logger.DEBUG)
search_string = '+'.join(search_string.split())
post_data = dict({'/browse.php?': None, 'cata': 'yes', 'jxt': 8, 'jxw': 'b', 'search': search_string},
**self.categories[mode])
if self.freeleech:
post_data.update({'free': 'on'})
parsedJSON = self.getURL(self.urls['search'], post_data=post_data, json=True)
if not parsedJSON:
continue
try:
torrents = parsedJSON.get('Fs', [])[0].get('Cn', {}).get('torrents', [])
except:
continue
for torrent in torrents:
title = re.sub(r"\[.*\=.*\].*\[/.*\]", "", torrent['name'])
url = self.urls['download'] % ( torrent['id'], torrent['fname'] )
seeders = int(torrent['seed'])
leechers = int(torrent['leech'])
if mode != 'RSS' and (seeders < self.minseed or leechers < self.minleech):
continue
if not title or not url:
continue
item = title, url, seeders, leechers
items[mode].append(item)
results += items[mode]
return results
def _get_title_and_url(self, item):
title, url = item[0], item[1]
if title:
title = u'' + title
title = title.replace(' ', '.')
if url:
url = str(url).replace('&', '&')
return (title, url)
def findPropers(self, search_date=datetime.datetime.today()):
results = []
myDB = db.DBConnection()
sqlResults = myDB.select(
'SELECT s.show_name, e.showid, e.season, e.episode, e.status, e.airdate FROM tv_episodes AS e' +
' INNER JOIN tv_shows AS s ON (e.showid = s.indexer_id)' +
' WHERE e.airdate >= ' + str(search_date.toordinal()) +
' AND (e.status IN (' + ','.join([str(x) for x in Quality.DOWNLOADED]) + ')' +
' OR (e.status IN (' + ','.join([str(x) for x in Quality.SNATCHED]) + ')))'
)
if not sqlResults:
return []
for sqlshow in sqlResults:
self.show = helpers.findCertainShow(sickbeard.showList, int(sqlshow["showid"]))
if self.show:
curEp = self.show.getEpisode(int(sqlshow["season"]), int(sqlshow["episode"]))
searchString = self._get_episode_search_strings(curEp, add_string='PROPER|REPACK')
for item in self._doSearch(searchString[0]):
title, url = self._get_title_and_url(item)
results.append(classes.Proper(title, url, datetime.datetime.today(), self.show))
return results
def seedRatio(self):
return self.ratio
class TorrentDayCache(tvcache.TVCache):
def __init__(self, provider):
tvcache.TVCache.__init__(self, provider)
        # Only poll TorrentDay every 10 minutes max
self.minTime = 10
def _getRSSData(self):
search_params = {'RSS': ['']}
return {'entries': self.provider._doSearch(search_params)}
provider = TorrentDayProvider()
| bckwltn/SickRage | sickbeard/providers/torrentday.py | Python | gpl-3.0 | 10,497 |
try:
import urllib.parse as urlparse
except ImportError:
# py2
import urlparse
import tornado.httpclient
import tornado.web
# headers to remove as of HTTP 1.1 RFC2616
# http://www.w3.org/Protocols/rfc2616/rfc2616-sec13.html
hop_by_hop_headers = set([
'connection',
'keep-alive',
'proxy-authenticate',
'proxy-authorization',
'te',
'trailers',
'transfer-encoding',
'upgrade',
])
class ProxyHandler(tornado.web.RequestHandler):
def __init__(self, *args, **kw):
self.proxy_whitelist = kw.pop('proxy_whitelist', None)
self.origin_whitelist = kw.pop('origin_whitelist', None)
super(ProxyHandler, self).__init__(*args, **kw)
def check_proxy_host(self, url_parts):
if self.proxy_whitelist is None:
return
url = '%s://%s' % (url_parts.scheme, url_parts.netloc)
if url in self.proxy_whitelist:
return
raise tornado.web.HTTPError(403)
def check_origin(self):
if self.origin_whitelist is None:
return
if 'Origin' not in self.request.headers:
raise tornado.web.HTTPError(403)
if self.request.headers['Origin'] not in self.origin_whitelist:
raise tornado.web.HTTPError(403)
def response_handler(self, response):
if response.error and not isinstance(response.error, tornado.httpclient.HTTPError):
self.set_status(500)
self.write('Internal server error:\n' + str(response.error))
self.finish()
return
if response.code == 599:
# connection closed
self.set_status(502)
self.write('Bad gateway:\n' + str(response.error))
self.finish()
return
self.set_status(response.code)
# copy all but hop-by-hop headers
for header, v in response.headers.items():
if header.lower() not in hop_by_hop_headers:
self.set_header(header, v)
origin = self.request.headers.get('Origin', '*')
self.set_header('Access-Control-Allow-Origin', origin)
if self.request.method == 'OPTIONS':
if 'Access-Control-Request-Headers' in self.request.headers:
# allow all requested headers
self.set_header('Access-Control-Allow-Headers',
self.request.headers.get('Access-Control-Request-Headers', ''))
self.set_header('Access-Control-Allow-Methods',
response.headers.get('Allow', ''))
if response.code == 405:
# fake OPTIONS response when source doesn't support it
# as OPTIONS is required for CORS preflight requests.
# the 405 response should contain the supported methods
# in the Allow header.
self.set_status(200)
self.clear_header('Content-Length')
self.finish()
return
if response.body:
self.write(response.body)
self.finish()
@tornado.web.asynchronous
def request_handler(self, url):
url_parts = urlparse.urlparse(url)
self.request.headers['Host'] = url_parts.netloc
self.check_proxy_host(url_parts)
self.check_origin()
if self.request.query:
url = url + '?' + self.request.query
req = tornado.httpclient.HTTPRequest(
url=url,
method=self.request.method,
body=self.request.body,
headers=self.request.headers,
follow_redirects=False,
allow_nonstandard_methods=True,
use_gzip=False, # otherwise tornado will decode proxied data
)
client = tornado.httpclient.AsyncHTTPClient()
try:
client.fetch(req, self.response_handler)
except tornado.httpclient.HTTPError as e:
# pass regular HTTP errors from server to client
if hasattr(e, 'response') and e.response:
self.response_handler(e.response)
else:
raise
# alias HTTP methods to generic request handler
SUPPORTED_METHODS = ['GET', 'POST', 'PUT', 'DELETE', 'HEAD', 'OPTIONS']
get = request_handler
post = request_handler
put = request_handler
delete = request_handler
head = request_handler
options = request_handler
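# A hedged wiring sketch (the route pattern, port, and whitelist values are
# assumptions for illustration, not part of this module); the URL-spec kwargs
# are the same proxy_whitelist/origin_whitelist popped in __init__ above:
#
#     import tornado.ioloop
#
#     app = tornado.web.Application([
#         (r'/proxy/(.*)', ProxyHandler, dict(
#             proxy_whitelist=['https://api.example.org'],
#             origin_whitelist=['https://app.example.org'])),
#     ])
#     app.listen(8888)
#     tornado.ioloop.IOLoop.instance().start()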
| olt/corsa | corsa/proxy.py | Python | mit | 4,393 |