repo_name (stringlengths 5-92) | path (stringlengths 4-232) | copies (stringclasses, 19 values) | size (stringlengths 4-7) | content (stringlengths 721-1.04M) | license (stringclasses, 15 values) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51-99.9) | line_max (int64, 15-997) | alpha_frac (float64, 0.25-0.97) | autogenerated (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---|
aubreystarktoller/django-babik-card-primitives | setup.py | 1 | 1665 | import sys
from setuptools import setup
from setuptools.command.test import test as TestCommand
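# PyTest below is a custom setuptools 'test' command that simply delegates to
# py.test; extra pytest flags can be supplied on the command line through the
# --pytest-args option it defines (for example "-k some_test").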
class PyTest(TestCommand):
user_options = [('pytest-args=', 'a', "Arguments to pass to py.test")]
def initialize_options(self):
TestCommand.initialize_options(self)
self.pytest_args = []
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
import pytest
errno = pytest.main(self.pytest_args)
sys.exit(errno)
setup(
name="django-babik-card-primitives",
version="0.1.0",
description="Basic functionallity for handling credit card information",
author="Aubrey Stark-Toller",
author_email="[email protected]",
license="BSD",
include_package_data=True,
classifiers=[
"Development Status :: 3 - Alpha",
"Framework :: Django",
"Intended Audience :: Developers",
"License :: OSI Approved :: BSD License",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.2",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: Implementation :: CPython",
],
keywords="project django",
packages=["babik_card_primitives"],
install_requires = ["django>=1.8,<1.10"],
tests_require = ["pytest", "pytest-django", "pytest-cov", 'testfixtures'],
cmdclass = {'test': PyTest}
)
| bsd-3-clause | -7,242,708,815,215,698,000 | 32.979592 | 78 | 0.622222 | false |
simonkrogmann/planets | gui/object_3D.py | 1 | 4105 | # -*- coding: cp1252 -*-
import vector
import time
class Planet3D:
"""ein 3D-Objekt für das graphics-Modul, dass mit einem Planeten verbunden ist"""
def __init__(self, Parent, Planet):
self.Parent = Parent
self.Planet = Planet
self.Planet.Register(self)
self.Positions = [Planet["position"].Tuple()]
self.Trace = []
self.Color = Planet["color"]
self.TraceState = -1
self.Drawing = self.Parent.Canvas.create_oval(-5, -5, -6, -6, fill = Planet["color"],
outline = "")
self.Redraw()
def ResetTrace(self):
"""löscht die bisher gezeichnete Spur des Planeten"""
for Line in self.Trace:
self.Parent.Canvas.delete(Line.Drawing)
self.Parent.Drawings.remove(Line)
self.Trace = []
self.TraceState = -1
self.Positions = [self.Positions[-1]]
def Redraw(self):
"""zeichnet den Planeten neu"""
C = self.Parent.DisplayPosition(self.Positions[-1])
if C:
Diameter = self.Parent.DisplayDiameter(self.Positions[-1], self.Planet["diameter"])
Coordinates = (C[0] - Diameter, C[1] - Diameter, C[0] + Diameter, C[1] + Diameter)
self.Parent.Canvas.coords(self.Drawing, Coordinates)
else:
self.Parent.Canvas.coords(self.Drawing, -5, -5, -6, -6)
def Update(self, Tag, Value):
"""ändert die Zeichnung des Planeten entsprechend der Daten.
Mögliche Daten sind die Planetenattribute."""
if Tag == "position":
if type(Value) == tuple:
Tuple = Value
else:
Tuple = Value.Tuple()
if self.Planet["trace"] and self.Planet.Parent.Trace:
# combines 5 lines at a time for the trace
self.TraceState = (self.TraceState + 1) % 5
if not self.TraceState:
self.Trace.append(Line3D(self.Parent, self.Positions[-1], Tuple, self.Color))
self.Parent.Drawings.append(self.Trace[-1])
self.Positions.append(Tuple)
else:
self.Positions[-1] = Tuple
self.Trace[-1].End = Tuple
self.Trace[-1].Redraw()
else:
self.Positions = [Tuple]
self.Redraw()
elif Tag == "diameter":
self.Redraw()
elif Tag == "color":
self.SetColor(Value)
elif Tag == "trace" and not Value:
self.ResetTrace()
def SetColor(self, Color):
"""ändert die Planetenfarbe"""
self.Color = Color
self.Parent.Canvas.itemconfig(self.Drawing, fill = Color)
def Delete(self):
"""entfernt den Planeten aus der Zeichnung"""
for Line in self.Trace:
self.Parent.Canvas.delete(Line.Drawing)
self.Parent.Drawings.remove(Line)
self.Parent.Canvas.delete(self.Drawing)
self.Planet.Deregister(self)
def MidPoint(self):
"""gibt den Mittelpunkt des Planeten zurück"""
return self.Positions[-1]
class Line3D:
"""eine 3D-Linie für das graphics-Modul"""
def __init__(self, Parent, Begin, End, Color):
self.Parent = Parent
self.Begin = Begin
self.End = End
self.OnScreen = False
self.Drawing = self.Parent.Canvas.create_line(-5, -5, -5, -5, fill = Color)
self.Redraw()
def Redraw(self):
"""zeichnet die Linie neu"""
Coordinates = self.Parent.LineDisplayCoordinates(self.Begin, self.End)
if Coordinates != (-5,-5,-5,-5):
self.Parent.Canvas.coords(self.Drawing, Coordinates)
self.OnScreen = True
elif self.OnScreen:
self.OnScreen = False
self.Parent.Canvas.coords(self.Drawing, Coordinates)
def MidPoint(self):
"""gibt den Mittelpunkt der Linie zurück"""
return ((self.Begin[0] + self.End[0])/ 2, (self.Begin[1] + self.End[1])/ 2,
(self.Begin[2] + self.End[2])/ 2)
| mit | -6,019,683,214,109,623,000 | 37.009259 | 97 | 0.557613 | false |
Aloomaio/googleads-python-lib | examples/ad_manager/v201805/product_package_item_service/get_product_package_items_for_product_package.py | 1 | 2302 | #!/usr/bin/env python
#
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example gets all product package items belonging to a product package.
"""
# Import appropriate modules from the client library.
from googleads import ad_manager
PRODUCT_PACKAGE_ID = 'INSERT_PRODUCT_PACKAGE_ID_HERE'
def main(client, product_package_id):
# Initialize appropriate service.
product_package_item_service = client.GetService(
'ProductPackageItemService', version='v201805')
# Create a statement to select product package items.
statement = (ad_manager.StatementBuilder(version='v201805')
.Where('productPackageId = :productPackageId')
.WithBindVariable('productPackageId', product_package_id))
# Retrieve a small amount of product package items at a time, paging
# through until all product package items have been retrieved.
while True:
response = product_package_item_service.getProductPackageItemsByStatement(
statement.ToStatement())
if 'results' in response and len(response['results']):
for product_package_item in response['results']:
# Print out some information for each product package item.
print('Product package item with ID "%d", product ID "%d", and product '
'package ID "%d" was found.\n' %
(product_package_item['id'], product_package_item['productId'],
product_package_item['productPackageId']))
statement.offset += statement.limit
else:
break
print('\nNumber of results found: %s' % response['totalResultSetSize'])
if __name__ == '__main__':
# Initialize client object.
ad_manager_client = ad_manager.AdManagerClient.LoadFromStorage()
main(ad_manager_client, PRODUCT_PACKAGE_ID)
| apache-2.0 | 1,369,016,464,488,928,300 | 40.107143 | 80 | 0.715899 | false |
baidu/palo | build-support/run_clang_format.py | 2 | 5703 | #!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# Modified from Apache Arrow project.
from __future__ import print_function
import lintutils
from subprocess import PIPE
import argparse
import difflib
import multiprocessing as mp
import sys
from functools import partial
# examine the output of clang-format and if changes are
# present assemble a (unified)patch of the difference
def _check_one_file(filename, formatted):
with open(filename, "rb") as reader:
original = reader.read()
if formatted != original:
# Run the equivalent of diff -u
diff = list(difflib.unified_diff(
original.decode('utf8').splitlines(True),
formatted.decode('utf8').splitlines(True),
fromfile=filename,
tofile="{} (after clang format)".format(
filename)))
else:
diff = None
return filename, diff
def _check_dir(arguments, source_dir, exclude_globs):
formatted_filenames = []
for path in lintutils.get_sources(source_dir, exclude_globs):
formatted_filenames.append(str(path))
if arguments.fix:
if not arguments.quiet:
print("\n".join(map(lambda x: "Formatting {}".format(x),
formatted_filenames)))
# Break clang-format invocations into chunks: each invocation formats
# 16 files. Wait for all processes to complete
results = lintutils.run_parallel([
[arguments.clang_format_binary, "-style=file", "-i"] + some
for some in lintutils.chunk(formatted_filenames, 16)
])
for returncode, stdout, stderr in results:
# if any clang-format reported a parse error, bubble it
if returncode != 0:
sys.exit(returncode)
else:
# run an instance of clang-format for each source file in parallel,
# then wait for all processes to complete
results = lintutils.run_parallel([
[arguments.clang_format_binary, "-style=file", filename]
for filename in formatted_filenames
], stdout=PIPE, stderr=PIPE)
checker_args = []
for filename, res in zip(formatted_filenames, results):
# if any clang-format reported a parse error, bubble it
returncode, stdout, stderr = res
if returncode != 0:
print(stderr)
sys.exit(returncode)
checker_args.append((filename, stdout))
error = False
pool = mp.Pool()
try:
# check the output from each invocation of clang-format in parallel
for filename, diff in pool.starmap(_check_one_file, checker_args):
if not arguments.quiet:
print("Checking {}".format(filename))
if diff:
print("{} had clang-format style issues".format(filename))
# Print out the diff to stderr
error = True
# pad with a newline
print(file=sys.stderr)
sys.stderr.writelines(diff)
except Exception:
error = True
raise
finally:
pool.terminate()
pool.join()
sys.exit(1 if error else 0)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Runs clang-format on all of the source "
"files. If --fix is specified enforce format by "
"modifying in place, otherwise compare the output "
"with the existing file and output any necessary "
"changes as a patch in unified diff format")
parser.add_argument("--clang_format_binary",
required=True,
help="Path to the clang-format binary")
parser.add_argument("--exclude_globs",
help="Filename containing globs for files "
"that should be excluded from the checks")
parser.add_argument("--source_dirs",
required=True,
help="Comma-separated root directories of the source code")
parser.add_argument("--fix", default=False,
action="store_true",
help="If specified, will re-format the source "
"code instead of comparing the re-formatted "
"output, defaults to %(default)s")
parser.add_argument("--quiet", default=False,
action="store_true",
help="If specified, only print errors")
arguments = parser.parse_args()
exclude_globs = []
if arguments.exclude_globs:
with open(arguments.exclude_globs) as f:
exclude_globs.extend(line.strip() for line in f)
for source_dir in arguments.source_dirs.split(','):
if len(source_dir) > 0:
_check_dir(arguments, source_dir, exclude_globs)
| apache-2.0 | -5,497,260,695,327,298,000 | 38.604167 | 83 | 0.60512 | false |
smithfarm/s3-tests | s3tests/functional/test_s3.py | 1 | 203153 | from cStringIO import StringIO
import boto.exception
import boto.s3.connection
import boto.s3.acl
import bunch
import datetime
import time
import email.utils
import isodate
import nose
import operator
import socket
import ssl
import os
import requests
import base64
import hmac
import sha
import pytz
import json
import httplib2
import threading
import itertools
import string
import random
import xml.etree.ElementTree as ET
from httplib import HTTPConnection, HTTPSConnection
from urlparse import urlparse
from nose.tools import eq_ as eq
from nose.plugins.attrib import attr
from nose.plugins.skip import SkipTest
from .utils import assert_raises
from .utils import generate_random
from .utils import region_sync_meta
import AnonymousAuth
from email.header import decode_header
from ordereddict import OrderedDict
from boto.s3.cors import CORSConfiguration
from . import (
nuke_prefixed_buckets,
get_new_bucket,
get_new_bucket_name,
s3,
targets,
config,
get_prefix,
is_slow_backend,
)
NONEXISTENT_EMAIL = '[email protected]'
def not_eq(a, b):
assert a != b, "%r == %r" % (a, b)
def check_access_denied(fn, *args, **kwargs):
e = assert_raises(boto.exception.S3ResponseError, fn, *args, **kwargs)
eq(e.status, 403)
eq(e.reason, 'Forbidden')
eq(e.error_code, 'AccessDenied')
def check_grants(got, want):
"""
Check that grants list in got matches the dictionaries in want,
in any order.
"""
eq(len(got), len(want))
got = sorted(got, key=operator.attrgetter('id'))
want = sorted(want, key=operator.itemgetter('id'))
for g, w in zip(got, want):
w = dict(w)
eq(g.permission, w.pop('permission'))
eq(g.id, w.pop('id'))
eq(g.display_name, w.pop('display_name'))
eq(g.uri, w.pop('uri'))
eq(g.email_address, w.pop('email_address'))
eq(g.type, w.pop('type'))
eq(w, {})
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list')
@attr(assertion='empty buckets return no contents')
def test_bucket_list_empty():
bucket = get_new_bucket()
l = bucket.list()
l = list(l)
eq(l, [])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list')
@attr(assertion='distinct buckets have different contents')
def test_bucket_list_distinct():
bucket1 = get_new_bucket()
bucket2 = get_new_bucket()
key = bucket1.new_key('asdf')
key.set_contents_from_string('asdf')
l = bucket2.list()
l = list(l)
eq(l, [])
def _create_keys(bucket=None, keys=[]):
"""
Populate a (specified or new) bucket with objects with
specified names (and contents identical to their names).
"""
if bucket is None:
bucket = get_new_bucket()
for s in keys:
key = bucket.new_key(s)
key.set_contents_from_string(s)
return bucket
def _get_keys_prefixes(li):
"""
figure out which of the strings in a list are actually keys
return lists of strings that are (keys) and are not (prefixes)
"""
keys = [x for x in li if isinstance(x, boto.s3.key.Key)]
prefixes = [x for x in li if not isinstance(x, boto.s3.key.Key)]
return (keys, prefixes)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list all keys')
@attr(assertion='pagination w/max_keys=2, no marker')
def test_bucket_list_many():
bucket = _create_keys(keys=['foo', 'bar', 'baz'])
# bucket.list() is high-level and will not let us set max-keys,
# using it would require using >1000 keys to test, and that would
# be too slow; use the lower-level call bucket.get_all_keys()
# instead
l = bucket.get_all_keys(max_keys=2)
eq(len(l), 2)
eq(l.is_truncated, True)
names = [e.name for e in l]
eq(names, ['bar', 'baz'])
l = bucket.get_all_keys(max_keys=2, marker=names[-1])
eq(len(l), 1)
eq(l.is_truncated, False)
names = [e.name for e in l]
eq(names, ['foo'])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list')
@attr(assertion='prefixes in multi-component object names')
def test_bucket_list_delimiter_basic():
bucket = _create_keys(keys=['foo/bar', 'foo/baz/xyzzy', 'quux/thud', 'asdf'])
# listings should treat / delimiter in a directory-like fashion
li = bucket.list(delimiter='/')
eq(li.delimiter, '/')
# asdf is the only terminal object that should appear in the listing
(keys,prefixes) = _get_keys_prefixes(li)
names = [e.name for e in keys]
eq(names, ['asdf'])
# In Amazon, you will have two CommonPrefixes elements, each with a single
# prefix. According to Amazon documentation
# (http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketGET.html),
# the response's CommonPrefixes should contain all the prefixes, which DHO
# does.
#
# Unfortunately, boto considers a CommonPrefixes element as a prefix, and
# will store the last Prefix element within a CommonPrefixes element,
# effectively overwriting any other prefixes.
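# As a rough sketch (schematic, not a verbatim server response), the listing
# XML would carry the two prefixes as:
#   <CommonPrefixes><Prefix>foo/</Prefix></CommonPrefixes>
#   <CommonPrefixes><Prefix>quux/</Prefix></CommonPrefixes>
# and boto surfaces them as the 'prefixes' entries asserted on below.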
# the other returned values should be the pure prefixes foo/ and quux/
prefix_names = [e.name for e in prefixes]
eq(len(prefixes), 2)
eq(prefix_names, ['foo/', 'quux/'])
def validate_bucket_list(bucket, prefix, delimiter, marker, max_keys,
is_truncated, check_objs, check_prefixes, next_marker):
#
li = bucket.get_all_keys(delimiter=delimiter, prefix=prefix, max_keys=max_keys, marker=marker)
eq(li.is_truncated, is_truncated)
eq(li.next_marker, next_marker)
(keys, prefixes) = _get_keys_prefixes(li)
eq(len(keys), len(check_objs))
eq(len(prefixes), len(check_prefixes))
objs = [e.name for e in keys]
eq(objs, check_objs)
prefix_names = [e.name for e in prefixes]
eq(prefix_names, check_prefixes)
return li.next_marker
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list')
@attr(assertion='prefixes in multi-component object names')
def test_bucket_list_delimiter_prefix():
bucket = _create_keys(keys=['asdf', 'boo/bar', 'boo/baz/xyzzy', 'cquux/thud', 'cquux/bla'])
delim = '/'
marker = ''
prefix = ''
marker = validate_bucket_list(bucket, prefix, delim, '', 1, True, ['asdf'], [], 'asdf')
marker = validate_bucket_list(bucket, prefix, delim, marker, 1, True, [], ['boo/'], 'boo/')
marker = validate_bucket_list(bucket, prefix, delim, marker, 1, False, [], ['cquux/'], None)
marker = validate_bucket_list(bucket, prefix, delim, '', 2, True, ['asdf'], ['boo/'], 'boo/')
marker = validate_bucket_list(bucket, prefix, delim, marker, 2, False, [], ['cquux/'], None)
prefix = 'boo/'
marker = validate_bucket_list(bucket, prefix, delim, '', 1, True, ['boo/bar'], [], 'boo/bar')
marker = validate_bucket_list(bucket, prefix, delim, marker, 1, False, [], ['boo/baz/'], None)
marker = validate_bucket_list(bucket, prefix, delim, '', 2, False, ['boo/bar'], ['boo/baz/'], None)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list')
@attr(assertion='non-slash delimiter characters')
def test_bucket_list_delimiter_alt():
bucket = _create_keys(keys=['bar', 'baz', 'cab', 'foo'])
li = bucket.list(delimiter='a')
eq(li.delimiter, 'a')
# foo contains no 'a' and so is a complete key
(keys,prefixes) = _get_keys_prefixes(li)
names = [e.name for e in keys]
eq(names, ['foo'])
# bar, baz, and cab should be broken up by the 'a' delimiters
prefix_names = [e.name for e in prefixes]
eq(len(prefixes), 2)
eq(prefix_names, ['ba', 'ca'])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list')
@attr(assertion='non-printable delimiter can be specified')
def test_bucket_list_delimiter_unreadable():
key_names = ['bar', 'baz', 'cab', 'foo']
bucket = _create_keys(keys=key_names)
li = bucket.list(delimiter='\x0a')
eq(li.delimiter, '\x0a')
(keys, prefixes) = _get_keys_prefixes(li)
names = [e.name for e in keys]
eq(names, key_names)
eq(prefixes, [])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list')
@attr(assertion='empty delimiter can be specified')
def test_bucket_list_delimiter_empty():
key_names = ['bar', 'baz', 'cab', 'foo']
bucket = _create_keys(keys=key_names)
li = bucket.list(delimiter='')
eq(li.delimiter, '')
(keys, prefixes) = _get_keys_prefixes(li)
names = [e.name for e in keys]
eq(names, key_names)
eq(prefixes, [])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list')
@attr(assertion='unspecified delimiter defaults to none')
def test_bucket_list_delimiter_none():
key_names = ['bar', 'baz', 'cab', 'foo']
bucket = _create_keys(keys=key_names)
li = bucket.list()
eq(li.delimiter, '')
(keys, prefixes) = _get_keys_prefixes(li)
names = [e.name for e in keys]
eq(names, key_names)
eq(prefixes, [])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list')
@attr(assertion='unused delimiter is not found')
def test_bucket_list_delimiter_not_exist():
key_names = ['bar', 'baz', 'cab', 'foo']
bucket = _create_keys(keys=key_names)
li = bucket.list(delimiter='/')
eq(li.delimiter, '/')
(keys, prefixes) = _get_keys_prefixes(li)
names = [e.name for e in keys]
eq(names, key_names)
eq(prefixes, [])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list under prefix')
@attr(assertion='returns only objects under prefix')
def test_bucket_list_prefix_basic():
bucket = _create_keys(keys=['foo/bar', 'foo/baz', 'quux'])
li = bucket.list(prefix='foo/')
eq(li.prefix, 'foo/')
(keys, prefixes) = _get_keys_prefixes(li)
names = [e.name for e in keys]
eq(names, ['foo/bar', 'foo/baz'])
eq(prefixes, [])
# just testing that we can do the delimiter and prefix logic on non-slashes
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list under prefix')
@attr(assertion='prefixes w/o delimiters')
def test_bucket_list_prefix_alt():
bucket = _create_keys(keys=['bar', 'baz', 'foo'])
li = bucket.list(prefix='ba')
eq(li.prefix, 'ba')
(keys, prefixes) = _get_keys_prefixes(li)
names = [e.name for e in keys]
eq(names, ['bar', 'baz'])
eq(prefixes, [])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list under prefix')
@attr(assertion='empty prefix returns everything')
def test_bucket_list_prefix_empty():
key_names = ['foo/bar', 'foo/baz', 'quux']
bucket = _create_keys(keys=key_names)
li = bucket.list(prefix='')
eq(li.prefix, '')
(keys, prefixes) = _get_keys_prefixes(li)
names = [e.name for e in keys]
eq(names, key_names)
eq(prefixes, [])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list under prefix')
@attr(assertion='unspecified prefix returns everything')
def test_bucket_list_prefix_none():
key_names = ['foo/bar', 'foo/baz', 'quux']
bucket = _create_keys(keys=key_names)
li = bucket.list()
eq(li.prefix, '')
(keys, prefixes) = _get_keys_prefixes(li)
names = [e.name for e in keys]
eq(names, key_names)
eq(prefixes, [])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list under prefix')
@attr(assertion='nonexistent prefix returns nothing')
def test_bucket_list_prefix_not_exist():
bucket = _create_keys(keys=['foo/bar', 'foo/baz', 'quux'])
li = bucket.list(prefix='d')
eq(li.prefix, 'd')
(keys, prefixes) = _get_keys_prefixes(li)
eq(keys, [])
eq(prefixes, [])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list under prefix')
@attr(assertion='non-printable prefix can be specified')
def test_bucket_list_prefix_unreadable():
# FIX: shouldn't this test include strings that start with the tested prefix
bucket = _create_keys(keys=['foo/bar', 'foo/baz', 'quux'])
li = bucket.list(prefix='\x0a')
eq(li.prefix, '\x0a')
(keys, prefixes) = _get_keys_prefixes(li)
eq(keys, [])
eq(prefixes, [])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list under prefix w/delimiter')
@attr(assertion='returns only objects directly under prefix')
def test_bucket_list_prefix_delimiter_basic():
bucket = _create_keys(keys=['foo/bar', 'foo/baz/xyzzy', 'quux/thud', 'asdf'])
li = bucket.list(prefix='foo/', delimiter='/')
eq(li.prefix, 'foo/')
eq(li.delimiter, '/')
(keys, prefixes) = _get_keys_prefixes(li)
names = [e.name for e in keys]
eq(names, ['foo/bar'])
prefix_names = [e.name for e in prefixes]
eq(prefix_names, ['foo/baz/'])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list under prefix w/delimiter')
@attr(assertion='non-slash delimiters')
def test_bucket_list_prefix_delimiter_alt():
bucket = _create_keys(keys=['bar', 'bazar', 'cab', 'foo'])
li = bucket.list(prefix='ba', delimiter='a')
eq(li.prefix, 'ba')
eq(li.delimiter, 'a')
(keys, prefixes) = _get_keys_prefixes(li)
names = [e.name for e in keys]
eq(names, ['bar'])
prefix_names = [e.name for e in prefixes]
eq(prefix_names, ['baza'])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list under prefix w/delimiter')
@attr(assertion='finds nothing w/unmatched prefix')
def test_bucket_list_prefix_delimiter_prefix_not_exist():
bucket = _create_keys(keys=['b/a/r', 'b/a/c', 'b/a/g', 'g'])
li = bucket.list(prefix='d', delimiter='/')
(keys, prefixes) = _get_keys_prefixes(li)
eq(keys, [])
eq(prefixes, [])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list under prefix w/delimiter')
@attr(assertion='over-ridden slash ceases to be a delimiter')
def test_bucket_list_prefix_delimiter_delimiter_not_exist():
bucket = _create_keys(keys=['b/a/c', 'b/a/g', 'b/a/r', 'g'])
li = bucket.list(prefix='b', delimiter='z')
(keys, prefixes) = _get_keys_prefixes(li)
names = [e.name for e in keys]
eq(names, ['b/a/c', 'b/a/g', 'b/a/r'])
eq(prefixes, [])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list under prefix w/delimiter')
@attr(assertion='finds nothing w/unmatched prefix and delimiter')
def test_bucket_list_prefix_delimiter_prefix_delimiter_not_exist():
bucket = _create_keys(keys=['b/a/c', 'b/a/g', 'b/a/r', 'g'])
li = bucket.list(prefix='y', delimiter='z')
(keys, prefixes) = _get_keys_prefixes(li)
eq(keys, [])
eq(prefixes, [])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list all keys')
@attr(assertion='pagination w/max_keys=1, marker')
def test_bucket_list_maxkeys_one():
key_names = ['bar', 'baz', 'foo', 'quxx']
bucket = _create_keys(keys=key_names)
li = bucket.get_all_keys(max_keys=1)
eq(len(li), 1)
eq(li.is_truncated, True)
names = [e.name for e in li]
eq(names, key_names[0:1])
li = bucket.get_all_keys(marker=key_names[0])
eq(li.is_truncated, False)
names = [e.name for e in li]
eq(names, key_names[1:])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list all keys')
@attr(assertion='pagination w/max_keys=0')
def test_bucket_list_maxkeys_zero():
bucket = _create_keys(keys=['bar', 'baz', 'foo', 'quxx'])
li = bucket.get_all_keys(max_keys=0)
eq(li.is_truncated, False)
eq(li, [])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list all keys')
@attr(assertion='pagination w/o max_keys')
def test_bucket_list_maxkeys_none():
key_names = ['bar', 'baz', 'foo', 'quxx']
bucket = _create_keys(keys=key_names)
li = bucket.get_all_keys()
eq(li.is_truncated, False)
names = [e.name for e in li]
eq(names, key_names)
eq(li.MaxKeys, '1000')
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list all keys')
@attr(assertion='invalid max_keys')
def test_bucket_list_maxkeys_invalid():
bucket = _create_keys(keys=['bar', 'baz', 'foo', 'quxx'])
e = assert_raises(boto.exception.S3ResponseError, bucket.get_all_keys, max_keys='blah')
eq(e.status, 400)
eq(e.reason, 'Bad Request')
eq(e.error_code, 'InvalidArgument')
@attr('fails_on_rgw')
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list all keys')
@attr(assertion='non-printing max_keys')
def test_bucket_list_maxkeys_unreadable():
bucket = _create_keys(keys=['bar', 'baz', 'foo', 'quxx'])
e = assert_raises(boto.exception.S3ResponseError, bucket.get_all_keys, max_keys='\x0a')
eq(e.status, 400)
eq(e.reason, 'Bad Request')
# Weird because you can clearly see an InvalidArgument error code. What's
# also funny is that Amazon tells us that it's not an integer or within an
# integer range. Is 'blah' in the integer range?
eq(e.error_code, 'InvalidArgument')
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list all keys')
@attr(assertion='no pagination, no marker')
def test_bucket_list_marker_none():
key_names = ['bar', 'baz', 'foo', 'quxx']
bucket = _create_keys(keys=key_names)
li = bucket.get_all_keys()
eq(li.marker, '')
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list all keys')
@attr(assertion='no pagination, empty marker')
def test_bucket_list_marker_empty():
key_names = ['bar', 'baz', 'foo', 'quxx']
bucket = _create_keys(keys=key_names)
li = bucket.get_all_keys(marker='')
eq(li.marker, '')
eq(li.is_truncated, False)
names = [e.name for e in li]
eq(names, key_names)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list all keys')
@attr(assertion='non-printing marker')
def test_bucket_list_marker_unreadable():
key_names = ['bar', 'baz', 'foo', 'quxx']
bucket = _create_keys(keys=key_names)
li = bucket.get_all_keys(marker='\x0a')
eq(li.marker, '\x0a')
eq(li.is_truncated, False)
names = [e.name for e in li]
eq(names, key_names)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list all keys')
@attr(assertion='marker not-in-list')
def test_bucket_list_marker_not_in_list():
bucket = _create_keys(keys=['bar', 'baz', 'foo', 'quxx'])
li = bucket.get_all_keys(marker='blah')
eq(li.marker, 'blah')
names = [e.name for e in li]
eq(names, ['foo', 'quxx'])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list all keys')
@attr(assertion='marker after list')
def test_bucket_list_marker_after_list():
bucket = _create_keys(keys=['bar', 'baz', 'foo', 'quxx'])
li = bucket.get_all_keys(marker='zzz')
eq(li.marker, 'zzz')
eq(li.is_truncated, False)
eq(li, [])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list all keys')
@attr(assertion='marker before list')
def test_bucket_list_marker_before_list():
key_names = ['bar', 'baz', 'foo', 'quxx']
bucket = _create_keys(keys=key_names)
li = bucket.get_all_keys(marker='aaa')
eq(li.marker, 'aaa')
eq(li.is_truncated, False)
names = [e.name for e in li]
eq(names, key_names)
def _compare_dates(iso_datetime, http_datetime):
"""
compare an iso date and an http date, within an epsilon
"""
date = isodate.parse_datetime(iso_datetime)
pd = email.utils.parsedate_tz(http_datetime)
tz = isodate.tzinfo.FixedOffset(0, pd[-1]/60, 'who cares')
date2 = datetime.datetime(*pd[:6], tzinfo=tz)
# our tolerance
minutes = 5
acceptable_delta = datetime.timedelta(minutes=minutes)
assert abs(date - date2) < acceptable_delta, \
("Times are not within {minutes} minutes of each other: "
+ "{date1!r}, {date2!r}"
).format(
minutes=minutes,
date1=iso_datetime,
date2=http_datetime,
)
@attr(resource='object')
@attr(method='head')
@attr(operation='compare w/bucket list')
@attr(assertion='return same metadata')
def test_bucket_list_return_data():
key_names = ['bar', 'baz', 'foo']
bucket = _create_keys(keys=key_names)
# grab the data from each key individually
data = {}
for key_name in key_names:
key = bucket.get_key(key_name)
acl = key.get_acl()
data.update({
key_name: {
'user_id': acl.owner.id,
'display_name': acl.owner.display_name,
'etag': key.etag,
'last_modified': key.last_modified,
'size': key.size,
'md5': key.md5,
'content_encoding': key.content_encoding,
}
})
# now grab the data from each key through list
li = bucket.list()
for key in li:
key_data = data[key.name]
eq(key.content_encoding, key_data['content_encoding'])
eq(key.owner.display_name, key_data['display_name'])
eq(key.etag, key_data['etag'])
eq(key.md5, key_data['md5'])
eq(key.size, key_data['size'])
eq(key.owner.id, key_data['user_id'])
_compare_dates(key.last_modified, key_data['last_modified'])
@attr(resource='object.metadata')
@attr(method='head')
@attr(operation='modification-times')
@attr(assertion='http and ISO-8601 times agree')
def test_bucket_list_object_time():
bucket = _create_keys(keys=['foo'])
# e.g. 'Wed, 10 Aug 2011 21:58:25 GMT'
key = bucket.get_key('foo')
http_datetime = key.last_modified
# ISO-8601 formatted datetime
# there should be only one element, but list doesn't have a __getitem__
# only an __iter__
for key in bucket.list():
iso_datetime = key.last_modified
_compare_dates(iso_datetime, http_datetime)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='non-existent bucket')
@attr(assertion='fails 404')
def test_bucket_notexist():
# generate a (hopefully) unique, not-yet existent bucket name
name = '{prefix}foo'.format(prefix=get_prefix())
print 'Trying bucket {name!r}'.format(name=name)
e = assert_raises(boto.exception.S3ResponseError, s3.main.get_bucket, name)
eq(e.status, 404)
eq(e.reason, 'Not Found')
eq(e.error_code, 'NoSuchBucket')
@attr(resource='bucket')
@attr(method='delete')
@attr(operation='non-existent bucket')
@attr(assertion='fails 404')
def test_bucket_delete_notexist():
name = '{prefix}foo'.format(prefix=get_prefix())
print 'Trying bucket {name!r}'.format(name=name)
e = assert_raises(boto.exception.S3ResponseError, s3.main.delete_bucket, name)
eq(e.status, 404)
eq(e.reason, 'Not Found')
eq(e.error_code, 'NoSuchBucket')
@attr(resource='bucket')
@attr(method='delete')
@attr(operation='non-empty bucket')
@attr(assertion='fails 409')
def test_bucket_delete_nonempty():
bucket = get_new_bucket()
# fill up bucket
key = bucket.new_key('foo')
key.set_contents_from_string('foocontent')
# try to delete
e = assert_raises(boto.exception.S3ResponseError, bucket.delete)
eq(e.status, 409)
eq(e.reason, 'Conflict')
eq(e.error_code, 'BucketNotEmpty')
@attr(resource='object')
@attr(method='put')
@attr(operation='non-existent bucket')
@attr(assertion='fails 404')
def test_object_write_to_nonexist_bucket():
name = '{prefix}foo'.format(prefix=get_prefix())
print 'Trying bucket {name!r}'.format(name=name)
bucket = s3.main.get_bucket(name, validate=False)
key = bucket.new_key('foo123bar')
e = assert_raises(boto.exception.S3ResponseError, key.set_contents_from_string, 'foo')
eq(e.status, 404)
eq(e.reason, 'Not Found')
eq(e.error_code, 'NoSuchBucket')
@attr(resource='bucket')
@attr(method='del')
@attr(operation='deleted bucket')
@attr(assertion='fails 404')
def test_bucket_create_delete():
name = '{prefix}foo'.format(prefix=get_prefix())
print 'Trying bucket {name!r}'.format(name=name)
bucket = get_new_bucket(targets.main.default, name)
# make sure it's actually there
s3.main.get_bucket(bucket.name)
bucket.delete()
# make sure it's gone
e = assert_raises(boto.exception.S3ResponseError, bucket.delete)
eq(e.status, 404)
eq(e.reason, 'Not Found')
eq(e.error_code, 'NoSuchBucket')
@attr(resource='object')
@attr(method='get')
@attr(operation='read contents that were never written')
@attr(assertion='fails 404')
def test_object_read_notexist():
bucket = get_new_bucket()
key = bucket.new_key('foobar')
e = assert_raises(boto.exception.S3ResponseError, key.get_contents_as_string)
eq(e.status, 404)
eq(e.reason, 'Not Found')
eq(e.error_code, 'NoSuchKey')
@attr(resource='object')
@attr(method='put')
@attr(operation='write to special characters key')
@attr(assertion='succeeds')
def test_object_create_special_characters():
bucket = get_new_bucket()
# sourced from: http://xml.silmaril.ie/specials.html
key = bucket.new_key('<&>"\'')
key.set_contents_from_string('bar')
got = key.get_contents_as_string()
eq(got, 'bar')
bucket.get_all_keys()
# While the test itself passes, there's a SAX parser error during teardown. It
# seems to be a boto bug. It happens with both amazon and dho.
# http://code.google.com/p/boto/issues/detail?id=501
@attr(resource='object')
@attr(method='put')
@attr(operation='write to non-printing key')
@attr(assertion='succeeds')
def test_object_create_unreadable():
bucket = get_new_bucket()
key = bucket.new_key('\x0a')
key.set_contents_from_string('bar')
@attr(resource='object')
@attr(method='post')
@attr(operation='delete multiple objects')
@attr(assertion='deletes multiple objects with a single call')
def test_multi_object_delete():
bucket = get_new_bucket()
key0 = bucket.new_key('key0')
key0.set_contents_from_string('foo')
key1 = bucket.new_key('key1')
key1.set_contents_from_string('bar')
stored_keys = bucket.get_all_keys()
eq(len(stored_keys), 2)
result = bucket.delete_keys(stored_keys)
eq(len(result.deleted), 2)
eq(len(result.errors), 0)
eq(len(bucket.get_all_keys()), 0)
# now remove again, should all succeed due to idempotency
result = bucket.delete_keys(stored_keys)
eq(len(result.deleted), 2)
eq(len(result.errors), 0)
eq(len(bucket.get_all_keys()), 0)
@attr(resource='object')
@attr(method='put')
@attr(operation='write key')
@attr(assertion='correct etag')
def test_object_write_check_etag():
bucket = get_new_bucket()
key = bucket.new_key('foo')
res = _make_request('PUT', bucket, key, body='bar', authenticated=True)
eq(res.status, 200)
eq(res.reason, 'OK')
eq(res.getheader("ETag"), '"37b51d194a7513e45b56f6524f2d51f2"')
@attr(resource='object')
@attr(method='put')
@attr(operation='write key')
@attr(assertion='correct cache control header')
def test_object_write_cache_control():
bucket = get_new_bucket()
key = bucket.new_key('foo')
cache_control = 'public, max-age=14400'
key.set_contents_from_string('bar', headers = {'Cache-Control': cache_control})
key2 = bucket.get_key('foo')
eq(key2.cache_control, cache_control)
@attr(resource='object')
@attr(method='put')
@attr(operation='write key')
@attr(assertion='correct expires header')
def test_object_write_expires():
bucket = get_new_bucket()
key = bucket.new_key('foo')
utc = pytz.utc
expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
expires = expires.strftime("%a, %d %b %Y %H:%M:%S GMT")
key.set_contents_from_string('bar', headers = {'Expires': expires})
key2 = bucket.get_key('foo')
eq(key2.expires, expires)
@attr(resource='object')
@attr(method='all')
@attr(operation='complete object life cycle')
@attr(assertion='read back what we wrote and rewrote')
def test_object_write_read_update_read_delete():
bucket = get_new_bucket()
# Write
key = bucket.new_key('foo')
key.set_contents_from_string('bar')
# Read
got = key.get_contents_as_string()
eq(got, 'bar')
# Update
key.set_contents_from_string('soup')
# Read
got = key.get_contents_as_string()
eq(got, 'soup')
# Delete
key.delete()
def _set_get_metadata(metadata, bucket=None):
"""
create a new key in a (new or specified) bucket,
set the meta1 property to a specified, value,
and then re-read and return that property
"""
if bucket is None:
bucket = get_new_bucket()
key = boto.s3.key.Key(bucket)
key.key = ('foo')
key.set_metadata('meta1', metadata)
key.set_contents_from_string('bar')
key2 = bucket.get_key('foo')
return key2.get_metadata('meta1')
@attr(resource='object.metadata')
@attr(method='put')
@attr(operation='metadata write/re-read')
@attr(assertion='reread what we wrote')
def test_object_set_get_metadata_none_to_good():
got = _set_get_metadata('mymeta')
eq(got, 'mymeta')
@attr(resource='object.metadata')
@attr(method='put')
@attr(operation='metadata write/re-read')
@attr(assertion='write empty value, returns empty value')
def test_object_set_get_metadata_none_to_empty():
got = _set_get_metadata('')
eq(got, '')
@attr(resource='object.metadata')
@attr(method='put')
@attr(operation='metadata write/re-write')
@attr(assertion='new value replaces old')
def test_object_set_get_metadata_overwrite_to_good():
bucket = get_new_bucket()
got = _set_get_metadata('oldmeta', bucket)
eq(got, 'oldmeta')
got = _set_get_metadata('newmeta', bucket)
eq(got, 'newmeta')
@attr(resource='object.metadata')
@attr(method='put')
@attr(operation='metadata write/re-write')
@attr(assertion='empty value replaces old')
def test_object_set_get_metadata_overwrite_to_empty():
bucket = get_new_bucket()
got = _set_get_metadata('oldmeta', bucket)
eq(got, 'oldmeta')
got = _set_get_metadata('', bucket)
eq(got, '')
@attr(resource='object.metadata')
@attr(method='put')
@attr(operation='metadata write/re-write')
@attr(assertion='UTF-8 values passed through')
def test_object_set_get_unicode_metadata():
bucket = get_new_bucket()
key = boto.s3.key.Key(bucket)
key.key = (u'foo')
key.set_metadata('meta1', u"Hello World\xe9")
key.set_contents_from_string('bar')
key2 = bucket.get_key('foo')
got = key2.get_metadata('meta1')
eq(got, u"Hello World\xe9")
@attr(resource='object.metadata')
@attr(method='put')
@attr(operation='metadata write/re-write')
@attr(assertion='non-UTF-8 values detected, but preserved')
@attr('fails_strict_rfc2616')
def test_object_set_get_non_utf8_metadata():
bucket = get_new_bucket()
key = boto.s3.key.Key(bucket)
key.key = ('foo')
key.set_metadata('meta1', '\x04mymeta')
key.set_contents_from_string('bar')
key2 = bucket.get_key('foo')
got = key2.get_metadata('meta1')
eq(got, '=?UTF-8?Q?=04mymeta?=')
def _set_get_metadata_unreadable(metadata, bucket=None):
"""
set and then read back a meta-data value (which presumably
includes some interesting characters), and return a list
containing the stored value AND the encoding with which it
was returned.
"""
got = _set_get_metadata(metadata, bucket)
got = decode_header(got)
return got
@attr(resource='object.metadata')
@attr(method='put')
@attr(operation='metadata write')
@attr(assertion='non-printing prefixes noted and preserved')
@attr('fails_strict_rfc2616')
def test_object_set_get_metadata_empty_to_unreadable_prefix():
metadata = '\x04w'
got = _set_get_metadata_unreadable(metadata)
eq(got, [(metadata, 'utf-8')])
@attr(resource='object.metadata')
@attr(method='put')
@attr(operation='metadata write')
@attr(assertion='non-printing suffixes noted and preserved')
@attr('fails_strict_rfc2616')
def test_object_set_get_metadata_empty_to_unreadable_suffix():
metadata = 'h\x04'
got = _set_get_metadata_unreadable(metadata)
eq(got, [(metadata, 'utf-8')])
@attr(resource='object.metadata')
@attr(method='put')
@attr(operation='metadata write')
@attr(assertion='non-printing in-fixes noted and preserved')
def test_object_set_get_metadata_empty_to_unreadable_infix():
metadata = 'h\x04w'
got = _set_get_metadata_unreadable(metadata)
eq(got, [(metadata, 'utf-8')])
@attr(resource='object.metadata')
@attr(method='put')
@attr(operation='metadata re-write')
@attr(assertion='non-printing prefixes noted and preserved')
@attr('fails_strict_rfc2616')
def test_object_set_get_metadata_overwrite_to_unreadable_prefix():
metadata = '\x04w'
got = _set_get_metadata_unreadable(metadata)
eq(got, [(metadata, 'utf-8')])
metadata2 = '\x05w'
got2 = _set_get_metadata_unreadable(metadata2)
eq(got2, [(metadata2, 'utf-8')])
@attr(resource='object.metadata')
@attr(method='put')
@attr(operation='metadata re-write')
@attr(assertion='non-printing suffixes noted and preserved')
@attr('fails_strict_rfc2616')
def test_object_set_get_metadata_overwrite_to_unreadable_suffix():
metadata = 'h\x04'
got = _set_get_metadata_unreadable(metadata)
eq(got, [(metadata, 'utf-8')])
metadata2 = 'h\x05'
got2 = _set_get_metadata_unreadable(metadata2)
eq(got2, [(metadata2, 'utf-8')])
@attr(resource='object.metadata')
@attr(method='put')
@attr(operation='metadata re-write')
@attr(assertion='non-printing in-fixes noted and preserved')
def test_object_set_get_metadata_overwrite_to_unreadable_infix():
metadata = 'h\x04w'
got = _set_get_metadata_unreadable(metadata)
eq(got, [(metadata, 'utf-8')])
metadata2 = 'h\x05w'
got2 = _set_get_metadata_unreadable(metadata2)
eq(got2, [(metadata2, 'utf-8')])
@attr(resource='object')
@attr(method='put')
@attr(operation='data re-write')
@attr(assertion='replaces previous metadata')
def test_object_metadata_replaced_on_put():
bucket = get_new_bucket()
# create object with metadata
key = bucket.new_key('foo')
key.set_metadata('meta1', 'bar')
key.set_contents_from_string('bar')
# overwrite previous object, no metadata
key2 = bucket.new_key('foo')
key2.set_contents_from_string('bar')
# should see no metadata, as per 2nd write
key3 = bucket.get_key('foo')
got = key3.get_metadata('meta1')
assert got is None, "did not expect to see metadata: %r" % got
@attr(resource='object')
@attr(method='put')
@attr(operation='data write from file (w/100-Continue)')
@attr(assertion='succeeds and returns written data')
def test_object_write_file():
# boto Key.set_contents_from_file / .send_file uses Expect:
# 100-Continue, so this test exercises that (though a bit too
# subtly)
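# (with Expect: 100-Continue the client sends the headers first, waits for the
# server's interim "HTTP/1.1 100 Continue" response, and only then transmits
# the request body)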
bucket = get_new_bucket()
key = bucket.new_key('foo')
data = StringIO('bar')
key.set_contents_from_file(fp=data)
got = key.get_contents_as_string()
eq(got, 'bar')
def _get_post_url(conn, bucket):
url = '{protocol}://{host}:{port}/{bucket}'.format(protocol= 'https' if conn.is_secure else 'http',\
host=conn.host, port=conn.port, bucket=bucket.name)
return url
@attr(resource='object')
@attr(method='post')
@attr(operation='anonymous browser based upload via POST request')
@attr(assertion='succeeds and returns written data')
def test_post_object_anonymous_request():
bucket = get_new_bucket()
url = _get_post_url(s3.main, bucket)
bucket.set_acl('public-read-write')
payload = OrderedDict([("key" , "foo.txt"),("acl" , "public-read"),\
("Content-Type" , "text/plain"),('file', ('bar'))])
r = requests.post(url, files = payload)
eq(r.status_code, 204)
key = bucket.get_key("foo.txt")
got = key.get_contents_as_string()
eq(got, 'bar')
@attr(resource='object')
@attr(method='post')
@attr(operation='authenticated browser based upload via POST request')
@attr(assertion='succeeds and returns written data')
def test_post_object_authenticated_request():
bucket = get_new_bucket()
url = _get_post_url(s3.main, bucket)
utc = pytz.utc
expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
"conditions": [\
{"bucket": bucket.name},\
["starts-with", "$key", "foo"],\
{"acl": "private"},\
["starts-with", "$Content-Type", "text/plain"],\
["content-length-range", 0, 1024]\
]\
}
json_policy_document = json.JSONEncoder().encode(policy_document)
policy = base64.b64encode(json_policy_document)
conn = s3.main
signature = base64.b64encode(hmac.new(conn.aws_secret_access_key, policy, sha).digest())
payload = OrderedDict([ ("key" , "foo.txt"),("AWSAccessKeyId" , conn.aws_access_key_id),\
("acl" , "private"),("signature" , signature),("policy" , policy),\
("Content-Type" , "text/plain"),('file', ('bar'))])
r = requests.post(url, files = payload)
eq(r.status_code, 204)
key = bucket.get_key("foo.txt")
got = key.get_contents_as_string()
eq(got, 'bar')
@attr(resource='object')
@attr(method='post')
@attr(operation='authenticated browser based upload via POST request, bad access key')
@attr(assertion='fails')
def test_post_object_authenticated_request_bad_access_key():
bucket = get_new_bucket()
bucket.set_acl('public-read-write')
url = _get_post_url(s3.main, bucket)
utc = pytz.utc
expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
"conditions": [\
{"bucket": bucket.name},\
["starts-with", "$key", "foo"],\
{"acl": "private"},\
["starts-with", "$Content-Type", "text/plain"],\
["content-length-range", 0, 1024]\
]\
}
json_policy_document = json.JSONEncoder().encode(policy_document)
policy = base64.b64encode(json_policy_document)
conn = s3.main
signature = base64.b64encode(hmac.new(conn.aws_secret_access_key, policy, sha).digest())
payload = OrderedDict([ ("key" , "foo.txt"),("AWSAccessKeyId" , 'foo'),\
("acl" , "private"),("signature" , signature),("policy" , policy),\
("Content-Type" , "text/plain"),('file', ('bar'))])
r = requests.post(url, files = payload)
eq(r.status_code, 403)
@attr(resource='object')
@attr(method='post')
@attr(operation='anonymous browser based upload via POST request')
@attr(assertion='succeeds with status 201')
def test_post_object_set_success_code():
bucket = get_new_bucket()
bucket.set_acl('public-read-write')
url = _get_post_url(s3.main, bucket)
payload = OrderedDict([("key" , "foo.txt"),("acl" , "public-read"),\
("success_action_status" , "201"),\
("Content-Type" , "text/plain"),('file', ('bar'))])
r = requests.post(url, files = payload)
eq(r.status_code, 201)
message = ET.fromstring(r.content).find('Key')
eq(message.text,'foo.txt')
@attr(resource='object')
@attr(method='post')
@attr(operation='anonymous browser based upload via POST request')
@attr(assertion='succeeds with status 204')
def test_post_object_set_invalid_success_code():
bucket = get_new_bucket()
bucket.set_acl('public-read-write')
url = _get_post_url(s3.main, bucket)
payload = OrderedDict([("key" , "foo.txt"),("acl" , "public-read"),\
("success_action_status" , "404"),\
("Content-Type" , "text/plain"),('file', ('bar'))])
r = requests.post(url, files = payload)
eq(r.status_code, 204)
eq(r.content,'')
@attr(resource='object')
@attr(method='post')
@attr(operation='authenticated browser based upload via POST request')
@attr(assertion='succeeds and returns written data')
def test_post_object_upload_larger_than_chunk():
bucket = get_new_bucket()
url = _get_post_url(s3.main, bucket)
utc = pytz.utc
expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
"conditions": [\
{"bucket": bucket.name},\
["starts-with", "$key", "foo"],\
{"acl": "private"},\
["starts-with", "$Content-Type", "text/plain"],\
["content-length-range", 0, 5*1024*1024]\
]\
}
json_policy_document = json.JSONEncoder().encode(policy_document)
policy = base64.b64encode(json_policy_document)
conn = s3.main
signature = base64.b64encode(hmac.new(conn.aws_secret_access_key, policy, sha).digest())
foo_string = 'foo' * 1024*1024
payload = OrderedDict([ ("key" , "foo.txt"),("AWSAccessKeyId" , conn.aws_access_key_id),\
("acl" , "private"),("signature" , signature),("policy" , policy),\
("Content-Type" , "text/plain"),('file', foo_string)])
r = requests.post(url, files = payload)
eq(r.status_code, 204)
key = bucket.get_key("foo.txt")
got = key.get_contents_as_string()
eq(got, foo_string)
@attr(resource='object')
@attr(method='post')
@attr(operation='authenticated browser based upload via POST request')
@attr(assertion='succeeds and returns written data')
def test_post_object_set_key_from_filename():
bucket = get_new_bucket()
url = _get_post_url(s3.main, bucket)
utc = pytz.utc
expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
"conditions": [\
{"bucket": bucket.name},\
["starts-with", "$key", "foo"],\
{"acl": "private"},\
["starts-with", "$Content-Type", "text/plain"],\
["content-length-range", 0, 5*1024*1024]\
]\
}
json_policy_document = json.JSONEncoder().encode(policy_document)
policy = base64.b64encode(json_policy_document)
conn = s3.main
signature = base64.b64encode(hmac.new(conn.aws_secret_access_key, policy, sha).digest())
payload = OrderedDict([ ("key" , "${filename}"),("AWSAccessKeyId" , conn.aws_access_key_id),\
("acl" , "private"),("signature" , signature),("policy" , policy),\
("Content-Type" , "text/plain"),('file', ('foo.txt', 'bar'))])
r = requests.post(url, files = payload)
eq(r.status_code, 204)
key = bucket.get_key("foo.txt")
got = key.get_contents_as_string()
eq(got, 'bar')
@attr(resource='object')
@attr(method='post')
@attr(operation='authenticated browser based upload via POST request')
@attr(assertion='succeeds with status 204')
def test_post_object_ignored_header():
bucket = get_new_bucket()
url = _get_post_url(s3.main, bucket)
utc = pytz.utc
expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
"conditions": [\
{"bucket": bucket.name},\
["starts-with", "$key", "foo"],\
{"acl": "private"},\
["starts-with", "$Content-Type", "text/plain"],\
["content-length-range", 0, 1024]\
]\
}
json_policy_document = json.JSONEncoder().encode(policy_document)
policy = base64.b64encode(json_policy_document)
conn = s3.main
signature = base64.b64encode(hmac.new(conn.aws_secret_access_key, policy, sha).digest())
payload = OrderedDict([ ("key" , "foo.txt"),("AWSAccessKeyId" , conn.aws_access_key_id),\
("acl" , "private"),("signature" , signature),("policy" , policy),\
("Content-Type" , "text/plain"),("x-ignore-foo" , "bar"),('file', ('bar'))])
r = requests.post(url, files = payload)
eq(r.status_code, 204)
@attr(resource='object')
@attr(method='post')
@attr(operation='authenticated browser based upload via POST request')
@attr(assertion='succeeds with status 204')
def test_post_object_case_insensitive_condition_fields():
bucket = get_new_bucket()
url = _get_post_url(s3.main, bucket)
utc = pytz.utc
expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
"conditions": [\
{"bUcKeT": bucket.name},\
["StArTs-WiTh", "$KeY", "foo"],\
{"AcL": "private"},\
["StArTs-WiTh", "$CoNtEnT-TyPe", "text/plain"],\
["content-length-range", 0, 1024]\
]\
}
json_policy_document = json.JSONEncoder().encode(policy_document)
policy = base64.b64encode(json_policy_document)
conn = s3.main
signature = base64.b64encode(hmac.new(conn.aws_secret_access_key, policy, sha).digest())
payload = OrderedDict([ ("kEy" , "foo.txt"),("AWSAccessKeyId" , conn.aws_access_key_id),\
("aCl" , "private"),("signature" , signature),("pOLICy" , policy),\
("Content-Type" , "text/plain"),('file', ('bar'))])
r = requests.post(url, files = payload)
eq(r.status_code, 204)
@attr(resource='object')
@attr(method='post')
@attr(operation='authenticated browser based upload via POST request')
@attr(assertion='succeeds with escaped leading $ and returns written data')
def test_post_object_escaped_field_values():
bucket = get_new_bucket()
url = _get_post_url(s3.main, bucket)
utc = pytz.utc
expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
"conditions": [\
{"bucket": bucket.name},\
["starts-with", "$key", "\$foo"],\
{"acl": "private"},\
["starts-with", "$Content-Type", "text/plain"],\
["content-length-range", 0, 1024]\
]\
}
json_policy_document = json.JSONEncoder().encode(policy_document)
policy = base64.b64encode(json_policy_document)
conn = s3.main
signature = base64.b64encode(hmac.new(conn.aws_secret_access_key, policy, sha).digest())
payload = OrderedDict([ ("key" , "\$foo.txt"),("AWSAccessKeyId" , conn.aws_access_key_id),\
("acl" , "private"),("signature" , signature),("policy" , policy),\
("Content-Type" , "text/plain"),('file', ('bar'))])
r = requests.post(url, files = payload)
eq(r.status_code, 204)
key = bucket.get_key("\$foo.txt")
got = key.get_contents_as_string()
eq(got, 'bar')
@attr(resource='object')
@attr(method='post')
@attr(operation='authenticated browser based upload via POST request')
@attr(assertion='succeeds and returns redirect url')
def test_post_object_success_redirect_action():
bucket = get_new_bucket()
url = _get_post_url(s3.main, bucket)
redirect_url = _get_post_url(s3.main, bucket)
bucket.set_acl('public-read')
utc = pytz.utc
expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
"conditions": [\
{"bucket": bucket.name},\
["starts-with", "$key", "foo"],\
{"acl": "private"},\
["starts-with", "$Content-Type", "text/plain"],\
["eq", "$success_action_redirect", redirect_url],\
["content-length-range", 0, 1024]\
]\
}
json_policy_document = json.JSONEncoder().encode(policy_document)
policy = base64.b64encode(json_policy_document)
conn = s3.main
signature = base64.b64encode(hmac.new(conn.aws_secret_access_key, policy, sha).digest())
payload = OrderedDict([ ("key" , "foo.txt"),("AWSAccessKeyId" , conn.aws_access_key_id),\
("acl" , "private"),("signature" , signature),("policy" , policy),\
("Content-Type" , "text/plain"),("success_action_redirect" , redirect_url),\
('file', ('bar'))])
r = requests.post(url, files = payload)
eq(r.status_code, 200)
url = r.url
key = bucket.get_key("foo.txt")
eq(url,
'{rurl}?bucket={bucket}&key={key}&etag=%22{etag}%22'.format(rurl = redirect_url, bucket = bucket.name,
key = key.name, etag = key.etag.strip('"')))
@attr(resource='object')
@attr(method='post')
@attr(operation='authenticated browser based upload via POST request')
@attr(assertion='fails with invalid signature error')
def test_post_object_invalid_signature():
bucket = get_new_bucket()
url = _get_post_url(s3.main, bucket)
utc = pytz.utc
expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
"conditions": [\
{"bucket": bucket.name},\
["starts-with", "$key", "\$foo"],\
{"acl": "private"},\
["starts-with", "$Content-Type", "text/plain"],\
["content-length-range", 0, 1024]\
]\
}
json_policy_document = json.JSONEncoder().encode(policy_document)
policy = base64.b64encode(json_policy_document)
conn = s3.main
signature = base64.b64encode(hmac.new(conn.aws_secret_access_key, policy, sha).digest())[::-1]
payload = OrderedDict([ ("key" , "\$foo.txt"),("AWSAccessKeyId" , conn.aws_access_key_id),\
("acl" , "private"),("signature" , signature),("policy" , policy),\
("Content-Type" , "text/plain"),('file', ('bar'))])
r = requests.post(url, files = payload)
eq(r.status_code, 403)
@attr(resource='object')
@attr(method='post')
@attr(operation='authenticated browser based upload via POST request')
@attr(assertion='fails with access key does not exist error')
def test_post_object_invalid_access_key():
bucket = get_new_bucket()
url = _get_post_url(s3.main, bucket)
utc = pytz.utc
expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
"conditions": [\
{"bucket": bucket.name},\
["starts-with", "$key", "\$foo"],\
{"acl": "private"},\
["starts-with", "$Content-Type", "text/plain"],\
["content-length-range", 0, 1024]\
]\
}
json_policy_document = json.JSONEncoder().encode(policy_document)
policy = base64.b64encode(json_policy_document)
conn = s3.main
signature = base64.b64encode(hmac.new(conn.aws_secret_access_key, policy, sha).digest())
payload = OrderedDict([ ("key" , "\$foo.txt"),("AWSAccessKeyId" , conn.aws_access_key_id[::-1]),\
("acl" , "private"),("signature" , signature),("policy" , policy),\
("Content-Type" , "text/plain"),('file', ('bar'))])
r = requests.post(url, files = payload)
eq(r.status_code, 403)
@attr(resource='object')
@attr(method='post')
@attr(operation='authenticated browser based upload via POST request')
@attr(assertion='fails with invalid expiration error')
def test_post_object_invalid_date_format():
bucket = get_new_bucket()
url = _get_post_url(s3.main, bucket)
utc = pytz.utc
expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
policy_document = {"expiration": str(expires),\
"conditions": [\
{"bucket": bucket.name},\
["starts-with", "$key", "\$foo"],\
{"acl": "private"},\
["starts-with", "$Content-Type", "text/plain"],\
["content-length-range", 0, 1024]\
]\
}
json_policy_document = json.JSONEncoder().encode(policy_document)
policy = base64.b64encode(json_policy_document)
conn = s3.main
signature = base64.b64encode(hmac.new(conn.aws_secret_access_key, policy, sha).digest())
payload = OrderedDict([ ("key" , "\$foo.txt"),("AWSAccessKeyId" , conn.aws_access_key_id),\
("acl" , "private"),("signature" , signature),("policy" , policy),\
("Content-Type" , "text/plain"),('file', ('bar'))])
r = requests.post(url, files = payload)
eq(r.status_code, 400)
@attr(resource='object')
@attr(method='post')
@attr(operation='authenticated browser based upload via POST request')
@attr(assertion='fails with missing key error')
def test_post_object_no_key_specified():
bucket = get_new_bucket()
url = _get_post_url(s3.main, bucket)
utc = pytz.utc
expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
"conditions": [\
{"bucket": bucket.name},\
{"acl": "private"},\
["starts-with", "$Content-Type", "text/plain"],\
["content-length-range", 0, 1024]\
]\
}
json_policy_document = json.JSONEncoder().encode(policy_document)
policy = base64.b64encode(json_policy_document)
conn = s3.main
signature = base64.b64encode(hmac.new(conn.aws_secret_access_key, policy, sha).digest())
payload = OrderedDict([ ("AWSAccessKeyId" , conn.aws_access_key_id),\
("acl" , "private"),("signature" , signature),("policy" , policy),\
("Content-Type" , "text/plain"),('file', ('bar'))])
r = requests.post(url, files = payload)
eq(r.status_code, 400)
@attr(resource='object')
@attr(method='post')
@attr(operation='authenticated browser based upload via POST request')
@attr(assertion='fails with missing signature error')
def test_post_object_missing_signature():
bucket = get_new_bucket()
url = _get_post_url(s3.main, bucket)
utc = pytz.utc
expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
"conditions": [\
{"bucket": bucket.name},\
["starts-with", "$key", "\$foo"],\
{"acl": "private"},\
["starts-with", "$Content-Type", "text/plain"],\
["content-length-range", 0, 1024]\
]\
}
json_policy_document = json.JSONEncoder().encode(policy_document)
policy = base64.b64encode(json_policy_document)
conn = s3.main
signature = base64.b64encode(hmac.new(conn.aws_secret_access_key, policy, sha).digest())
payload = OrderedDict([ ("key" , "foo.txt"),("AWSAccessKeyId" , conn.aws_access_key_id),\
("acl" , "private"),("policy" , policy),\
("Content-Type" , "text/plain"),('file', ('bar'))])
r = requests.post(url, files = payload)
eq(r.status_code, 400)
@attr(resource='object')
@attr(method='post')
@attr(operation='authenticated browser based upload via POST request')
@attr(assertion='fails with extra input fields policy error')
def test_post_object_missing_policy_condition():
bucket = get_new_bucket()
url = _get_post_url(s3.main, bucket)
utc = pytz.utc
expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
"conditions": [\
["starts-with", "$key", "foo"],\
{"acl": "private"},\
["starts-with", "$Content-Type", "text/plain"],\
["content-length-range", 0, 1024]\
]\
}
json_policy_document = json.JSONEncoder().encode(policy_document)
policy = base64.b64encode(json_policy_document)
conn = s3.main
signature = base64.b64encode(hmac.new(conn.aws_secret_access_key, policy, sha).digest())
payload = OrderedDict([ ("key" , "foo.txt"),("AWSAccessKeyId" , conn.aws_access_key_id),\
("acl" , "private"),("signature" , signature),("policy" , policy),\
("Content-Type" , "text/plain"),('file', ('bar'))])
r = requests.post(url, files = payload)
eq(r.status_code, 403)
@attr(resource='object')
@attr(method='post')
@attr(operation='authenticated browser based upload via POST request')
@attr(assertion='succeeds using starts-with restriction on metadata header')
def test_post_object_user_specified_header():
bucket = get_new_bucket()
url = _get_post_url(s3.main, bucket)
utc = pytz.utc
expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
"conditions": [\
{"bucket": bucket.name},\
["starts-with", "$key", "foo"],\
{"acl": "private"},\
["starts-with", "$Content-Type", "text/plain"],\
["content-length-range", 0, 1024],\
["starts-with", "$x-amz-meta-foo", "bar"]
]\
}
json_policy_document = json.JSONEncoder().encode(policy_document)
policy = base64.b64encode(json_policy_document)
conn = s3.main
signature = base64.b64encode(hmac.new(conn.aws_secret_access_key, policy, sha).digest())
payload = OrderedDict([ ("key" , "foo.txt"),("AWSAccessKeyId" , conn.aws_access_key_id),\
("acl" , "private"),("signature" , signature),("policy" , policy),\
("Content-Type" , "text/plain"),('x-amz-meta-foo' , 'barclamp'),('file', ('bar'))])
r = requests.post(url, files = payload)
eq(r.status_code, 204)
key = bucket.get_key("foo.txt")
eq(key.get_metadata('foo'), 'barclamp')
@attr(resource='object')
@attr(method='post')
@attr(operation='authenticated browser based upload via POST request')
@attr(assertion='fails with policy condition failed error due to missing field in POST request')
def test_post_object_request_missing_policy_specified_field():
bucket = get_new_bucket()
url = _get_post_url(s3.main, bucket)
utc = pytz.utc
expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
"conditions": [\
{"bucket": bucket.name},\
["starts-with", "$key", "foo"],\
{"acl": "private"},\
["starts-with", "$Content-Type", "text/plain"],\
["content-length-range", 0, 1024],\
["starts-with", "$x-amz-meta-foo", "bar"]
]\
}
json_policy_document = json.JSONEncoder().encode(policy_document)
policy = base64.b64encode(json_policy_document)
conn = s3.main
signature = base64.b64encode(hmac.new(conn.aws_secret_access_key, policy, sha).digest())
payload = OrderedDict([ ("key" , "foo.txt"),("AWSAccessKeyId" , conn.aws_access_key_id),\
("acl" , "private"),("signature" , signature),("policy" , policy),\
("Content-Type" , "text/plain"),('file', ('bar'))])
r = requests.post(url, files = payload)
eq(r.status_code, 403)
@attr(resource='object')
@attr(method='post')
@attr(operation='authenticated browser based upload via POST request')
@attr(assertion='fails with conditions must be list error')
def test_post_object_condition_is_case_sensitive():
bucket = get_new_bucket()
url = _get_post_url(s3.main, bucket)
utc = pytz.utc
expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
"CONDITIONS": [\
{"bucket": bucket.name},\
["starts-with", "$key", "foo"],\
{"acl": "private"},\
["starts-with", "$Content-Type", "text/plain"],\
["content-length-range", 0, 1024],\
]\
}
json_policy_document = json.JSONEncoder().encode(policy_document)
policy = base64.b64encode(json_policy_document)
conn = s3.main
signature = base64.b64encode(hmac.new(conn.aws_secret_access_key, policy, sha).digest())
payload = OrderedDict([ ("key" , "foo.txt"),("AWSAccessKeyId" , conn.aws_access_key_id),\
("acl" , "private"),("signature" , signature),("policy" , policy),\
("Content-Type" , "text/plain"),('file', ('bar'))])
r = requests.post(url, files = payload)
eq(r.status_code, 400)
@attr(resource='object')
@attr(method='post')
@attr(operation='authenticated browser based upload via POST request')
@attr(assertion='fails with expiration must be string error')
def test_post_object_expires_is_case_sensitive():
bucket = get_new_bucket()
url = _get_post_url(s3.main, bucket)
utc = pytz.utc
expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
policy_document = {"EXPIRATION": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
"conditions": [\
{"bucket": bucket.name},\
["starts-with", "$key", "foo"],\
{"acl": "private"},\
["starts-with", "$Content-Type", "text/plain"],\
["content-length-range", 0, 1024],\
]\
}
json_policy_document = json.JSONEncoder().encode(policy_document)
policy = base64.b64encode(json_policy_document)
conn = s3.main
signature = base64.b64encode(hmac.new(conn.aws_secret_access_key, policy, sha).digest())
payload = OrderedDict([ ("key" , "foo.txt"),("AWSAccessKeyId" , conn.aws_access_key_id),\
("acl" , "private"),("signature" , signature),("policy" , policy),\
("Content-Type" , "text/plain"),('file', ('bar'))])
r = requests.post(url, files = payload)
eq(r.status_code, 400)
@attr(resource='object')
@attr(method='post')
@attr(operation='authenticated browser based upload via POST request')
@attr(assertion='fails with policy expired error')
def test_post_object_expired_policy():
bucket = get_new_bucket()
url = _get_post_url(s3.main, bucket)
utc = pytz.utc
expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=-6000)
policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
"conditions": [\
{"bucket": bucket.name},\
["starts-with", "$key", "foo"],\
{"acl": "private"},\
["starts-with", "$Content-Type", "text/plain"],\
["content-length-range", 0, 1024],\
]\
}
json_policy_document = json.JSONEncoder().encode(policy_document)
policy = base64.b64encode(json_policy_document)
conn = s3.main
signature = base64.b64encode(hmac.new(conn.aws_secret_access_key, policy, sha).digest())
payload = OrderedDict([ ("key" , "foo.txt"),("AWSAccessKeyId" , conn.aws_access_key_id),\
("acl" , "private"),("signature" , signature),("policy" , policy),\
("Content-Type" , "text/plain"),('file', ('bar'))])
r = requests.post(url, files = payload)
eq(r.status_code, 403)
@attr(resource='object')
@attr(method='post')
@attr(operation='authenticated browser based upload via POST request')
@attr(assertion='fails using equality restriction on metadata header')
def test_post_object_invalid_request_field_value():
bucket = get_new_bucket()
url = _get_post_url(s3.main, bucket)
utc = pytz.utc
expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
"conditions": [\
{"bucket": bucket.name},\
["starts-with", "$key", "foo"],\
{"acl": "private"},\
["starts-with", "$Content-Type", "text/plain"],\
["content-length-range", 0, 1024],\
["eq", "$x-amz-meta-foo", ""]
]\
}
json_policy_document = json.JSONEncoder().encode(policy_document)
policy = base64.b64encode(json_policy_document)
conn = s3.main
signature = base64.b64encode(hmac.new(conn.aws_secret_access_key, policy, sha).digest())
payload = OrderedDict([ ("key" , "foo.txt"),("AWSAccessKeyId" , conn.aws_access_key_id),\
("acl" , "private"),("signature" , signature),("policy" , policy),\
("Content-Type" , "text/plain"),('x-amz-meta-foo' , 'barclamp'),('file', ('bar'))])
r = requests.post(url, files = payload)
eq(r.status_code, 403)
@attr(resource='object')
@attr(method='post')
@attr(operation='authenticated browser based upload via POST request')
@attr(assertion='fails with policy missing expiration error')
def test_post_object_missing_expires_condition():
bucket = get_new_bucket()
url = _get_post_url(s3.main, bucket)
utc = pytz.utc
expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
policy_document = {\
"conditions": [\
{"bucket": bucket.name},\
["starts-with", "$key", "foo"],\
{"acl": "private"},\
["starts-with", "$Content-Type", "text/plain"],\
["content-length-range", 0, 1024],\
]\
}
json_policy_document = json.JSONEncoder().encode(policy_document)
policy = base64.b64encode(json_policy_document)
conn = s3.main
signature = base64.b64encode(hmac.new(conn.aws_secret_access_key, policy, sha).digest())
payload = OrderedDict([ ("key" , "foo.txt"),("AWSAccessKeyId" , conn.aws_access_key_id),\
("acl" , "private"),("signature" , signature),("policy" , policy),\
("Content-Type" , "text/plain"),('file', ('bar'))])
r = requests.post(url, files = payload)
eq(r.status_code, 400)
@attr(resource='object')
@attr(method='post')
@attr(operation='authenticated browser based upload via POST request')
@attr(assertion='fails with policy missing conditions error')
def test_post_object_missing_conditions_list():
bucket = get_new_bucket()
url = _get_post_url(s3.main, bucket)
utc = pytz.utc
expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
}
json_policy_document = json.JSONEncoder().encode(policy_document)
policy = base64.b64encode(json_policy_document)
conn = s3.main
signature = base64.b64encode(hmac.new(conn.aws_secret_access_key, policy, sha).digest())
payload = OrderedDict([ ("key" , "foo.txt"),("AWSAccessKeyId" , conn.aws_access_key_id),\
("acl" , "private"),("signature" , signature),("policy" , policy),\
("Content-Type" , "text/plain"),('file', ('bar'))])
r = requests.post(url, files = payload)
eq(r.status_code, 400)
@attr(resource='object')
@attr(method='post')
@attr(operation='authenticated browser based upload via POST request')
@attr(assertion='fails with allowable upload size exceeded error')
def test_post_object_upload_size_limit_exceeded():
bucket = get_new_bucket()
url = _get_post_url(s3.main, bucket)
utc = pytz.utc
expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
"conditions": [\
{"bucket": bucket.name},\
["starts-with", "$key", "foo"],\
{"acl": "private"},\
["starts-with", "$Content-Type", "text/plain"],\
["content-length-range", 0, 0]\
]\
}
json_policy_document = json.JSONEncoder().encode(policy_document)
policy = base64.b64encode(json_policy_document)
conn = s3.main
signature = base64.b64encode(hmac.new(conn.aws_secret_access_key, policy, sha).digest())
payload = OrderedDict([ ("key" , "foo.txt"),("AWSAccessKeyId" , conn.aws_access_key_id),\
("acl" , "private"),("signature" , signature),("policy" , policy),\
("Content-Type" , "text/plain"),('file', ('bar'))])
r = requests.post(url, files = payload)
eq(r.status_code, 400)
@attr(resource='object')
@attr(method='post')
@attr(operation='authenticated browser based upload via POST request')
@attr(assertion='fails with invalid content length error')
def test_post_object_missing_content_length_argument():
bucket = get_new_bucket()
url = _get_post_url(s3.main, bucket)
utc = pytz.utc
expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
"conditions": [\
{"bucket": bucket.name},\
["starts-with", "$key", "foo"],\
{"acl": "private"},\
["starts-with", "$Content-Type", "text/plain"],\
["content-length-range", 0]\
]\
}
json_policy_document = json.JSONEncoder().encode(policy_document)
policy = base64.b64encode(json_policy_document)
conn = s3.main
signature = base64.b64encode(hmac.new(conn.aws_secret_access_key, policy, sha).digest())
payload = OrderedDict([ ("key" , "foo.txt"),("AWSAccessKeyId" , conn.aws_access_key_id),\
("acl" , "private"),("signature" , signature),("policy" , policy),\
("Content-Type" , "text/plain"),('file', ('bar'))])
r = requests.post(url, files = payload)
eq(r.status_code, 400)
@attr(resource='object')
@attr(method='post')
@attr(operation='authenticated browser based upload via POST request')
@attr(assertion='fails with invalid JSON error')
def test_post_object_invalid_content_length_argument():
bucket = get_new_bucket()
url = _get_post_url(s3.main, bucket)
utc = pytz.utc
expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
"conditions": [\
{"bucket": bucket.name},\
["starts-with", "$key", "foo"],\
{"acl": "private"},\
["starts-with", "$Content-Type", "text/plain"],\
["content-length-range", -1, 0]\
]\
}
json_policy_document = json.JSONEncoder().encode(policy_document)
policy = base64.b64encode(json_policy_document)
conn = s3.main
signature = base64.b64encode(hmac.new(conn.aws_secret_access_key, policy, sha).digest())
payload = OrderedDict([ ("key" , "foo.txt"),("AWSAccessKeyId" , conn.aws_access_key_id),\
("acl" , "private"),("signature" , signature),("policy" , policy),\
("Content-Type" , "text/plain"),('file', ('bar'))])
r = requests.post(url, files = payload)
eq(r.status_code, 400)
@attr(resource='object')
@attr(method='post')
@attr(operation='authenticated browser based upload via POST request')
@attr(assertion='fails with upload size less than minimum allowable error')
def test_post_object_upload_size_below_minimum():
bucket = get_new_bucket()
url = _get_post_url(s3.main, bucket)
utc = pytz.utc
expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
"conditions": [\
{"bucket": bucket.name},\
["starts-with", "$key", "foo"],\
{"acl": "private"},\
["starts-with", "$Content-Type", "text/plain"],\
["content-length-range", 512, 1000]\
]\
}
json_policy_document = json.JSONEncoder().encode(policy_document)
policy = base64.b64encode(json_policy_document)
conn = s3.main
signature = base64.b64encode(hmac.new(conn.aws_secret_access_key, policy, sha).digest())
payload = OrderedDict([ ("key" , "foo.txt"),("AWSAccessKeyId" , conn.aws_access_key_id),\
("acl" , "private"),("signature" , signature),("policy" , policy),\
("Content-Type" , "text/plain"),('file', ('bar'))])
r = requests.post(url, files = payload)
eq(r.status_code, 400)
@attr(resource='object')
@attr(method='put')
@attr(operation='data re-write w/ If-Match: the latest ETag')
@attr(assertion='replaces previous data and metadata')
@attr('fails_on_aws')
def test_put_object_ifmatch_good():
bucket = get_new_bucket()
key = bucket.new_key('foo')
key.set_contents_from_string('bar')
got_data = key.get_contents_as_string()
eq(got_data, 'bar')
key.set_contents_from_string('zar', headers={'If-Match': key.etag.replace('"', '').strip()})
got_new_data = key.get_contents_as_string()
eq(got_new_data, 'zar')
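# On the wire the conditional overwrite above is just a PUT carrying an
# If-Match header with the object's current ETag (a sketch; the ETag value
# shown is illustrative):
#
#   PUT /<bucket>/foo HTTP/1.1
#   If-Match: 37b51d194a7513e45b56f6524f2d51f2
#   ...
#   zar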
@attr(resource='object')
@attr(method='put')
@attr(operation='data re-write w/ If-Match: outdated ETag')
@attr(assertion='fails 412')
@attr('fails_on_aws')
def test_put_object_ifmatch_failed():
bucket = get_new_bucket()
key = bucket.new_key('foo')
key.set_contents_from_string('bar')
got_data = key.get_contents_as_string()
eq(got_data, 'bar')
e = assert_raises(boto.exception.S3ResponseError, key.set_contents_from_string, 'zar',
headers={'If-Match': 'ABCORZ'})
eq(e.status, 412)
eq(e.reason, 'Precondition Failed')
eq(e.error_code, 'PreconditionFailed')
got_old_data = key.get_contents_as_string()
eq(got_old_data, 'bar')
@attr(resource='object')
@attr(method='put')
@attr(operation='overwrite existing object w/ If-Match: *')
@attr(assertion='replaces previous data and metadata')
@attr('fails_on_aws')
def test_put_object_ifmatch_overwrite_existed_good():
bucket = get_new_bucket()
key = bucket.new_key('foo')
key.set_contents_from_string('bar')
got_data = key.get_contents_as_string()
eq(got_data, 'bar')
key.set_contents_from_string('zar', headers={'If-Match': '*'})
got_new_data = key.get_contents_as_string()
eq(got_new_data, 'zar')
@attr(resource='object')
@attr(method='put')
@attr(operation='overwrite non-existing object w/ If-Match: *')
@attr(assertion='fails 412')
@attr('fails_on_aws')
def test_put_object_ifmatch_nonexisted_failed():
bucket = get_new_bucket()
key = bucket.new_key('foo')
e = assert_raises(boto.exception.S3ResponseError, key.set_contents_from_string, 'bar', headers={'If-Match': '*'})
eq(e.status, 412)
eq(e.reason, 'Precondition Failed')
eq(e.error_code, 'PreconditionFailed')
e = assert_raises(boto.exception.S3ResponseError, key.get_contents_as_string)
eq(e.status, 404)
eq(e.reason, 'Not Found')
eq(e.error_code, 'NoSuchKey')
@attr(resource='object')
@attr(method='put')
@attr(operation='overwrite existing object w/ If-None-Match: outdated ETag')
@attr(assertion='replaces previous data and metadata')
@attr('fails_on_aws')
def test_put_object_ifnonmatch_good():
bucket = get_new_bucket()
key = bucket.new_key('foo')
key.set_contents_from_string('bar')
got_data = key.get_contents_as_string()
eq(got_data, 'bar')
key.set_contents_from_string('zar', headers={'If-None-Match': 'ABCORZ'})
got_new_data = key.get_contents_as_string()
eq(got_new_data, 'zar')
@attr(resource='object')
@attr(method='put')
@attr(operation='overwrite existing object w/ If-None-Match: the latest ETag')
@attr(assertion='fails 412')
@attr('fails_on_aws')
def test_put_object_ifnonmatch_failed():
bucket = get_new_bucket()
key = bucket.new_key('foo')
key.set_contents_from_string('bar')
got_data = key.get_contents_as_string()
eq(got_data, 'bar')
e = assert_raises(boto.exception.S3ResponseError, key.set_contents_from_string, 'zar',
headers={'If-None-Match': key.etag.replace('"', '').strip()})
eq(e.status, 412)
eq(e.reason, 'Precondition Failed')
eq(e.error_code, 'PreconditionFailed')
got_old_data = key.get_contents_as_string()
eq(got_old_data, 'bar')
@attr(resource='object')
@attr(method='put')
@attr(operation='overwrite non-existing object w/ If-None-Match: *')
@attr(assertion='succeeds')
@attr('fails_on_aws')
def test_put_object_ifnonmatch_nonexisted_good():
bucket = get_new_bucket()
key = bucket.new_key('foo')
key.set_contents_from_string('bar', headers={'If-None-Match': '*'})
got_data = key.get_contents_as_string()
eq(got_data, 'bar')
@attr(resource='object')
@attr(method='put')
@attr(operation='overwrite existing object w/ If-None-Match: *')
@attr(assertion='fails 412')
@attr('fails_on_aws')
def test_put_object_ifnonmatch_overwrite_existed_failed():
bucket = get_new_bucket()
key = bucket.new_key('foo')
key.set_contents_from_string('bar')
got_data = key.get_contents_as_string()
eq(got_data, 'bar')
e = assert_raises(boto.exception.S3ResponseError, key.set_contents_from_string,
'zar', headers={'If-None-Match': '*'})
eq(e.status, 412)
eq(e.reason, 'Precondition Failed')
eq(e.error_code, 'PreconditionFailed')
got_old_data = key.get_contents_as_string()
eq(got_old_data, 'bar')
def _setup_request(bucket_acl=None, object_acl=None):
"""
add a foo key, and specified key and bucket acls to
a (new or existing) bucket.
"""
bucket = _create_keys(keys=['foo'])
key = bucket.get_key('foo')
if bucket_acl is not None:
bucket.set_acl(bucket_acl)
if object_acl is not None:
key.set_acl(object_acl)
return (bucket, key)
def _setup_bucket_request(bucket_acl=None):
"""
set up a new bucket with the specified acl (when given) and return it
"""
bucket = get_new_bucket()
if bucket_acl is not None:
bucket.set_acl(bucket_acl)
return bucket
def _make_request(method, bucket, key, body=None, authenticated=False, response_headers=None, expires_in=100000):
"""
issue a request with the specified method against the specified
<bucket,key>, optionally authenticated (via a signed URL) and with an
optional body, and return the httplib response object
"""
if authenticated:
url = key.generate_url(expires_in, method=method, response_headers=response_headers)
o = urlparse(url)
path = o.path + '?' + o.query
else:
path = '/{bucket}/{obj}'.format(bucket=key.bucket.name, obj=key.name)
if s3.main.is_secure:
class_ = HTTPSConnection
else:
class_ = HTTPConnection
c = class_(s3.main.host, s3.main.port, strict=True)
c.request(method, path, body=body)
res = c.getresponse()
print res.status, res.reason
return res
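# Illustrative driver for the two helpers above (the ACL strings and the
# expected status are just one combination -- each test below picks its own):
#
#   (bucket, key) = _setup_request(bucket_acl='public-read', object_acl='private')
#   res = _make_request('GET', bucket, key)   # unauthenticated GET
#   eq(res.status, 403)                       # private object => Forbidden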
def _make_bucket_request(method, bucket, body=None, authenticated=False, expires_in=100000):
"""
issue a request with the specified method against the specified bucket
(no key), optionally authenticated (via a signed URL) and with an
optional body, and return the httplib response object
"""
if authenticated:
url = bucket.generate_url(expires_in, method=method)
o = urlparse(url)
path = o.path + '?' + o.query
else:
path = '/{bucket}'.format(bucket=bucket.name)
if s3.main.is_secure:
class_ = HTTPSConnection
else:
class_ = HTTPConnection
c = class_(s3.main.host, s3.main.port, strict=True)
c.request(method, path, body=body)
res = c.getresponse()
print res.status, res.reason
return res
@attr(resource='object')
@attr(method='get')
@attr(operation='publicly readable bucket')
@attr(assertion='bucket is readable')
def test_object_raw_get():
(bucket, key) = _setup_request('public-read', 'public-read')
res = _make_request('GET', bucket, key)
eq(res.status, 200)
eq(res.reason, 'OK')
@attr(resource='object')
@attr(method='get')
@attr(operation='deleted object and bucket')
@attr(assertion='fails 404')
def test_object_raw_get_bucket_gone():
(bucket, key) = _setup_request('public-read', 'public-read')
key.delete()
bucket.delete()
res = _make_request('GET', bucket, key)
eq(res.status, 404)
eq(res.reason, 'Not Found')
@attr(resource='object')
@attr(method='delete')
@attr(operation='deleted object and bucket')
@attr(assertion='fails 404')
def test_object_delete_key_bucket_gone():
(bucket, key) = _setup_request()
key.delete()
bucket.delete()
e = assert_raises(boto.exception.S3ResponseError, key.delete)
eq(e.status, 404)
eq(e.reason, 'Not Found')
eq(e.error_code, 'NoSuchBucket')
@attr(resource='object')
@attr(method='get')
@attr(operation='deleted object')
@attr(assertion='fails 404')
def test_object_raw_get_object_gone():
(bucket, key) = _setup_request('public-read', 'public-read')
key.delete()
res = _make_request('GET', bucket, key)
eq(res.status, 404)
eq(res.reason, 'Not Found')
def _head_bucket(bucket, authenticated=True):
res = _make_bucket_request('HEAD', bucket, authenticated=authenticated)
eq(res.status, 200)
eq(res.reason, 'OK')
result = {}
obj_count = res.getheader('x-rgw-object-count')
if obj_count is not None:
result['x-rgw-object-count'] = int(obj_count)
bytes_used = res.getheader('x-rgw-bytes-used')
if bytes_used is not None:
result['x-rgw-bytes-used'] = int(bytes_used)
return result
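# Example of how the helper above is typically consumed (a sketch; the
# x-rgw-* headers are Ceph/RGW extensions and may be absent elsewhere):
#
#   result = _head_bucket(bucket)
#   count = result.get('x-rgw-object-count', 0)
#   used  = result.get('x-rgw-bytes-used', 0)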
@attr(resource='bucket')
@attr(method='head')
@attr(operation='head bucket')
@attr(assertion='succeeds')
def test_bucket_head():
bucket = get_new_bucket()
_head_bucket(bucket)
# This test relies on Ceph extensions.
# http://tracker.ceph.com/issues/2313
@attr('fails_on_aws')
@attr(resource='bucket')
@attr(method='head')
@attr(operation='read bucket extended information')
@attr(assertion='extended information is getting updated')
def test_bucket_head_extended():
bucket = get_new_bucket()
result = _head_bucket(bucket)
eq(result.get('x-rgw-object-count', 0), 0)
eq(result.get('x-rgw-bytes-used', 0), 0)
_create_keys(bucket, keys=['foo', 'bar', 'baz'])
result = _head_bucket(bucket)
eq(result.get('x-rgw-object-count', 3), 3)
assert result.get('x-rgw-bytes-used', 9) > 0
@attr(resource='bucket.acl')
@attr(method='get')
@attr(operation='unauthenticated on private bucket')
@attr(assertion='succeeds')
def test_object_raw_get_bucket_acl():
(bucket, key) = _setup_request('private', 'public-read')
res = _make_request('GET', bucket, key)
eq(res.status, 200)
eq(res.reason, 'OK')
@attr(resource='object.acl')
@attr(method='get')
@attr(operation='unauthenticated on private object')
@attr(assertion='fails 403')
def test_object_raw_get_object_acl():
(bucket, key) = _setup_request('public-read', 'private')
res = _make_request('GET', bucket, key)
eq(res.status, 403)
eq(res.reason, 'Forbidden')
@attr(resource='object')
@attr(method='ACLs')
@attr(operation='authenticated on public bucket/object')
@attr(assertion='succeeds')
def test_object_raw_authenticated():
(bucket, key) = _setup_request('public-read', 'public-read')
res = _make_request('GET', bucket, key, authenticated=True)
eq(res.status, 200)
eq(res.reason, 'OK')
@attr(resource='object')
@attr(method='get')
@attr(operation='authenticated on private bucket/private object with modified response headers')
@attr(assertion='succeeds')
@attr('fails_on_rgw')
def test_object_raw_response_headers():
(bucket, key) = _setup_request('private', 'private')
response_headers = {
'response-content-type': 'foo/bar',
'response-content-disposition': 'bla',
'response-content-language': 'esperanto',
'response-content-encoding': 'aaa',
'response-expires': '123',
'response-cache-control': 'no-cache',
}
res = _make_request('GET', bucket, key, authenticated=True,
response_headers=response_headers)
eq(res.status, 200)
eq(res.reason, 'OK')
eq(res.getheader('content-type'), 'foo/bar')
eq(res.getheader('content-disposition'), 'bla')
eq(res.getheader('content-language'), 'esperanto')
eq(res.getheader('content-encoding'), 'aaa')
eq(res.getheader('expires'), '123')
eq(res.getheader('cache-control'), 'no-cache')
@attr(resource='object')
@attr(method='ACLs')
@attr(operation='authenticated on private bucket/public object')
@attr(assertion='succeeds')
def test_object_raw_authenticated_bucket_acl():
(bucket, key) = _setup_request('private', 'public-read')
res = _make_request('GET', bucket, key, authenticated=True)
eq(res.status, 200)
eq(res.reason, 'OK')
@attr(resource='object')
@attr(method='ACLs')
@attr(operation='authenticated on public bucket/private object')
@attr(assertion='succeeds')
def test_object_raw_authenticated_object_acl():
(bucket, key) = _setup_request('public-read', 'private')
res = _make_request('GET', bucket, key, authenticated=True)
eq(res.status, 200)
eq(res.reason, 'OK')
@attr(resource='object')
@attr(method='get')
@attr(operation='authenticated on deleted object and bucket')
@attr(assertion='fails 404')
def test_object_raw_authenticated_bucket_gone():
(bucket, key) = _setup_request('public-read', 'public-read')
key.delete()
bucket.delete()
res = _make_request('GET', bucket, key, authenticated=True)
eq(res.status, 404)
eq(res.reason, 'Not Found')
@attr(resource='object')
@attr(method='get')
@attr(operation='authenticated on deleted object')
@attr(assertion='fails 404')
def test_object_raw_authenticated_object_gone():
(bucket, key) = _setup_request('public-read', 'public-read')
key.delete()
res = _make_request('GET', bucket, key, authenticated=True)
eq(res.status, 404)
eq(res.reason, 'Not Found')
@attr(resource='object')
@attr(method='put')
@attr(operation='unauthenticated, no object acls')
@attr(assertion='fails 403')
def test_object_raw_put():
bucket = get_new_bucket()
key = bucket.new_key('foo')
res = _make_request('PUT', bucket, key, body='foo')
eq(res.status, 403)
eq(res.reason, 'Forbidden')
@attr(resource='object')
@attr(method='put')
@attr(operation='unauthenticated, publicly writable object')
@attr(assertion='succeeds')
def test_object_raw_put_write_access():
bucket = get_new_bucket()
bucket.set_acl('public-read-write')
key = bucket.new_key('foo')
res = _make_request('PUT', bucket, key, body='foo')
eq(res.status, 200)
eq(res.reason, 'OK')
@attr(resource='object')
@attr(method='put')
@attr(operation='authenticated, no object acls')
@attr(assertion='succeeds')
def test_object_raw_put_authenticated():
bucket = get_new_bucket()
key = bucket.new_key('foo')
res = _make_request('PUT', bucket, key, body='foo', authenticated=True)
eq(res.status, 200)
eq(res.reason, 'OK')
@attr(resource='object')
@attr(method='put')
@attr(operation='authenticated, expired signed url')
@attr(assertion='fails 403')
def test_object_raw_put_authenticated_expired():
bucket = get_new_bucket()
key = bucket.new_key('foo')
res = _make_request('PUT', bucket, key, body='foo', authenticated=True, expires_in=-1000)
eq(res.status, 403)
eq(res.reason, 'Forbidden')
def check_bad_bucket_name(name):
"""
Attempt to create a bucket with a specified name, and confirm
that the request fails because of an invalid bucket name.
"""
e = assert_raises(boto.exception.S3ResponseError, get_new_bucket, targets.main.default, name)
eq(e.status, 400)
eq(e.reason, 'Bad Request')
eq(e.error_code, 'InvalidBucketName')
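# The naming tests below drive this helper with names that violate one rule
# at a time, for example (both taken from tests that follow):
#
#   check_bad_bucket_name('aa')              # too short
#   check_bad_bucket_name('192.168.5.123')   # formatted like an IP address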
# AWS does not enforce all documented bucket restrictions.
# http://docs.amazonwebservices.com/AmazonS3/2006-03-01/dev/index.html?BucketRestrictions.html
@attr('fails_on_aws')
# Breaks DNS with SubdomainCallingFormat
@attr('fails_with_subdomain')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='name begins with underscore')
@attr(assertion='fails with subdomain: 400')
def test_bucket_create_naming_bad_starts_nonalpha():
bucket_name = get_new_bucket_name()
check_bad_bucket_name('_' + bucket_name)
@attr(resource='bucket')
@attr(method='put')
@attr(operation='empty name')
@attr(assertion='fails 405')
def test_bucket_create_naming_bad_short_empty():
# bucket creates where name is empty look like PUTs to the parent
# resource (with slash), hence their error response is different
e = assert_raises(boto.exception.S3ResponseError, get_new_bucket, targets.main.default, '')
eq(e.status, 405)
eq(e.reason, 'Method Not Allowed')
eq(e.error_code, 'MethodNotAllowed')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='short (one character) name')
@attr(assertion='fails 400')
def test_bucket_create_naming_bad_short_one():
check_bad_bucket_name('a')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='short (two character) name')
@attr(assertion='fails 400')
def test_bucket_create_naming_bad_short_two():
check_bad_bucket_name('aa')
# Breaks DNS with SubdomainCallingFormat
@attr('fails_with_subdomain')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='excessively long names')
@attr(assertion='fails with subdomain: 400')
def test_bucket_create_naming_bad_long():
check_bad_bucket_name(256*'a')
check_bad_bucket_name(280*'a')
check_bad_bucket_name(3000*'a')
def check_good_bucket_name(name, _prefix=None):
"""
Attempt to create a bucket with a specified name and
(specified or default) prefix; the attempt is expected
to succeed.
"""
# tests using this with the default prefix must *not* rely on
# being able to set the initial character, or exceed the max len
# tests using this with a custom prefix are responsible for doing
# their own setup/teardown nukes, with their custom prefix; this
# should be very rare
if _prefix is None:
_prefix = get_prefix()
get_new_bucket(targets.main.default, '{prefix}{name}'.format(
prefix=_prefix,
name=name,
))
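# Example (taken from the DNS-naming tests further down): names that are
# legal for S3 but DNS-unfriendly are still expected to succeed here:
#
#   check_good_bucket_name('foo_bar')
#   check_good_bucket_name('foo-.bar')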
def _test_bucket_create_naming_good_long(length):
"""
Attempt to create a bucket whose name (including the
prefix) is of a specified length.
"""
prefix = get_new_bucket_name()
assert len(prefix) < 255
num = length - len(prefix)
get_new_bucket(targets.main.default, '{prefix}{name}'.format(
prefix=prefix,
name=num*'a',
))
# Breaks DNS with SubdomainCallingFormat
@attr('fails_with_subdomain')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create w/250 byte name')
@attr(assertion='fails with subdomain')
def test_bucket_create_naming_good_long_250():
_test_bucket_create_naming_good_long(250)
# Breaks DNS with SubdomainCallingFormat
@attr('fails_with_subdomain')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create w/251 byte name')
@attr(assertion='fails with subdomain')
def test_bucket_create_naming_good_long_251():
_test_bucket_create_naming_good_long(251)
# Breaks DNS with SubdomainCallingFormat
@attr('fails_with_subdomain')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create w/252 byte name')
@attr(assertion='fails with subdomain')
def test_bucket_create_naming_good_long_252():
_test_bucket_create_naming_good_long(252)
# Breaks DNS with SubdomainCallingFormat
@attr('fails_with_subdomain')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create w/253 byte name')
@attr(assertion='fails with subdomain')
def test_bucket_create_naming_good_long_253():
_test_bucket_create_naming_good_long(253)
# Breaks DNS with SubdomainCallingFormat
@attr('fails_with_subdomain')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create w/254 byte name')
@attr(assertion='fails with subdomain')
def test_bucket_create_naming_good_long_254():
_test_bucket_create_naming_good_long(254)
# Breaks DNS with SubdomainCallingFormat
@attr('fails_with_subdomain')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create w/255 byte name')
@attr(assertion='fails with subdomain')
def test_bucket_create_naming_good_long_255():
_test_bucket_create_naming_good_long(255)
# Breaks DNS with SubdomainCallingFormat
@attr('fails_with_subdomain')
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list w/251 byte name')
@attr(assertion='fails with subdomain')
def test_bucket_list_long_name():
prefix = get_new_bucket_name()
length = 251
num = length - len(prefix)
bucket = get_new_bucket(targets.main.default, '{prefix}{name}'.format(
prefix=prefix,
name=num*'a',
))
got = bucket.list()
got = list(got)
eq(got, [])
# AWS does not enforce all documented bucket restrictions.
# http://docs.amazonwebservices.com/AmazonS3/2006-03-01/dev/index.html?BucketRestrictions.html
@attr('fails_on_aws')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create w/ip address for name')
@attr(assertion='fails on aws')
def test_bucket_create_naming_bad_ip():
check_bad_bucket_name('192.168.5.123')
# Breaks DNS with SubdomainCallingFormat
@attr('fails_with_subdomain')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create w/! in name')
@attr(assertion='fails with subdomain')
def test_bucket_create_naming_bad_punctuation():
# characters other than [a-zA-Z0-9._-]
check_bad_bucket_name('alpha!soup')
# test_bucket_create_naming_dns_* are valid but not recommended
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create w/underscore in name')
@attr(assertion='succeeds')
def test_bucket_create_naming_dns_underscore():
check_good_bucket_name('foo_bar')
# Breaks DNS with SubdomainCallingFormat
@attr('fails_with_subdomain')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create w/100 byte name')
@attr(assertion='fails with subdomain')
def test_bucket_create_naming_dns_long():
prefix = get_prefix()
assert len(prefix) < 50
num = 100 - len(prefix)
check_good_bucket_name(num * 'a')
# Breaks DNS with SubdomainCallingFormat
@attr('fails_with_subdomain')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create w/dash at end of name')
@attr(assertion='fails with subdomain')
def test_bucket_create_naming_dns_dash_at_end():
check_good_bucket_name('foo-')
# Breaks DNS with SubdomainCallingFormat
@attr('fails_with_subdomain')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create w/.. in name')
@attr(assertion='fails with subdomain')
def test_bucket_create_naming_dns_dot_dot():
check_good_bucket_name('foo..bar')
# Breaks DNS with SubdomainCallingFormat
@attr('fails_with_subdomain')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create w/.- in name')
@attr(assertion='fails with subdomain')
def test_bucket_create_naming_dns_dot_dash():
check_good_bucket_name('foo.-bar')
# Breaks DNS with SubdomainCallingFormat
@attr('fails_with_subdomain')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create w/-. in name')
@attr(assertion='fails with subdomain')
def test_bucket_create_naming_dns_dash_dot():
check_good_bucket_name('foo-.bar')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='re-create')
@attr(assertion='idempotent success')
def test_bucket_create_exists():
bucket = get_new_bucket(targets.main.default)
# REST idempotency means this should be a nop
get_new_bucket(targets.main.default, bucket.name)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='get location')
def test_bucket_get_location():
bucket = get_new_bucket(targets.main.default)
actual_location = bucket.get_location()
expected_location = targets.main.default.conf.api_name
eq(actual_location, expected_location)
@attr(resource='bucket')
@attr(method='put')
@attr(operation='re-create by non-owner')
@attr(assertion='fails 409')
def test_bucket_create_exists_nonowner():
# Bucket names are shared across a global namespace, so no two
# users can create a bucket with the same name.
bucket = get_new_bucket()
e = assert_raises(boto.exception.S3CreateError, get_new_bucket, targets.alt.default, bucket.name)
eq(e.status, 409)
eq(e.reason, 'Conflict')
eq(e.error_code, 'BucketAlreadyExists')
@attr(resource='bucket')
@attr(method='del')
@attr(operation='delete by non-owner')
@attr(assertion='fails')
def test_bucket_delete_nonowner():
bucket = get_new_bucket()
check_access_denied(s3.alt.delete_bucket, bucket.name)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='default acl')
@attr(assertion='read back expected defaults')
def test_bucket_acl_default():
bucket = get_new_bucket()
policy = bucket.get_acl()
print repr(policy)
eq(policy.owner.type, None)
eq(policy.owner.id, config.main.user_id)
eq(policy.owner.display_name, config.main.display_name)
check_grants(
policy.acl.grants,
[
dict(
permission='FULL_CONTROL',
id=policy.owner.id,
display_name=policy.owner.display_name,
uri=None,
email_address=None,
type='CanonicalUser',
),
],
)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='public-read acl')
@attr(assertion='read back expected defaults')
def test_bucket_acl_canned_during_create():
name = get_new_bucket_name()
bucket = targets.main.default.connection.create_bucket(name, policy = 'public-read')
policy = bucket.get_acl()
print repr(policy)
check_grants(
policy.acl.grants,
[
dict(
permission='FULL_CONTROL',
id=policy.owner.id,
display_name=policy.owner.display_name,
uri=None,
email_address=None,
type='CanonicalUser',
),
dict(
permission='READ',
id=None,
display_name=None,
uri='http://acs.amazonaws.com/groups/global/AllUsers',
email_address=None,
type='Group',
),
],
)
@attr(resource='bucket')
@attr(method='put')
@attr(operation='acl: public-read,private')
@attr(assertion='read back expected values')
def test_bucket_acl_canned():
bucket = get_new_bucket()
# Since it defaults to private, set it public-read first
bucket.set_acl('public-read')
policy = bucket.get_acl()
print repr(policy)
check_grants(
policy.acl.grants,
[
dict(
permission='FULL_CONTROL',
id=policy.owner.id,
display_name=policy.owner.display_name,
uri=None,
email_address=None,
type='CanonicalUser',
),
dict(
permission='READ',
id=None,
display_name=None,
uri='http://acs.amazonaws.com/groups/global/AllUsers',
email_address=None,
type='Group',
),
],
)
# Then back to private.
bucket.set_acl('private')
policy = bucket.get_acl()
print repr(policy)
check_grants(
policy.acl.grants,
[
dict(
permission='FULL_CONTROL',
id=policy.owner.id,
display_name=policy.owner.display_name,
uri=None,
email_address=None,
type='CanonicalUser',
),
],
)
@attr(resource='bucket.acls')
@attr(method='put')
@attr(operation='acl: public-read-write')
@attr(assertion='read back expected values')
def test_bucket_acl_canned_publicreadwrite():
bucket = get_new_bucket()
bucket.set_acl('public-read-write')
policy = bucket.get_acl()
print repr(policy)
check_grants(
policy.acl.grants,
[
dict(
permission='FULL_CONTROL',
id=policy.owner.id,
display_name=policy.owner.display_name,
uri=None,
email_address=None,
type='CanonicalUser',
),
dict(
permission='READ',
id=None,
display_name=None,
uri='http://acs.amazonaws.com/groups/global/AllUsers',
email_address=None,
type='Group',
),
dict(
permission='WRITE',
id=None,
display_name=None,
uri='http://acs.amazonaws.com/groups/global/AllUsers',
email_address=None,
type='Group',
),
],
)
@attr(resource='bucket')
@attr(method='put')
@attr(operation='acl: authenticated-read')
@attr(assertion='read back expected values')
def test_bucket_acl_canned_authenticatedread():
bucket = get_new_bucket()
bucket.set_acl('authenticated-read')
policy = bucket.get_acl()
print repr(policy)
check_grants(
policy.acl.grants,
[
dict(
permission='FULL_CONTROL',
id=policy.owner.id,
display_name=policy.owner.display_name,
uri=None,
email_address=None,
type='CanonicalUser',
),
dict(
permission='READ',
id=None,
display_name=None,
uri='http://acs.amazonaws.com/groups/global/AuthenticatedUsers',
email_address=None,
type='Group',
),
],
)
@attr(resource='object.acls')
@attr(method='get')
@attr(operation='default acl')
@attr(assertion='read back expected defaults')
def test_object_acl_default():
bucket = get_new_bucket()
key = bucket.new_key('foo')
key.set_contents_from_string('bar')
policy = key.get_acl()
print repr(policy)
check_grants(
policy.acl.grants,
[
dict(
permission='FULL_CONTROL',
id=policy.owner.id,
display_name=policy.owner.display_name,
uri=None,
email_address=None,
type='CanonicalUser',
),
],
)
@attr(resource='object.acls')
@attr(method='put')
@attr(operation='acl public-read')
@attr(assertion='read back expected values')
def test_object_acl_canned_during_create():
bucket = get_new_bucket()
key = bucket.new_key('foo')
key.set_contents_from_string('bar', policy='public-read')
policy = key.get_acl()
print repr(policy)
check_grants(
policy.acl.grants,
[
dict(
permission='FULL_CONTROL',
id=policy.owner.id,
display_name=policy.owner.display_name,
uri=None,
email_address=None,
type='CanonicalUser',
),
dict(
permission='READ',
id=None,
display_name=None,
uri='http://acs.amazonaws.com/groups/global/AllUsers',
email_address=None,
type='Group',
),
],
)
@attr(resource='object.acls')
@attr(method='put')
@attr(operation='acl public-read,private')
@attr(assertion='read back expected values')
def test_object_acl_canned():
bucket = get_new_bucket()
key = bucket.new_key('foo')
key.set_contents_from_string('bar')
# Since it defaults to private, set it public-read first
key.set_acl('public-read')
policy = key.get_acl()
print repr(policy)
check_grants(
policy.acl.grants,
[
dict(
permission='FULL_CONTROL',
id=policy.owner.id,
display_name=policy.owner.display_name,
uri=None,
email_address=None,
type='CanonicalUser',
),
dict(
permission='READ',
id=None,
display_name=None,
uri='http://acs.amazonaws.com/groups/global/AllUsers',
email_address=None,
type='Group',
),
],
)
# Then back to private.
key.set_acl('private')
policy = key.get_acl()
print repr(policy)
check_grants(
policy.acl.grants,
[
dict(
permission='FULL_CONTROL',
id=policy.owner.id,
display_name=policy.owner.display_name,
uri=None,
email_address=None,
type='CanonicalUser',
),
],
)
@attr(resource='object')
@attr(method='put')
@attr(operation='acl public-read-write')
@attr(assertion='read back expected values')
def test_object_acl_canned_publicreadwrite():
bucket = get_new_bucket()
key = bucket.new_key('foo')
key.set_contents_from_string('bar')
key.set_acl('public-read-write')
policy = key.get_acl()
print repr(policy)
check_grants(
policy.acl.grants,
[
dict(
permission='FULL_CONTROL',
id=policy.owner.id,
display_name=policy.owner.display_name,
uri=None,
email_address=None,
type='CanonicalUser',
),
dict(
permission='READ',
id=None,
display_name=None,
uri='http://acs.amazonaws.com/groups/global/AllUsers',
email_address=None,
type='Group',
),
dict(
permission='WRITE',
id=None,
display_name=None,
uri='http://acs.amazonaws.com/groups/global/AllUsers',
email_address=None,
type='Group',
),
],
)
@attr(resource='object.acls')
@attr(method='put')
@attr(operation='acl authenticated-read')
@attr(assertion='read back expected values')
def test_object_acl_canned_authenticatedread():
bucket = get_new_bucket()
key = bucket.new_key('foo')
key.set_contents_from_string('bar')
key.set_acl('authenticated-read')
policy = key.get_acl()
print repr(policy)
check_grants(
policy.acl.grants,
[
dict(
permission='FULL_CONTROL',
id=policy.owner.id,
display_name=policy.owner.display_name,
uri=None,
email_address=None,
type='CanonicalUser',
),
dict(
permission='READ',
id=None,
display_name=None,
uri='http://acs.amazonaws.com/groups/global/AuthenticatedUsers',
email_address=None,
type='Group',
),
],
)
@attr(resource='object.acls')
@attr(method='put')
@attr(operation='acl bucket-owner-read')
@attr(assertion='read back expected values')
def test_object_acl_canned_bucketownerread():
bucket = get_new_bucket(targets.main.default)
bucket.set_acl('public-read-write')
key = s3.alt.get_bucket(bucket.name).new_key('foo')
key.set_contents_from_string('bar')
bucket_policy = bucket.get_acl()
bucket_owner_id = bucket_policy.owner.id
bucket_owner_display = bucket_policy.owner.display_name
key.set_acl('bucket-owner-read')
policy = key.get_acl()
print repr(policy)
check_grants(
policy.acl.grants,
[
dict(
permission='FULL_CONTROL',
id=policy.owner.id,
display_name=policy.owner.display_name,
uri=None,
email_address=None,
type='CanonicalUser',
),
dict(
permission='READ',
id=bucket_owner_id,
display_name=bucket_owner_display,
uri=None,
email_address=None,
type='CanonicalUser',
),
],
)
key.delete()
bucket.delete()
@attr(resource='object.acls')
@attr(method='put')
@attr(operation='acl bucket-owner-full-control')
@attr(assertion='read back expected values')
def test_object_acl_canned_bucketownerfullcontrol():
bucket = get_new_bucket(targets.main.default)
bucket.set_acl('public-read-write')
key = s3.alt.get_bucket(bucket.name).new_key('foo')
key.set_contents_from_string('bar')
bucket_policy = bucket.get_acl()
bucket_owner_id = bucket_policy.owner.id
bucket_owner_display = bucket_policy.owner.display_name
key.set_acl('bucket-owner-full-control')
policy = key.get_acl()
print repr(policy)
check_grants(
policy.acl.grants,
[
dict(
permission='FULL_CONTROL',
id=policy.owner.id,
display_name=policy.owner.display_name,
uri=None,
email_address=None,
type='CanonicalUser',
),
dict(
permission='FULL_CONTROL',
id=bucket_owner_id,
display_name=bucket_owner_display,
uri=None,
email_address=None,
type='CanonicalUser',
),
],
)
key.delete()
bucket.delete()
@attr(resource='object.acls')
@attr(method='put')
@attr(operation='set write-acp')
@attr(assertion='does not modify owner')
def test_object_acl_full_control_verify_owner():
bucket = get_new_bucket(targets.main.default)
bucket.set_acl('public-read-write')
key = bucket.new_key('foo')
key.set_contents_from_string('bar')
key.add_user_grant(permission='FULL_CONTROL', user_id=config.alt.user_id)
k2 = s3.alt.get_bucket(bucket.name).get_key('foo')
k2.add_user_grant(permission='READ_ACP', user_id=config.alt.user_id)
policy = k2.get_acl()
eq(policy.owner.id, config.main.user_id)
@attr(resource='bucket')
@attr(method='ACLs')
@attr(operation='set acl private')
@attr(assertion='a private object can be set to private')
def test_bucket_acl_canned_private_to_private():
bucket = get_new_bucket()
bucket.set_acl('private')
def _make_acl_xml(acl):
"""
Return a complete AccessControlPolicy XML document that wraps the
given ACL and names the main user as owner
"""
return '<?xml version="1.0" encoding="UTF-8"?><AccessControlPolicy xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><Owner><ID>' + config.main.user_id + '</ID></Owner>' + acl.to_xml() + '</AccessControlPolicy>'
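# The string produced above has roughly this shape, with acl.to_xml()
# expanding to an <AccessControlList> of <Grant> elements (illustrative,
# IDs and namespace attributes elided):
#
#   <?xml version="1.0" encoding="UTF-8"?>
#   <AccessControlPolicy xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
#     <Owner><ID>...</ID></Owner>
#     <AccessControlList>
#       <Grant>
#         <Grantee xsi:type="CanonicalUser"><ID>...</ID></Grantee>
#         <Permission>FULL_CONTROL</Permission>
#       </Grant>
#     </AccessControlList>
#   </AccessControlPolicy>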
def _build_bucket_acl_xml(permission, bucket=None):
"""
build an ACL granting the specified permission to the current user,
set it on a (new or specified) bucket via its XML form, and then
read it back to confirm it was correctly set
"""
acl = boto.s3.acl.ACL()
acl.add_user_grant(permission=permission, user_id=config.main.user_id)
XML = _make_acl_xml(acl)
if bucket is None:
bucket = get_new_bucket()
bucket.set_xml_acl(XML)
policy = bucket.get_acl()
print repr(policy)
check_grants(
policy.acl.grants,
[
dict(
permission=permission,
id=policy.owner.id,
display_name=policy.owner.display_name,
uri=None,
email_address=None,
type='CanonicalUser',
),
],
)
@attr(resource='bucket.acls')
@attr(method='ACLs')
@attr(operation='set acl FULL_CONTROL (xml)')
@attr(assertion='reads back correctly')
def test_bucket_acl_xml_fullcontrol():
_build_bucket_acl_xml('FULL_CONTROL')
@attr(resource='bucket.acls')
@attr(method='ACLs')
@attr(operation='set acl WRITE (xml)')
@attr(assertion='reads back correctly')
def test_bucket_acl_xml_write():
_build_bucket_acl_xml('WRITE')
@attr(resource='bucket.acls')
@attr(method='ACLs')
@attr(operation='set acl WRITE_ACP (xml)')
@attr(assertion='reads back correctly')
def test_bucket_acl_xml_writeacp():
_build_bucket_acl_xml('WRITE_ACP')
@attr(resource='bucket.acls')
@attr(method='ACLs')
@attr(operation='set acl READ (xml)')
@attr(assertion='reads back correctly')
def test_bucket_acl_xml_read():
_build_bucket_acl_xml('READ')
@attr(resource='bucket.acls')
@attr(method='ACLs')
@attr(operation='set acl READ_ACP (xml)')
@attr(assertion='reads back correctly')
def test_bucket_acl_xml_readacp():
_build_bucket_acl_xml('READ_ACP')
def _build_object_acl_xml(permission):
"""
build an ACL granting the specified permission to the current user,
set it via its XML form on a new object in a new bucket, and then
read it back to confirm it was correctly set
"""
acl = boto.s3.acl.ACL()
acl.add_user_grant(permission=permission, user_id=config.main.user_id)
XML = _make_acl_xml(acl)
bucket = get_new_bucket()
key = bucket.new_key('foo')
key.set_contents_from_string('bar')
key.set_xml_acl(XML)
policy = key.get_acl()
print repr(policy)
check_grants(
policy.acl.grants,
[
dict(
permission=permission,
id=policy.owner.id,
display_name=policy.owner.display_name,
uri=None,
email_address=None,
type='CanonicalUser',
),
],
)
@attr(resource='object')
@attr(method='ACLs')
@attr(operation='set acl FULL_CONTROL (xml)')
@attr(assertion='reads back correctly')
def test_object_acl_xml():
_build_object_acl_xml('FULL_CONTROL')
@attr(resource='object')
@attr(method='ACLs')
@attr(operation='set acl WRITE (xml)')
@attr(assertion='reads back correctly')
def test_object_acl_xml_write():
_build_object_acl_xml('WRITE')
@attr(resource='object')
@attr(method='ACLs')
@attr(operation='set acl WRITE_ACP (xml)')
@attr(assertion='reads back correctly')
def test_object_acl_xml_writeacp():
_build_object_acl_xml('WRITE_ACP')
@attr(resource='object')
@attr(method='ACLs')
@attr(operation='set acl READ (xml)')
@attr(assertion='reads back correctly')
def test_object_acl_xml_read():
_build_object_acl_xml('READ')
@attr(resource='object')
@attr(method='ACLs')
@attr(operation='set acl READ_ACP (xml)')
@attr(assertion='reads back correctly')
def test_object_acl_xml_readacp():
_build_object_acl_xml('READ_ACP')
def _bucket_acl_grant_userid(permission):
"""
create a new bucket, grant a specific user the specified
permission, read back the acl and verify correct setting
"""
bucket = get_new_bucket()
# add alt user
policy = bucket.get_acl()
policy.acl.add_user_grant(permission, config.alt.user_id)
bucket.set_acl(policy)
policy = bucket.get_acl()
check_grants(
policy.acl.grants,
[
dict(
permission='FULL_CONTROL',
id=policy.owner.id,
display_name=policy.owner.display_name,
uri=None,
email_address=None,
type='CanonicalUser',
),
dict(
permission=permission,
id=config.alt.user_id,
display_name=config.alt.display_name,
uri=None,
email_address=None,
type='CanonicalUser',
),
],
)
return bucket
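# Typical use in the grant tests below (a sketch; each test probes the full
# set of capabilities for its permission):
#
#   bucket = _bucket_acl_grant_userid('READ')
#   _check_bucket_acl_grant_can_read(bucket)
#   _check_bucket_acl_grant_cant_write(bucket)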
def _check_bucket_acl_grant_can_read(bucket):
"""
verify ability to read the specified bucket
"""
bucket2 = s3.alt.get_bucket(bucket.name)
def _check_bucket_acl_grant_cant_read(bucket):
"""
verify inability to read the specified bucket
"""
check_access_denied(s3.alt.get_bucket, bucket.name)
def _check_bucket_acl_grant_can_readacp(bucket):
"""
verify ability to read acls on specified bucket
"""
bucket2 = s3.alt.get_bucket(bucket.name, validate=False)
bucket2.get_acl()
def _check_bucket_acl_grant_cant_readacp(bucket):
"""
verify inability to read acls on specified bucket
"""
bucket2 = s3.alt.get_bucket(bucket.name, validate=False)
check_access_denied(bucket2.get_acl)
def _check_bucket_acl_grant_can_write(bucket):
"""
verify ability to write the specified bucket
"""
bucket2 = s3.alt.get_bucket(bucket.name, validate=False)
key = bucket2.new_key('foo-write')
key.set_contents_from_string('bar')
def _check_bucket_acl_grant_cant_write(bucket):
"""
verify inability to write the specified bucket
"""
bucket2 = s3.alt.get_bucket(bucket.name, validate=False)
key = bucket2.new_key('foo-write')
check_access_denied(key.set_contents_from_string, 'bar')
def _check_bucket_acl_grant_can_writeacp(bucket):
"""
verify ability to set acls on the specified bucket
"""
bucket2 = s3.alt.get_bucket(bucket.name, validate=False)
bucket2.set_acl('public-read')
def _check_bucket_acl_grant_cant_writeacp(bucket):
"""
verify inability to set acls on the specified bucket
"""
bucket2 = s3.alt.get_bucket(bucket.name, validate=False)
check_access_denied(bucket2.set_acl, 'public-read')
@attr(resource='bucket')
@attr(method='ACLs')
@attr(operation='set acl w/userid FULL_CONTROL')
@attr(assertion='can read/write data/acls')
def test_bucket_acl_grant_userid_fullcontrol():
bucket = _bucket_acl_grant_userid('FULL_CONTROL')
# alt user can read
_check_bucket_acl_grant_can_read(bucket)
# can read acl
_check_bucket_acl_grant_can_readacp(bucket)
# can write
_check_bucket_acl_grant_can_write(bucket)
# can write acl
_check_bucket_acl_grant_can_writeacp(bucket)
# verify owner did not change
bucket2 = s3.main.get_bucket(bucket.name)
policy = bucket2.get_acl()
eq(policy.owner.id, config.main.user_id)
eq(policy.owner.display_name, config.main.display_name)
@attr(resource='bucket')
@attr(method='ACLs')
@attr(operation='set acl w/userid READ')
@attr(assertion='can read data, no other r/w')
def test_bucket_acl_grant_userid_read():
bucket = _bucket_acl_grant_userid('READ')
# alt user can read
_check_bucket_acl_grant_can_read(bucket)
# can't read acl
_check_bucket_acl_grant_cant_readacp(bucket)
# can't write
_check_bucket_acl_grant_cant_write(bucket)
# can't write acl
_check_bucket_acl_grant_cant_writeacp(bucket)
@attr(resource='bucket')
@attr(method='ACLs')
@attr(operation='set acl w/userid READ_ACP')
@attr(assertion='can read acl, no other r/w')
def test_bucket_acl_grant_userid_readacp():
bucket = _bucket_acl_grant_userid('READ_ACP')
# alt user can't read
_check_bucket_acl_grant_cant_read(bucket)
# can read acl
_check_bucket_acl_grant_can_readacp(bucket)
# can't write
_check_bucket_acl_grant_cant_write(bucket)
# can't write acp
#_check_bucket_acl_grant_cant_writeacp_can_readacp(bucket)
_check_bucket_acl_grant_cant_writeacp(bucket)
@attr(resource='bucket')
@attr(method='ACLs')
@attr(operation='set acl w/userid WRITE')
@attr(assertion='can write data, no other r/w')
def test_bucket_acl_grant_userid_write():
bucket = _bucket_acl_grant_userid('WRITE')
# alt user can't read
_check_bucket_acl_grant_cant_read(bucket)
# can't read acl
_check_bucket_acl_grant_cant_readacp(bucket)
# can write
_check_bucket_acl_grant_can_write(bucket)
# can't write acl
_check_bucket_acl_grant_cant_writeacp(bucket)
@attr(resource='bucket')
@attr(method='ACLs')
@attr(operation='set acl w/userid WRITE_ACP')
@attr(assertion='can write acls, no other r/w')
def test_bucket_acl_grant_userid_writeacp():
bucket = _bucket_acl_grant_userid('WRITE_ACP')
# alt user can't read
_check_bucket_acl_grant_cant_read(bucket)
# can't read acl
_check_bucket_acl_grant_cant_readacp(bucket)
# can't write
_check_bucket_acl_grant_cant_write(bucket)
# can write acl
_check_bucket_acl_grant_can_writeacp(bucket)
@attr(resource='bucket')
@attr(method='ACLs')
@attr(operation='set acl w/invalid userid')
@attr(assertion='fails 400')
def test_bucket_acl_grant_nonexist_user():
bucket = get_new_bucket()
# add alt user
bad_user_id = '_foo'
policy = bucket.get_acl()
policy.acl.add_user_grant('FULL_CONTROL', bad_user_id)
print policy.to_xml()
e = assert_raises(boto.exception.S3ResponseError, bucket.set_acl, policy)
eq(e.status, 400)
eq(e.reason, 'Bad Request')
eq(e.error_code, 'InvalidArgument')
@attr(resource='bucket')
@attr(method='ACLs')
@attr(operation='revoke all ACLs')
@attr(assertion='can: read obj, get/set bucket acl, cannot write objs')
def test_bucket_acl_no_grants():
bucket = get_new_bucket()
# write content to the bucket
key = bucket.new_key('foo')
key.set_contents_from_string('bar')
# clear grants
policy = bucket.get_acl()
policy.acl.grants = []
# remove read/write permission
bucket.set_acl(policy)
# can read
bucket.get_key('foo')
# can't write
key = bucket.new_key('baz')
check_access_denied(key.set_contents_from_string, 'bar')
# can read acl
bucket.get_acl()
# can write acl
bucket.set_acl('private')
def _get_acl_header(user=None, perms=None):
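    """
    Build the x-amz-grant-<perm> request headers (e.g.
    'x-amz-grant-read: id=<uid>') granting the given permissions
    (default: all of them) to the given user id (default: the alt user).
    """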
all_headers = ["read", "write", "read-acp", "write-acp", "full-control"]
headers = {}
if user == None:
user = config.alt.user_id
if perms != None:
for perm in perms:
headers["x-amz-grant-{perm}".format(perm=perm)] = "id={uid}".format(uid=user)
else:
for perm in all_headers:
headers["x-amz-grant-{perm}".format(perm=perm)] = "id={uid}".format(uid=user)
return headers
@attr(resource='object')
@attr(method='PUT')
@attr(operation='add all grants to user through headers')
@attr(assertion='adds all grants individually to second user')
@attr('fails_on_dho')
def test_object_header_acl_grants():
bucket = get_new_bucket()
headers = _get_acl_header()
k = bucket.new_key("foo_key")
k.set_contents_from_string("bar", headers=headers)
policy = k.get_acl()
check_grants(
policy.acl.grants,
[
dict(
permission='READ',
id=config.alt.user_id,
display_name=config.alt.display_name,
uri=None,
email_address=None,
type='CanonicalUser',
),
dict(
permission='WRITE',
id=config.alt.user_id,
display_name=config.alt.display_name,
uri=None,
email_address=None,
type='CanonicalUser',
),
dict(
permission='READ_ACP',
id=config.alt.user_id,
display_name=config.alt.display_name,
uri=None,
email_address=None,
type='CanonicalUser',
),
dict(
permission='WRITE_ACP',
id=config.alt.user_id,
display_name=config.alt.display_name,
uri=None,
email_address=None,
type='CanonicalUser',
),
dict(
permission='FULL_CONTROL',
id=config.alt.user_id,
display_name=config.alt.display_name,
uri=None,
email_address=None,
type='CanonicalUser',
),
],
)
@attr(resource='bucket')
@attr(method='PUT')
@attr(operation='add all grants to user through headers')
@attr(assertion='adds all grants individually to second user')
@attr('fails_on_dho')
def test_bucket_header_acl_grants():
headers = _get_acl_header()
bucket = get_new_bucket(targets.main.default, get_prefix(), headers)
policy = bucket.get_acl()
check_grants(
policy.acl.grants,
[
dict(
permission='READ',
id=config.alt.user_id,
display_name=config.alt.display_name,
uri=None,
email_address=None,
type='CanonicalUser',
),
dict(
permission='WRITE',
id=config.alt.user_id,
display_name=config.alt.display_name,
uri=None,
email_address=None,
type='CanonicalUser',
),
dict(
permission='READ_ACP',
id=config.alt.user_id,
display_name=config.alt.display_name,
uri=None,
email_address=None,
type='CanonicalUser',
),
dict(
permission='WRITE_ACP',
id=config.alt.user_id,
display_name=config.alt.display_name,
uri=None,
email_address=None,
type='CanonicalUser',
),
dict(
permission='FULL_CONTROL',
id=config.alt.user_id,
display_name=config.alt.display_name,
uri=None,
email_address=None,
type='CanonicalUser',
),
],
)
# alt user can write
bucket2 = s3.alt.get_bucket(bucket.name)
key = bucket2.new_key('foo')
key.set_contents_from_string('bar')
# This test will fail on DH Objects. DHO allows multiple users with one account, which
# would violate the uniqueness requirement of a user's email. As such, DHO users are
# created without an email.
@attr(resource='bucket')
@attr(method='ACLs')
@attr(operation='add second FULL_CONTROL user')
@attr(assertion='works for S3, fails for DHO')
def test_bucket_acl_grant_email():
bucket = get_new_bucket()
# add alt user
policy = bucket.get_acl()
policy.acl.add_email_grant('FULL_CONTROL', config.alt.email)
bucket.set_acl(policy)
policy = bucket.get_acl()
check_grants(
policy.acl.grants,
[
dict(
permission='FULL_CONTROL',
id=policy.owner.id,
display_name=policy.owner.display_name,
uri=None,
email_address=None,
type='CanonicalUser',
),
dict(
permission='FULL_CONTROL',
id=config.alt.user_id,
display_name=config.alt.display_name,
uri=None,
email_address=None,
type='CanonicalUser',
),
],
)
# alt user can write
bucket2 = s3.alt.get_bucket(bucket.name)
key = bucket2.new_key('foo')
key.set_contents_from_string('bar')
@attr(resource='bucket')
@attr(method='ACLs')
@attr(operation='add acl for nonexistent user')
@attr(assertion='fail 400')
def test_bucket_acl_grant_email_notexist():
# behavior not documented by amazon
bucket = get_new_bucket()
policy = bucket.get_acl()
policy.acl.add_email_grant('FULL_CONTROL', NONEXISTENT_EMAIL)
e = assert_raises(boto.exception.S3ResponseError, bucket.set_acl, policy)
eq(e.status, 400)
eq(e.reason, 'Bad Request')
eq(e.error_code, 'UnresolvableGrantByEmailAddress')
@attr(resource='bucket')
@attr(method='ACLs')
@attr(operation='revoke all ACLs')
@attr(assertion='acls read back as empty')
def test_bucket_acl_revoke_all():
# revoke all access, including the owner's access
bucket = get_new_bucket()
policy = bucket.get_acl()
policy.acl.grants = []
bucket.set_acl(policy)
policy = bucket.get_acl()
eq(len(policy.acl.grants), 0)
# TODO rgw log_bucket.set_as_logging_target() gives 403 Forbidden
# http://tracker.newdream.net/issues/984
@attr(resource='bucket.log')
@attr(method='put')
@attr(operation='set/enable/disable logging target')
@attr(assertion='operations succeed')
@attr('fails_on_rgw')
def test_logging_toggle():
bucket = get_new_bucket()
log_bucket = get_new_bucket(targets.main.default, bucket.name + '-log')
log_bucket.set_as_logging_target()
bucket.enable_logging(target_bucket=log_bucket, target_prefix=bucket.name)
bucket.disable_logging()
# NOTE: this does not actually test whether or not logging works
def _setup_access(bucket_acl, object_acl):
"""
Simple test fixture: create a bucket with given ACL, with objects:
- a: owning user, given ACL
- a2: same object accessed by some other user
- b: owning user, default ACL in bucket w/given ACL
    - b2: same object accessed by some other user
"""
obj = bunch.Bunch()
bucket = get_new_bucket()
bucket.set_acl(bucket_acl)
obj.a = bucket.new_key('foo')
obj.a.set_contents_from_string('foocontent')
obj.a.set_acl(object_acl)
obj.b = bucket.new_key('bar')
obj.b.set_contents_from_string('barcontent')
# bucket2 is being accessed by a different user
obj.bucket2 = s3.alt.get_bucket(bucket.name, validate=False)
obj.a2 = obj.bucket2.new_key(obj.a.name)
obj.b2 = obj.bucket2.new_key(obj.b.name)
obj.new = obj.bucket2.new_key('new')
return obj
def get_bucket_key_names(bucket):
return frozenset(k.name for k in bucket.list())
@attr(resource='object')
@attr(method='ACLs')
@attr(operation='set bucket/object acls: private/private')
@attr(assertion='public has no access to bucket or objects')
def test_access_bucket_private_object_private():
# all the test_access_* tests follow this template
obj = _setup_access(bucket_acl='private', object_acl='private')
    # a should be private, b gets default (private)
# acled object read fail
check_access_denied(obj.a2.get_contents_as_string)
# acled object write fail
check_access_denied(obj.a2.set_contents_from_string, 'barcontent')
# default object read fail
check_access_denied(obj.b2.get_contents_as_string)
# default object write fail
check_access_denied(obj.b2.set_contents_from_string, 'baroverwrite')
# bucket read fail
check_access_denied(get_bucket_key_names, obj.bucket2)
# bucket write fail
check_access_denied(obj.new.set_contents_from_string, 'newcontent')
@attr(resource='object')
@attr(method='ACLs')
@attr(operation='set bucket/object acls: private/public-read')
@attr(assertion='public can only read readable object')
def test_access_bucket_private_object_publicread():
obj = _setup_access(bucket_acl='private', object_acl='public-read')
# a should be public-read, b gets default (private)
eq(obj.a2.get_contents_as_string(), 'foocontent')
check_access_denied(obj.a2.set_contents_from_string, 'foooverwrite')
check_access_denied(obj.b2.get_contents_as_string)
check_access_denied(obj.b2.set_contents_from_string, 'baroverwrite')
check_access_denied(get_bucket_key_names, obj.bucket2)
check_access_denied(obj.new.set_contents_from_string, 'newcontent')
@attr(resource='object')
@attr(method='ACLs')
@attr(operation='set bucket/object acls: private/public-read/write')
@attr(assertion='public can only read the readable object')
def test_access_bucket_private_object_publicreadwrite():
obj = _setup_access(bucket_acl='private', object_acl='public-read-write')
# a should be public-read-only ... because it is in a private bucket
# b gets default (private)
eq(obj.a2.get_contents_as_string(), 'foocontent')
check_access_denied(obj.a2.set_contents_from_string, 'foooverwrite')
check_access_denied(obj.b2.get_contents_as_string)
check_access_denied(obj.b2.set_contents_from_string, 'baroverwrite')
check_access_denied(get_bucket_key_names, obj.bucket2)
check_access_denied(obj.new.set_contents_from_string, 'newcontent')
@attr(resource='object')
@attr(method='ACLs')
@attr(operation='set bucket/object acls: public-read/private')
@attr(assertion='public can only list the bucket')
def test_access_bucket_publicread_object_private():
obj = _setup_access(bucket_acl='public-read', object_acl='private')
# a should be private, b gets default (private)
check_access_denied(obj.a2.get_contents_as_string)
check_access_denied(obj.a2.set_contents_from_string, 'barcontent')
check_access_denied(obj.b2.get_contents_as_string)
check_access_denied(obj.b2.set_contents_from_string, 'baroverwrite')
eq(get_bucket_key_names(obj.bucket2), frozenset(['foo', 'bar']))
check_access_denied(obj.new.set_contents_from_string, 'newcontent')
@attr(resource='object')
@attr(method='ACLs')
@attr(operation='set bucket/object acls: public-read/public-read')
@attr(assertion='public can read readable objects and list bucket')
def test_access_bucket_publicread_object_publicread():
obj = _setup_access(bucket_acl='public-read', object_acl='public-read')
# a should be public-read, b gets default (private)
eq(obj.a2.get_contents_as_string(), 'foocontent')
check_access_denied(obj.a2.set_contents_from_string, 'foooverwrite')
check_access_denied(obj.b2.get_contents_as_string)
check_access_denied(obj.b2.set_contents_from_string, 'baroverwrite')
eq(get_bucket_key_names(obj.bucket2), frozenset(['foo', 'bar']))
check_access_denied(obj.new.set_contents_from_string, 'newcontent')
@attr(resource='object')
@attr(method='ACLs')
@attr(operation='set bucket/object acls: public-read/public-read-write')
@attr(assertion='public can read readable objects and list bucket')
def test_access_bucket_publicread_object_publicreadwrite():
obj = _setup_access(bucket_acl='public-read', object_acl='public-read-write')
# a should be public-read-only ... because it is in a r/o bucket
# b gets default (private)
eq(obj.a2.get_contents_as_string(), 'foocontent')
check_access_denied(obj.a2.set_contents_from_string, 'foooverwrite')
check_access_denied(obj.b2.get_contents_as_string)
check_access_denied(obj.b2.set_contents_from_string, 'baroverwrite')
eq(get_bucket_key_names(obj.bucket2), frozenset(['foo', 'bar']))
check_access_denied(obj.new.set_contents_from_string, 'newcontent')
@attr(resource='object')
@attr(method='ACLs')
@attr(operation='set bucket/object acls: public-read-write/private')
@attr(assertion='private objects cannot be read, but can be overwritten')
def test_access_bucket_publicreadwrite_object_private():
obj = _setup_access(bucket_acl='public-read-write', object_acl='private')
# a should be private, b gets default (private)
check_access_denied(obj.a2.get_contents_as_string)
obj.a2.set_contents_from_string('barcontent')
check_access_denied(obj.b2.get_contents_as_string)
obj.b2.set_contents_from_string('baroverwrite')
eq(get_bucket_key_names(obj.bucket2), frozenset(['foo', 'bar']))
obj.new.set_contents_from_string('newcontent')
@attr(resource='object')
@attr(method='ACLs')
@attr(operation='set bucket/object acls: public-read-write/public-read')
@attr(assertion='private objects cannot be read, but can be overwritten')
def test_access_bucket_publicreadwrite_object_publicread():
obj = _setup_access(bucket_acl='public-read-write', object_acl='public-read')
# a should be public-read, b gets default (private)
eq(obj.a2.get_contents_as_string(), 'foocontent')
obj.a2.set_contents_from_string('barcontent')
check_access_denied(obj.b2.get_contents_as_string)
obj.b2.set_contents_from_string('baroverwrite')
eq(get_bucket_key_names(obj.bucket2), frozenset(['foo', 'bar']))
obj.new.set_contents_from_string('newcontent')
@attr(resource='object')
@attr(method='ACLs')
@attr(operation='set bucket/object acls: public-read-write/public-read-write')
@attr(assertion='private objects cannot be read, but can be overwritten')
def test_access_bucket_publicreadwrite_object_publicreadwrite():
obj = _setup_access(bucket_acl='public-read-write', object_acl='public-read-write')
# a should be public-read-write, b gets default (private)
eq(obj.a2.get_contents_as_string(), 'foocontent')
obj.a2.set_contents_from_string('foooverwrite')
check_access_denied(obj.b2.get_contents_as_string)
obj.b2.set_contents_from_string('baroverwrite')
eq(get_bucket_key_names(obj.bucket2), frozenset(['foo', 'bar']))
obj.new.set_contents_from_string('newcontent')
@attr(resource='object')
@attr(method='put')
@attr(operation='set object acls')
@attr(assertion='valid XML ACL sets properly')
def test_object_set_valid_acl():
XML_1 = '<?xml version="1.0" encoding="UTF-8"?><AccessControlPolicy xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><Owner><ID>' + config.main.user_id + '</ID></Owner><AccessControlList><Grant><Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:type="CanonicalUser"><ID>' + config.main.user_id + '</ID></Grantee><Permission>FULL_CONTROL</Permission></Grant></AccessControlList></AccessControlPolicy>'
bucket = get_new_bucket()
key = bucket.new_key('foo')
key.set_contents_from_string('bar')
key.set_xml_acl(XML_1)
@attr(resource='object')
@attr(method='put')
@attr(operation='set object acls')
@attr(assertion='invalid XML ACL fails 403')
def test_object_giveaway():
CORRECT_ACL = '<?xml version="1.0" encoding="UTF-8"?><AccessControlPolicy xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><Owner><ID>' + config.main.user_id + '</ID></Owner><AccessControlList><Grant><Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:type="CanonicalUser"><ID>' + config.main.user_id + '</ID></Grantee><Permission>FULL_CONTROL</Permission></Grant></AccessControlList></AccessControlPolicy>'
WRONG_ACL = '<?xml version="1.0" encoding="UTF-8"?><AccessControlPolicy xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><Owner><ID>' + config.alt.user_id + '</ID></Owner><AccessControlList><Grant><Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:type="CanonicalUser"><ID>' + config.alt.user_id + '</ID></Grantee><Permission>FULL_CONTROL</Permission></Grant></AccessControlList></AccessControlPolicy>'
bucket = get_new_bucket()
key = bucket.new_key('foo')
key.set_contents_from_string('bar')
key.set_xml_acl(CORRECT_ACL)
e = assert_raises(boto.exception.S3ResponseError, key.set_xml_acl, WRONG_ACL)
eq(e.status, 403)
eq(e.reason, 'Forbidden')
eq(e.error_code, 'AccessDenied')
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list all buckets')
@attr(assertion='returns all expected buckets')
def test_buckets_create_then_list():
create_buckets = [get_new_bucket() for i in xrange(5)]
list_buckets = s3.main.get_all_buckets()
names = frozenset(bucket.name for bucket in list_buckets)
for bucket in create_buckets:
if bucket.name not in names:
            raise RuntimeError("S3 implementation's GET on Service did not return bucket we created: %r" % bucket.name)
# Common code to create a connection object, which'll use bad authorization information
def _create_connection_bad_auth(aws_access_key_id='badauth'):
# We're going to need to manually build a connection using bad authorization info.
# But to save the day, lets just hijack the settings from s3.main. :)
main = s3.main
conn = boto.s3.connection.S3Connection(
aws_access_key_id=aws_access_key_id,
aws_secret_access_key='roflmao',
is_secure=main.is_secure,
port=main.port,
host=main.host,
calling_format=main.calling_format,
)
return conn
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list all buckets (anonymous)')
@attr(assertion='succeeds')
@attr('fails_on_aws')
def test_list_buckets_anonymous():
# Get a connection with bad authorization, then change it to be our new Anonymous auth mechanism,
# emulating standard HTTP access.
#
# While it may have been possible to use httplib directly, doing it this way takes care of also
# allowing us to vary the calling format in testing.
conn = _create_connection_bad_auth()
    conn._auth_handler = AnonymousAuth.AnonymousAuthHandler(None, None, None) # the anonymous handler ignores these arguments
buckets = conn.get_all_buckets()
eq(len(buckets), 0)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list all buckets (bad auth)')
@attr(assertion='fails 403')
def test_list_buckets_invalid_auth():
conn = _create_connection_bad_auth()
e = assert_raises(boto.exception.S3ResponseError, conn.get_all_buckets)
eq(e.status, 403)
eq(e.reason, 'Forbidden')
eq(e.error_code, 'InvalidAccessKeyId')
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list all buckets (bad auth)')
@attr(assertion='fails 403')
def test_list_buckets_bad_auth():
conn = _create_connection_bad_auth(aws_access_key_id=s3.main.aws_access_key_id)
e = assert_raises(boto.exception.S3ResponseError, conn.get_all_buckets)
eq(e.status, 403)
eq(e.reason, 'Forbidden')
eq(e.error_code, 'SignatureDoesNotMatch')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create bucket')
@attr(assertion='name starts with alphabetic works')
# this test goes outside the user-configured prefix because it needs to
# control the initial character of the bucket name
@nose.with_setup(
setup=lambda: nuke_prefixed_buckets(prefix='a'+get_prefix()),
teardown=lambda: nuke_prefixed_buckets(prefix='a'+get_prefix()),
)
def test_bucket_create_naming_good_starts_alpha():
check_good_bucket_name('foo', _prefix='a'+get_prefix())
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create bucket')
@attr(assertion='name starts with numeric works')
# this test goes outside the user-configured prefix because it needs to
# control the initial character of the bucket name
@nose.with_setup(
setup=lambda: nuke_prefixed_buckets(prefix='0'+get_prefix()),
teardown=lambda: nuke_prefixed_buckets(prefix='0'+get_prefix()),
)
def test_bucket_create_naming_good_starts_digit():
check_good_bucket_name('foo', _prefix='0'+get_prefix())
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create bucket')
@attr(assertion='name containing dot works')
def test_bucket_create_naming_good_contains_period():
check_good_bucket_name('aaa.111')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create bucket')
@attr(assertion='name containing hyphen works')
def test_bucket_create_naming_good_contains_hyphen():
check_good_bucket_name('aaa-111')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create bucket with objects and recreate it')
@attr(assertion='bucket recreation not overriding index')
def test_bucket_recreate_not_overriding():
key_names = ['mykey1', 'mykey2']
bucket = _create_keys(keys=key_names)
li = bucket.list()
names = [e.name for e in list(li)]
eq(names, key_names)
bucket2 = get_new_bucket(targets.main.default, bucket.name)
li = bucket.list()
names = [e.name for e in list(li)]
eq(names, key_names)
@attr(resource='object')
@attr(method='put')
@attr(operation='create and list objects with special names')
@attr(assertion='special names work')
def test_bucket_create_special_key_names():
key_names = [' ', '%', '_', '_ ', '_ _', '__']
bucket = _create_keys(keys=key_names)
li = bucket.list()
names = [e.name for e in list(li)]
eq(names, key_names)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='create and list objects with underscore as prefix, list using prefix')
@attr(assertion='listing works correctly')
def test_bucket_list_special_prefix():
key_names = ['_bla/1', '_bla/2', '_bla/3', '_bla/4', 'abcd']
bucket = _create_keys(keys=key_names)
li = bucket.get_all_keys()
eq(len(li), 5)
li2 = bucket.get_all_keys(prefix='_bla/')
eq(len(li2), 4)
@attr(resource='object')
@attr(method='put')
@attr(operation='copy zero sized object in same bucket')
@attr(assertion='works')
def test_object_copy_zero_size():
bucket = get_new_bucket()
key = bucket.new_key('foo123bar')
fp_a = FakeWriteFile(0, '')
key.set_contents_from_file(fp_a)
key.copy(bucket, 'bar321foo')
key2 = bucket.get_key('bar321foo')
eq(key2.size, 0)
@attr(resource='object')
@attr(method='put')
@attr(operation='copy object in same bucket')
@attr(assertion='works')
def test_object_copy_same_bucket():
bucket = get_new_bucket()
key = bucket.new_key('foo123bar')
key.set_contents_from_string('foo')
key.copy(bucket, 'bar321foo')
key2 = bucket.get_key('bar321foo')
eq(key2.get_contents_as_string(), 'foo')
@attr(resource='object')
@attr(method='put')
@attr(operation='copy object to itself')
@attr(assertion='fails')
def test_object_copy_to_itself():
bucket = get_new_bucket()
key = bucket.new_key('foo123bar')
key.set_contents_from_string('foo')
e = assert_raises(boto.exception.S3ResponseError, key.copy, bucket, 'foo123bar')
eq(e.status, 400)
eq(e.reason, 'Bad Request')
eq(e.error_code, 'InvalidRequest')
@attr(resource='object')
@attr(method='put')
@attr(operation='modify object metadata by copying')
@attr(assertion='fails')
def test_object_copy_to_itself_with_metadata():
bucket = get_new_bucket()
key = bucket.new_key('foo123bar')
key.set_contents_from_string('foo')
key.copy(bucket, 'foo123bar', {'foo': 'bar'})
key.close()
bucket2 = s3.main.get_bucket(bucket.name)
key2 = bucket2.get_key('foo123bar')
md = key2.get_metadata('foo')
eq(md, 'bar')
@attr(resource='object')
@attr(method='put')
@attr(operation='copy object from different bucket')
@attr(assertion='works')
def test_object_copy_diff_bucket():
buckets = [get_new_bucket(), get_new_bucket()]
key = buckets[0].new_key('foo123bar')
key.set_contents_from_string('foo')
key.copy(buckets[1], 'bar321foo')
key2 = buckets[1].get_key('bar321foo')
eq(key2.get_contents_as_string(), 'foo')
# is this a necessary check? a NoneType object is being touched here
# it doesn't get to the S3 level
@attr(resource='object')
@attr(method='put')
@attr(operation='copy from an inaccessible bucket')
@attr(assertion='fails w/AttributeError')
def test_object_copy_not_owned_bucket():
buckets = [get_new_bucket(), get_new_bucket(targets.alt.default)]
print repr(buckets[1])
key = buckets[0].new_key('foo123bar')
key.set_contents_from_string('foo')
try:
key.copy(buckets[1], 'bar321foo')
except AttributeError:
pass
@attr(resource='object')
@attr(method='put')
@attr(operation='copy a non-owned object in a non-owned bucket, but with perms')
@attr(assertion='works')
def test_object_copy_not_owned_object_bucket():
bucket = get_new_bucket(targets.main.default)
key = bucket.new_key('foo123bar')
key.set_contents_from_string('foo')
bucket.add_user_grant(permission='FULL_CONTROL', user_id=config.alt.user_id, recursive=True)
k2 = s3.alt.get_bucket(bucket.name).get_key('foo123bar')
k2.copy(bucket.name, 'bar321foo')
@attr(resource='object')
@attr(method='put')
@attr(operation='copy object and change acl')
@attr(assertion='works')
def test_object_copy_canned_acl():
bucket = get_new_bucket()
key = bucket.new_key('foo123bar')
key.set_contents_from_string('foo')
# use COPY directive
key2 = bucket.copy_key('bar321foo', bucket.name, 'foo123bar', headers={'x-amz-acl': 'public-read'})
res = _make_request('GET', bucket, key2)
eq(res.status, 200)
eq(res.reason, 'OK')
# use REPLACE directive
key3 = bucket.copy_key('bar321foo2', bucket.name, 'foo123bar', headers={'x-amz-acl': 'public-read'}, metadata={'abc': 'def'})
res = _make_request('GET', bucket, key3)
eq(res.status, 200)
eq(res.reason, 'OK')
@attr(resource='object')
@attr(method='put')
@attr(operation='copy object and retain metadata')
def test_object_copy_retaining_metadata():
for size in [3, 1024 * 1024]:
bucket = get_new_bucket()
key = bucket.new_key('foo123bar')
metadata = {'key1': 'value1', 'key2': 'value2'}
key.set_metadata('key1', 'value1')
key.set_metadata('key2', 'value2')
content_type = 'audio/ogg'
key.content_type = content_type
key.set_contents_from_string(str(bytearray(size)))
bucket.copy_key('bar321foo', bucket.name, 'foo123bar')
key2 = bucket.get_key('bar321foo')
eq(key2.size, size)
eq(key2.metadata, metadata)
eq(key2.content_type, content_type)
@attr(resource='object')
@attr(method='put')
@attr(operation='copy object and replace metadata')
def test_object_copy_replacing_metadata():
for size in [3, 1024 * 1024]:
bucket = get_new_bucket()
key = bucket.new_key('foo123bar')
key.set_metadata('key1', 'value1')
key.set_metadata('key2', 'value2')
key.content_type = 'audio/ogg'
key.set_contents_from_string(str(bytearray(size)))
metadata = {'key3': 'value3', 'key1': 'value4'}
content_type = 'audio/mpeg'
bucket.copy_key('bar321foo', bucket.name, 'foo123bar', metadata=metadata, headers={'Content-Type': content_type})
key2 = bucket.get_key('bar321foo')
eq(key2.size, size)
eq(key2.metadata, metadata)
eq(key2.content_type, content_type)
def transfer_part(bucket, mp_id, mp_keyname, i, part):
"""Transfer a part of a multipart upload. Designed to be run in parallel.
"""
mp = boto.s3.multipart.MultiPartUpload(bucket)
mp.key_name = mp_keyname
mp.id = mp_id
part_out = StringIO(part)
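    # part numbers passed to upload_part_from_file are 1-based, hence i+1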
mp.upload_part_from_file(part_out, i+1)
def generate_random(size, part_size=5*1024*1024):
"""
    Generate random data of the specified size, yielded in pieces of at
    most part_size bytes (each piece is a repetition of a random 1 KB string)
"""
chunk = 1024
allowed = string.ascii_letters
for x in range(0, size, part_size):
strpart = ''.join([allowed[random.randint(0, len(allowed) - 1)] for _ in xrange(chunk)])
s = ''
left = size - x
this_part_size = min(left, part_size)
for y in range(this_part_size / chunk):
s = s + strpart
if this_part_size > len(s):
s = s + strpart[0:this_part_size - len(s)]
yield s
if (x == size):
return
def _multipart_upload(bucket, s3_key_name, size, part_size=5*1024*1024, do_list=None, headers=None, metadata=None, resend_parts=[]):
"""
    generate a multi-part upload for a random file of specified size;
    parts whose indices appear in resend_parts are uploaded twice.
    if requested, list the in-progress multipart uploads.
    return the upload descriptor and the full payload that was sent
"""
upload = bucket.initiate_multipart_upload(s3_key_name, headers=headers, metadata=metadata)
s = ''
for i, part in enumerate(generate_random(size, part_size)):
s += part
transfer_part(bucket, upload.id, upload.key_name, i, part)
if i in resend_parts:
transfer_part(bucket, upload.id, upload.key_name, i, part)
if do_list is not None:
l = bucket.list_multipart_uploads()
l = list(l)
return (upload, s)
@attr(resource='object')
@attr(method='put')
@attr(operation='check multipart upload without parts')
def test_multipart_upload_empty():
bucket = get_new_bucket()
key = "mymultipart"
(upload, data) = _multipart_upload(bucket, key, 0)
e = assert_raises(boto.exception.S3ResponseError, upload.complete_upload)
eq(e.status, 400)
eq(e.error_code, u'MalformedXML')
@attr(resource='object')
@attr(method='put')
@attr(operation='check multipart uploads with single small part')
def test_multipart_upload_small():
bucket = get_new_bucket()
key = "mymultipart"
size = 1
(upload, data) = _multipart_upload(bucket, key, size)
upload.complete_upload()
key2 = bucket.get_key(key)
eq(key2.size, size)
def _check_content_using_range(k, data, step):
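    """
    Read the key back in ranged requests of at most `step` bytes and
    verify each response matches the corresponding slice of `data`.
    """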
objlen = k.size
for ofs in xrange(0, k.size, step):
toread = k.size - ofs
if toread > step:
toread = step
end = ofs + toread - 1
read_range = k.get_contents_as_string(headers={'Range': 'bytes={s}-{e}'.format(s=ofs, e=end)})
eq(len(read_range), toread)
eq(read_range, data[ofs:end+1])
@attr(resource='object')
@attr(method='put')
@attr(operation='complete multi-part upload')
@attr(assertion='successful')
def test_multipart_upload():
bucket = get_new_bucket()
key="mymultipart"
content_type='text/bla'
objlen = 30 * 1024 * 1024
(upload, data) = _multipart_upload(bucket, key, objlen, headers={'Content-Type': content_type}, metadata={'foo': 'bar'})
upload.complete_upload()
result = _head_bucket(bucket)
eq(result.get('x-rgw-object-count', 1), 1)
eq(result.get('x-rgw-bytes-used', 30 * 1024 * 1024), 30 * 1024 * 1024)
k=bucket.get_key(key)
eq(k.metadata['foo'], 'bar')
eq(k.content_type, content_type)
test_string=k.get_contents_as_string()
eq(len(test_string), k.size)
eq(test_string, data)
_check_content_using_range(k, data, 1000000)
_check_content_using_range(k, data, 10000000)
def _check_upload_multipart_resend(bucket, key, objlen, resend_parts):
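    """
    Upload a multipart object, re-sending the parts listed in
    resend_parts, then verify the completed object's metadata,
    content type, size and contents.
    """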
content_type='text/bla'
(upload, data) = _multipart_upload(bucket, key, objlen, headers={'Content-Type': content_type}, metadata={'foo': 'bar'}, resend_parts=resend_parts)
upload.complete_upload()
k=bucket.get_key(key)
eq(k.metadata['foo'], 'bar')
eq(k.content_type, content_type)
test_string=k.get_contents_as_string()
eq(k.size, len(test_string))
eq(k.size, objlen)
eq(test_string, data)
_check_content_using_range(k, data, 1000000)
_check_content_using_range(k, data, 10000000)
@attr(resource='object')
@attr(method='put')
@attr(operation='complete multi-part upload')
@attr(assertion='successful')
def test_multipart_upload_resend_part():
    bucket = get_new_bucket()
    key="mymultipart"
    objlen = 30 * 1024 * 1024
    _check_upload_multipart_resend(bucket, key, objlen, [0])
    _check_upload_multipart_resend(bucket, key, objlen, [1])
    _check_upload_multipart_resend(bucket, key, objlen, [2])
    _check_upload_multipart_resend(bucket, key, objlen, [1,2])
    _check_upload_multipart_resend(bucket, key, objlen, [0,1,2,3,4,5])
@attr(resource='object')
@attr(method='put')
@attr(operation='complete multiple multi-part upload with different sizes')
@attr(assertion='successful')
def test_multipart_upload_multiple_sizes():
bucket = get_new_bucket()
key="mymultipart"
(upload, data) = _multipart_upload(bucket, key, 5 * 1024 * 1024)
upload.complete_upload()
(upload, data) = _multipart_upload(bucket, key, 5 * 1024 * 1024 + 100 * 1024)
upload.complete_upload()
(upload, data) = _multipart_upload(bucket, key, 5 * 1024 * 1024 + 600 * 1024)
upload.complete_upload()
(upload, data) = _multipart_upload(bucket, key, 10 * 1024 * 1024 + 100 * 1024)
upload.complete_upload()
(upload, data) = _multipart_upload(bucket, key, 10 * 1024 * 1024 + 600 * 1024)
upload.complete_upload()
(upload, data) = _multipart_upload(bucket, key, 10 * 1024 * 1024)
upload.complete_upload()
@attr(resource='object')
@attr(method='put')
@attr(operation='check failure on multiple multi-part upload with size too small')
@attr(assertion='fails 400')
def test_multipart_upload_size_too_small():
bucket = get_new_bucket()
key="mymultipart"
(upload, data) = _multipart_upload(bucket, key, 100 * 1024, part_size=10*1024)
e = assert_raises(boto.exception.S3ResponseError, upload.complete_upload)
eq(e.status, 400)
eq(e.error_code, u'EntityTooSmall')
def gen_rand_string(size, chars=string.ascii_uppercase + string.digits):
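    """Return a random string of the given size drawn from the given characters."""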
return ''.join(random.choice(chars) for _ in range(size))
def _do_test_multipart_upload_contents(bucket, key_name, num_parts):
payload=gen_rand_string(5)*1024*1024
mp=bucket.initiate_multipart_upload(key_name)
for i in range(0, num_parts):
mp.upload_part_from_file(StringIO(payload), i+1)
last_payload='123'*1024*1024
mp.upload_part_from_file(StringIO(last_payload), num_parts + 1)
mp.complete_upload()
key=bucket.get_key(key_name)
test_string=key.get_contents_as_string()
all_payload = payload*num_parts + last_payload
    print 'multipart upload contents check:', key_name, len(all_payload), len(test_string)
assert test_string == all_payload
return all_payload
@attr(resource='object')
@attr(method='put')
@attr(operation='check contents of multi-part upload')
@attr(assertion='successful')
def test_multipart_upload_contents():
_do_test_multipart_upload_contents(get_new_bucket(), 'mymultipart', 3)
@attr(resource='object')
@attr(method='put')
@attr(operation='multi-part upload overwrites existing key')
@attr(assertion='successful')
def test_multipart_upload_overwrite_existing_object():
bucket = get_new_bucket()
key_name="mymultipart"
payload='12345'*1024*1024
num_parts=2
key=bucket.new_key(key_name)
key.set_contents_from_string(payload)
mp=bucket.initiate_multipart_upload(key_name)
for i in range(0, num_parts):
mp.upload_part_from_file(StringIO(payload), i+1)
mp.complete_upload()
key=bucket.get_key(key_name)
test_string=key.get_contents_as_string()
assert test_string == payload*num_parts
@attr(resource='object')
@attr(method='put')
@attr(operation='abort multi-part upload')
@attr(assertion='successful')
def test_abort_multipart_upload():
bucket = get_new_bucket()
key="mymultipart"
(upload, data) = _multipart_upload(bucket, key, 10 * 1024 * 1024)
upload.cancel_upload()
result = _head_bucket(bucket)
eq(result.get('x-rgw-object-count', 0), 0)
eq(result.get('x-rgw-bytes-used', 0), 0)
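@attr(resource='object')
@attr(method='put')
@attr(operation='abort non-existent multi-part upload')
@attr(assertion='fails 404')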
def test_abort_multipart_upload_not_found():
bucket = get_new_bucket()
key="mymultipart"
e = assert_raises(boto.exception.S3ResponseError, bucket.cancel_multipart_upload, key, '1')
eq(e.status, 404)
eq(e.reason, 'Not Found')
eq(e.error_code, 'NoSuchUpload')
@attr(resource='object')
@attr(method='put')
@attr(operation='concurrent multi-part uploads')
@attr(assertion='successful')
def test_list_multipart_upload():
bucket = get_new_bucket()
key="mymultipart"
mb = 1024 * 1024
(upload1, data) = _multipart_upload(bucket, key, 5 * mb, do_list = True)
(upload2, data) = _multipart_upload(bucket, key, 6 * mb, do_list = True)
key2="mymultipart2"
(upload3, data) = _multipart_upload(bucket, key2, 5 * mb, do_list = True)
l = bucket.list_multipart_uploads()
l = list(l)
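    # key has two in-progress uploads, key2 has one; decrement each as it is listed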
index = dict([(key, 2), (key2, 1)])
for upload in l:
index[upload.key_name] -= 1;
for k, c in index.items():
eq(c, 0)
upload1.cancel_upload()
upload2.cancel_upload()
upload3.cancel_upload()
@attr(resource='object')
@attr(method='put')
@attr(operation='multi-part upload with missing part')
def test_multipart_upload_missing_part():
bucket = get_new_bucket()
key_name = "mymultipart"
mp = bucket.initiate_multipart_upload(key_name)
mp.upload_part_from_file(StringIO('\x00'), 1)
xml = mp.to_xml()
xml = xml.replace('<PartNumber>1</PartNumber>', '<PartNumber>9999</PartNumber>')
e = assert_raises(boto.exception.S3ResponseError, bucket.complete_multipart_upload, key_name, mp.id, xml)
eq(e.status, 400)
eq(e.reason, 'Bad Request')
eq(e.error_code, 'InvalidPart')
@attr(resource='object')
@attr(method='put')
@attr(operation='multi-part upload with incorrect ETag')
def test_multipart_upload_incorrect_etag():
bucket = get_new_bucket()
key_name = "mymultipart"
mp = bucket.initiate_multipart_upload(key_name)
mp.upload_part_from_file(StringIO('\x00'), 1)
xml = mp.to_xml()
xml = xml.replace('<ETag>"93b885adfe0da089cdf634904fd59f71"</ETag>', '<ETag>"ffffffffffffffffffffffffffffffff"</ETag>')
e = assert_raises(boto.exception.S3ResponseError, bucket.complete_multipart_upload, key_name, mp.id, xml)
eq(e.status, 400)
eq(e.reason, 'Bad Request')
eq(e.error_code, 'InvalidPart')
def _simple_http_req_100_cont(host, port, is_secure, method, resource):
"""
Send the specified request w/expect 100-continue
and await confirmation.
"""
req = '{method} {resource} HTTP/1.1\r\nHost: {host}\r\nAccept-Encoding: identity\r\nContent-Length: 123\r\nExpect: 100-continue\r\n\r\n'.format(
method=method,
resource=resource,
host=host,
)
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if is_secure:
s = ssl.wrap_socket(s);
s.settimeout(5)
s.connect((host, port))
s.send(req)
    try:
        data = s.recv(1024)
    except socket.error, msg:
        print 'got response: ', msg
        print 'most likely server doesn\'t support 100-continue'
        s.close()
        raise
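    # parse the status code out of the response status line, e.g. 'HTTP/1.1 100 Continue'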
l = data.split(' ')
assert l[0].startswith('HTTP')
return l[1]
@attr(resource='object')
@attr(method='put')
@attr(operation='w/expect continue')
@attr(assertion='succeeds if object is public-read-write')
@attr('100_continue')
@attr('fails_on_mod_proxy_fcgi')
def test_100_continue():
bucket = get_new_bucket()
objname = 'testobj'
resource = '/{bucket}/{obj}'.format(bucket=bucket.name, obj=objname)
status = _simple_http_req_100_cont(s3.main.host, s3.main.port, s3.main.is_secure, 'PUT', resource)
eq(status, '403')
bucket.set_acl('public-read-write')
status = _simple_http_req_100_cont(s3.main.host, s3.main.port, s3.main.is_secure, 'PUT', resource)
eq(status, '100')
def _test_bucket_acls_changes_persistent(bucket):
"""
set and verify readback of each possible permission
"""
perms = ('FULL_CONTROL', 'WRITE', 'WRITE_ACP', 'READ', 'READ_ACP')
for p in perms:
_build_bucket_acl_xml(p, bucket)
@attr(resource='bucket')
@attr(method='put')
@attr(operation='acl set')
@attr(assertion='all permissions are persistent')
def test_bucket_acls_changes_persistent():
bucket = get_new_bucket()
_test_bucket_acls_changes_persistent(bucket);
@attr(resource='bucket')
@attr(method='put')
@attr(operation='repeated acl set')
@attr(assertion='all permissions are persistent')
def test_stress_bucket_acls_changes():
bucket = get_new_bucket()
for i in xrange(10):
_test_bucket_acls_changes_persistent(bucket);
@attr(resource='bucket')
@attr(method='put')
@attr(operation='set cors')
@attr(assertion='succeeds')
def test_set_cors():
bucket = get_new_bucket()
cfg = CORSConfiguration()
cfg.add_rule('GET', '*.get')
cfg.add_rule('PUT', '*.put')
e = assert_raises(boto.exception.S3ResponseError, bucket.get_cors)
eq(e.status, 404)
bucket.set_cors(cfg)
new_cfg = bucket.get_cors()
eq(len(new_cfg), 2)
result = bunch.Bunch()
for c in new_cfg:
eq(len(c.allowed_method), 1)
eq(len(c.allowed_origin), 1)
result[c.allowed_method[0]] = c.allowed_origin[0]
eq(result['GET'], '*.get')
eq(result['PUT'], '*.put')
bucket.delete_cors()
e = assert_raises(boto.exception.S3ResponseError, bucket.get_cors)
eq(e.status, 404)
def _cors_request_and_check(func, url, headers, expect_status, expect_allow_origin, expect_allow_methods):
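    """
    Make the given CORS request and verify the response status and the
    access-control-allow-origin/-methods headers (None means the header
    should not be present).
    """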
r = func(url, headers=headers)
eq(r.status_code, expect_status)
    assert r.headers.get('access-control-allow-origin', None) == expect_allow_origin
    assert r.headers.get('access-control-allow-methods', None) == expect_allow_methods
@attr(resource='bucket')
@attr(method='get')
@attr(operation='check cors response when origin header set')
@attr(assertion='returning cors header')
def test_cors_origin_response():
cfg = CORSConfiguration()
bucket = get_new_bucket()
bucket.set_acl('public-read')
cfg.add_rule('GET', '*suffix')
cfg.add_rule('GET', 'start*end')
cfg.add_rule('GET', 'prefix*')
cfg.add_rule('PUT', '*.put')
e = assert_raises(boto.exception.S3ResponseError, bucket.get_cors)
eq(e.status, 404)
bucket.set_cors(cfg)
time.sleep(3) # waiting, since if running against amazon data consistency model is not strict read-after-write
url = _get_post_url(s3.main, bucket)
_cors_request_and_check(requests.get, url, None, 200, None, None)
_cors_request_and_check(requests.get, url, {'Origin': 'foo.suffix'}, 200, 'foo.suffix', 'GET')
_cors_request_and_check(requests.get, url, {'Origin': 'foo.bar'}, 200, None, None)
_cors_request_and_check(requests.get, url, {'Origin': 'foo.suffix.get'}, 200, None, None)
_cors_request_and_check(requests.get, url, {'Origin': 'startend'}, 200, 'startend', 'GET')
_cors_request_and_check(requests.get, url, {'Origin': 'start1end'}, 200, 'start1end', 'GET')
_cors_request_and_check(requests.get, url, {'Origin': 'start12end'}, 200, 'start12end', 'GET')
_cors_request_and_check(requests.get, url, {'Origin': '0start12end'}, 200, None, None)
_cors_request_and_check(requests.get, url, {'Origin': 'prefix'}, 200, 'prefix', 'GET')
_cors_request_and_check(requests.get, url, {'Origin': 'prefix.suffix'}, 200, 'prefix.suffix', 'GET')
_cors_request_and_check(requests.get, url, {'Origin': 'bla.prefix'}, 200, None, None)
obj_url = '{u}/{o}'.format(u=url, o='bar')
_cors_request_and_check(requests.get, obj_url, {'Origin': 'foo.suffix'}, 404, 'foo.suffix', 'GET')
_cors_request_and_check(requests.put, obj_url, {'Origin': 'foo.suffix', 'Access-Control-Request-Method': 'GET',
'content-length': '0'}, 403, 'foo.suffix', 'GET')
_cors_request_and_check(requests.put, obj_url, {'Origin': 'foo.suffix', 'Access-Control-Request-Method': 'PUT',
'content-length': '0'}, 403, None, None)
_cors_request_and_check(requests.put, obj_url, {'Origin': 'foo.suffix', 'Access-Control-Request-Method': 'DELETE',
'content-length': '0'}, 403, None, None)
_cors_request_and_check(requests.put, obj_url, {'Origin': 'foo.suffix', 'content-length': '0'}, 403, None, None)
_cors_request_and_check(requests.put, obj_url, {'Origin': 'foo.put', 'content-length': '0'}, 403, 'foo.put', 'PUT')
_cors_request_and_check(requests.get, obj_url, {'Origin': 'foo.suffix'}, 404, 'foo.suffix', 'GET')
_cors_request_and_check(requests.options, url, None, 400, None, None)
_cors_request_and_check(requests.options, url, {'Origin': 'foo.suffix'}, 400, None, None)
_cors_request_and_check(requests.options, url, {'Origin': 'bla'}, 400, None, None)
_cors_request_and_check(requests.options, obj_url, {'Origin': 'foo.suffix', 'Access-Control-Request-Method': 'GET',
'content-length': '0'}, 200, 'foo.suffix', 'GET')
_cors_request_and_check(requests.options, url, {'Origin': 'foo.bar', 'Access-Control-Request-Method': 'GET'}, 403, None, None)
_cors_request_and_check(requests.options, url, {'Origin': 'foo.suffix.get', 'Access-Control-Request-Method': 'GET'}, 403, None, None)
_cors_request_and_check(requests.options, url, {'Origin': 'startend', 'Access-Control-Request-Method': 'GET'}, 200, 'startend', 'GET')
_cors_request_and_check(requests.options, url, {'Origin': 'start1end', 'Access-Control-Request-Method': 'GET'}, 200, 'start1end', 'GET')
_cors_request_and_check(requests.options, url, {'Origin': 'start12end', 'Access-Control-Request-Method': 'GET'}, 200, 'start12end', 'GET')
_cors_request_and_check(requests.options, url, {'Origin': '0start12end', 'Access-Control-Request-Method': 'GET'}, 403, None, None)
_cors_request_and_check(requests.options, url, {'Origin': 'prefix', 'Access-Control-Request-Method': 'GET'}, 200, 'prefix', 'GET')
_cors_request_and_check(requests.options, url, {'Origin': 'prefix.suffix', 'Access-Control-Request-Method': 'GET'}, 200, 'prefix.suffix', 'GET')
_cors_request_and_check(requests.options, url, {'Origin': 'bla.prefix', 'Access-Control-Request-Method': 'GET'}, 403, None, None)
_cors_request_and_check(requests.options, url, {'Origin': 'foo.put', 'Access-Control-Request-Method': 'GET'}, 403, None, None)
_cors_request_and_check(requests.options, url, {'Origin': 'foo.put', 'Access-Control-Request-Method': 'PUT'}, 200, 'foo.put', 'PUT')
class FakeFile(object):
"""
file that simulates seek, tell, and current character
"""
def __init__(self, char='A', interrupt=None):
self.offset = 0
self.char = char
self.interrupt = interrupt
def seek(self, offset, whence=os.SEEK_SET):
if whence == os.SEEK_SET:
self.offset = offset
elif whence == os.SEEK_END:
self.offset = self.size + offset;
elif whence == os.SEEK_CUR:
self.offset += offset
def tell(self):
return self.offset
class FakeWriteFile(FakeFile):
"""
file that simulates interruptable reads of constant data
"""
def __init__(self, size, char='A', interrupt=None):
FakeFile.__init__(self, char, interrupt)
self.size = size
def read(self, size=-1):
if size < 0:
size = self.size - self.offset
count = min(size, self.size - self.offset)
self.offset += count
# Sneaky! do stuff before we return (the last time)
if self.interrupt != None and self.offset == self.size and count > 0:
self.interrupt()
return self.char*count
class FakeReadFile(FakeFile):
"""
    file that simulates writes, interrupting after the first write
"""
def __init__(self, size, char='A', interrupt=None):
FakeFile.__init__(self, char, interrupt)
self.interrupted = False
self.size = 0
self.expected_size = size
def write(self, chars):
eq(chars, self.char*len(chars))
self.offset += len(chars)
self.size += len(chars)
        # Sneaky! interrupt after the first write
if not self.interrupted and self.interrupt != None \
and self.offset > 0:
self.interrupt()
self.interrupted = True
def close(self):
eq(self.size, self.expected_size)
class FakeFileVerifier(object):
"""
file that verifies expected data has been written
"""
def __init__(self, char=None):
self.char = char
self.size = 0
def write(self, data):
size = len(data)
if self.char == None:
self.char = data[0]
self.size += size
eq(data, self.char*size)
def _verify_atomic_key_data(key, size=-1, char=None):
"""
Make sure file is of the expected size and (simulated) content
"""
fp_verify = FakeFileVerifier(char)
key.get_contents_to_file(fp_verify)
if size >= 0:
eq(fp_verify.size, size)
def _test_atomic_read(file_size):
"""
Create a file of A's, use it to set_contents_from_file.
    While reading the object back, overwrite it with a file of B's.
    Re-read the contents, and confirm we get B's
"""
bucket = get_new_bucket()
key = bucket.new_key('testobj')
# create object of <file_size> As
fp_a = FakeWriteFile(file_size, 'A')
key.set_contents_from_file(fp_a)
read_conn = boto.s3.connection.S3Connection(
aws_access_key_id=s3['main'].aws_access_key_id,
aws_secret_access_key=s3['main'].aws_secret_access_key,
is_secure=s3['main'].is_secure,
port=s3['main'].port,
host=s3['main'].host,
calling_format=s3['main'].calling_format,
)
read_bucket = read_conn.get_bucket(bucket.name)
read_key = read_bucket.get_key('testobj')
fp_b = FakeWriteFile(file_size, 'B')
fp_a2 = FakeReadFile(file_size, 'A',
lambda: key.set_contents_from_file(fp_b)
)
    # read the object back; partway through, the interrupt callback overwrites it with B's
read_key.get_contents_to_file(fp_a2)
fp_a2.close()
_verify_atomic_key_data(key, file_size, 'B')
@attr(resource='object')
@attr(method='put')
@attr(operation='read atomicity')
@attr(assertion='1MB successful')
def test_atomic_read_1mb():
_test_atomic_read(1024*1024)
@attr(resource='object')
@attr(method='put')
@attr(operation='read atomicity')
@attr(assertion='4MB successful')
def test_atomic_read_4mb():
_test_atomic_read(1024*1024*4)
@attr(resource='object')
@attr(method='put')
@attr(operation='read atomicity')
@attr(assertion='8MB successful')
def test_atomic_read_8mb():
_test_atomic_read(1024*1024*8)
def _test_atomic_write(file_size):
"""
Create a file of A's, use it to set_contents_from_file.
Verify the contents are all A's.
Create a file of B's, use it to re-set_contents_from_file.
    Before the re-set completes, verify the contents are still A's
Re-read the contents, and confirm we get B's
"""
bucket = get_new_bucket()
objname = 'testobj'
key = bucket.new_key(objname)
# create <file_size> file of A's
fp_a = FakeWriteFile(file_size, 'A')
key.set_contents_from_file(fp_a)
# verify A's
_verify_atomic_key_data(key, file_size, 'A')
read_key = bucket.get_key(objname)
# create <file_size> file of B's
# but try to verify the file before we finish writing all the B's
fp_b = FakeWriteFile(file_size, 'B',
lambda: _verify_atomic_key_data(read_key, file_size)
)
key.set_contents_from_file(fp_b)
# verify B's
_verify_atomic_key_data(key, file_size, 'B')
@attr(resource='object')
@attr(method='put')
@attr(operation='write atomicity')
@attr(assertion='1MB successful')
def test_atomic_write_1mb():
_test_atomic_write(1024*1024)
@attr(resource='object')
@attr(method='put')
@attr(operation='write atomicity')
@attr(assertion='4MB successful')
def test_atomic_write_4mb():
_test_atomic_write(1024*1024*4)
@attr(resource='object')
@attr(method='put')
@attr(operation='write atomicity')
@attr(assertion='8MB successful')
def test_atomic_write_8mb():
_test_atomic_write(1024*1024*8)
def _test_atomic_dual_write(file_size):
"""
create an object, two sessions writing different contents
confirm that it is all one or the other
"""
bucket = get_new_bucket()
objname = 'testobj'
key = bucket.new_key(objname)
# get a second key object (for the same key)
# so both can be writing without interfering
key2 = bucket.new_key(objname)
# write <file_size> file of B's
# but before we're done, try to write all A's
fp_a = FakeWriteFile(file_size, 'A')
fp_b = FakeWriteFile(file_size, 'B',
lambda: key2.set_contents_from_file(fp_a, rewind=True)
)
key.set_contents_from_file(fp_b)
# verify the file
_verify_atomic_key_data(key, file_size)
@attr(resource='object')
@attr(method='put')
@attr(operation='write one or the other')
@attr(assertion='1MB successful')
def test_atomic_dual_write_1mb():
_test_atomic_dual_write(1024*1024)
@attr(resource='object')
@attr(method='put')
@attr(operation='write one or the other')
@attr(assertion='4MB successful')
def test_atomic_dual_write_4mb():
_test_atomic_dual_write(1024*1024*4)
@attr(resource='object')
@attr(method='put')
@attr(operation='write one or the other')
@attr(assertion='8MB successful')
def test_atomic_dual_write_8mb():
_test_atomic_dual_write(1024*1024*8)
def _test_atomic_conditional_write(file_size):
"""
Create a file of A's, use it to set_contents_from_file.
Verify the contents are all A's.
    Create a file of B's, use it to re-set_contents_from_file with an
    'If-Match: *' conditional header.
    Before the re-set completes, verify the contents are still A's
    Re-read the contents, and confirm we get B's
"""
bucket = get_new_bucket()
objname = 'testobj'
key = bucket.new_key(objname)
# create <file_size> file of A's
fp_a = FakeWriteFile(file_size, 'A')
key.set_contents_from_file(fp_a)
# verify A's
_verify_atomic_key_data(key, file_size, 'A')
read_key = bucket.get_key(objname)
# create <file_size> file of B's
# but try to verify the file before we finish writing all the B's
fp_b = FakeWriteFile(file_size, 'B',
lambda: _verify_atomic_key_data(read_key, file_size)
)
key.set_contents_from_file(fp_b, headers={'If-Match': '*'})
# verify B's
_verify_atomic_key_data(key, file_size, 'B')
@attr(resource='object')
@attr(method='put')
@attr(operation='write atomicity')
@attr(assertion='1MB successful')
@attr('fails_on_aws')
def test_atomic_conditional_write_1mb():
_test_atomic_conditional_write(1024*1024)
def _test_atomic_dual_conditional_write(file_size):
"""
    create an object, then issue two conditional (If-Match) writes with
    different contents; confirm the interrupting write succeeds and the
    original write fails with 412 Precondition Failed
"""
bucket = get_new_bucket()
objname = 'testobj'
key = bucket.new_key(objname)
fp_a = FakeWriteFile(file_size, 'A')
key.set_contents_from_file(fp_a)
_verify_atomic_key_data(key, file_size, 'A')
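    # keep the ETag (without the surrounding quotes) for use in If-Match headers below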
etag_fp_a = key.etag.replace('"', '').strip()
# get a second key object (for the same key)
# so both can be writing without interfering
key2 = bucket.new_key(objname)
# write <file_size> file of C's
# but before we're done, try to write all B's
fp_b = FakeWriteFile(file_size, 'B')
fp_c = FakeWriteFile(file_size, 'C',
lambda: key2.set_contents_from_file(fp_b, rewind=True, headers={'If-Match': etag_fp_a})
)
# key.set_contents_from_file(fp_c, headers={'If-Match': etag_fp_a})
e = assert_raises(boto.exception.S3ResponseError, key.set_contents_from_file, fp_c,
headers={'If-Match': etag_fp_a})
eq(e.status, 412)
eq(e.reason, 'Precondition Failed')
eq(e.error_code, 'PreconditionFailed')
# verify the file
_verify_atomic_key_data(key, file_size, 'B')
@attr(resource='object')
@attr(method='put')
@attr(operation='write one or the other')
@attr(assertion='1MB successful')
@attr('fails_on_aws')
def test_atomic_dual_conditional_write_1mb():
_test_atomic_dual_conditional_write(1024*1024)
@attr(resource='object')
@attr(method='put')
@attr(operation='write file in deleted bucket')
@attr(assertion='fail 404')
@attr('fails_on_aws')
def test_atomic_write_bucket_gone():
bucket = get_new_bucket()
def remove_bucket():
bucket.delete()
# create file of A's but delete the bucket it's in before we finish writing
# all of them
key = bucket.new_key('foo')
fp_a = FakeWriteFile(1024*1024, 'A', remove_bucket)
e = assert_raises(boto.exception.S3ResponseError, key.set_contents_from_file, fp_a)
eq(e.status, 404)
eq(e.reason, 'Not Found')
eq(e.error_code, 'NoSuchBucket')
@attr(resource='object')
@attr(method='get')
@attr(operation='range')
@attr(assertion='returns correct data, 206')
def test_ranged_request_response_code():
content = 'testcontent'
bucket = get_new_bucket()
key = bucket.new_key('testobj')
key.set_contents_from_string(content)
key.open('r', headers={'Range': 'bytes=4-7'})
status = key.resp.status
content_range = key.resp.getheader('Content-Range')
fetched_content = ''
for data in key:
fetched_content += data;
key.close()
eq(fetched_content, content[4:8])
eq(status, 206)
eq(content_range, 'bytes 4-7/11')
@attr(resource='object')
@attr(method='get')
@attr(operation='range')
@attr(assertion='returns correct data, 206')
def test_ranged_request_skip_leading_bytes_response_code():
content = 'testcontent'
bucket = get_new_bucket()
key = bucket.new_key('testobj')
key.set_contents_from_string(content)
# test trailing bytes
key.open('r', headers={'Range': 'bytes=4-'})
status = key.resp.status
content_range = key.resp.getheader('Content-Range')
fetched_content = ''
for data in key:
fetched_content += data;
key.close()
eq(fetched_content, content[4:])
eq(status, 206)
eq(content_range, 'bytes 4-10/11')
@attr(resource='object')
@attr(method='get')
@attr(operation='range')
@attr(assertion='returns correct data, 206')
def test_ranged_request_return_trailing_bytes_response_code():
content = 'testcontent'
bucket = get_new_bucket()
key = bucket.new_key('testobj')
key.set_contents_from_string(content)
# test leading bytes
key.open('r', headers={'Range': 'bytes=-7'})
status = key.resp.status
content_range = key.resp.getheader('Content-Range')
fetched_content = ''
for data in key:
fetched_content += data;
key.close()
eq(fetched_content, content[-7:])
eq(status, 206)
eq(content_range, 'bytes 4-10/11')
@attr(resource='object')
@attr(method='get')
@attr(operation='range')
@attr(assertion='returns invalid range, 416')
def test_ranged_request_invalid_range():
content = 'testcontent'
bucket = get_new_bucket()
key = bucket.new_key('testobj')
key.set_contents_from_string(content)
# test invalid range
e = assert_raises(boto.exception.S3ResponseError, key.open, 'r', headers={'Range': 'bytes=40-50'})
eq(e.status, 416)
eq(e.error_code, 'InvalidRange')
def check_can_test_multiregion():
if not targets.main.master or len(targets.main.secondaries) == 0:
raise SkipTest
def create_presigned_url(conn, method, bucket_name, key_name, expiration):
return conn.generate_url(expires_in=expiration,
method=method,
bucket=bucket_name,
key=key_name,
query_auth=True,
)
def send_raw_http_request(conn, method, bucket_name, key_name, follow_redirects = False):
url = create_presigned_url(conn, method, bucket_name, key_name, 3600)
print url
h = httplib2.Http()
h.follow_redirects = follow_redirects
return h.request(url, method)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='create on one region, access in another')
@attr(assertion='can\'t access in other region')
@attr('multiregion')
def test_region_bucket_create_secondary_access_remove_master():
check_can_test_multiregion()
master_conn = targets.main.master.connection
for r in targets.main.secondaries:
conn = r.connection
bucket = get_new_bucket(r)
r, content = send_raw_http_request(master_conn, 'GET', bucket.name, '', follow_redirects = False)
eq(r.status, 301)
r, content = send_raw_http_request(master_conn, 'DELETE', bucket.name, '', follow_redirects = False)
eq(r.status, 301)
conn.delete_bucket(bucket)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='create on one region, access in another')
@attr(assertion='can\'t access in other region')
@attr('multiregion')
def test_region_bucket_create_master_access_remove_secondary():
check_can_test_multiregion()
master = targets.main.master
master_conn = master.connection
for r in targets.main.secondaries:
conn = r.connection
bucket = get_new_bucket(master)
region_sync_meta(targets.main, master)
r, content = send_raw_http_request(conn, 'GET', bucket.name, '', follow_redirects = False)
eq(r.status, 301)
r, content = send_raw_http_request(conn, 'DELETE', bucket.name, '', follow_redirects = False)
eq(r.status, 301)
master_conn.delete_bucket(bucket)
region_sync_meta(targets.main, master)
e = assert_raises(boto.exception.S3ResponseError, conn.get_bucket, bucket.name)
eq(e.status, 404)
e = assert_raises(boto.exception.S3ResponseError, master_conn.get_bucket, bucket.name)
eq(e.status, 404)
@attr(resource='object')
@attr(method='copy')
@attr(operation='copy object between regions, verify')
@attr(assertion='can read object')
@attr('multiregion')
def test_region_copy_object():
check_can_test_multiregion()
for (k, dest) in targets.main.iteritems():
dest_conn = dest.connection
dest_bucket = get_new_bucket(dest)
print 'created new dest bucket ', dest_bucket.name
region_sync_meta(targets.main, dest)
if is_slow_backend():
sizes = (1024, 10 * 1024 * 1024)
else:
sizes = (1024, 10 * 1024 * 1024, 100 * 1024 * 1024)
for file_size in sizes:
for (k2, r) in targets.main.iteritems():
            if r is dest:  # skip copying within the destination region itself
continue
conn = r.connection
bucket = get_new_bucket(r)
print 'created bucket', bucket.name
region_sync_meta(targets.main, r)
content = 'testcontent'
print 'creating key=testobj', 'bucket=',bucket.name
key = bucket.new_key('testobj')
fp_a = FakeWriteFile(file_size, 'A')
key.set_contents_from_file(fp_a)
print 'calling region_sync_meta'
region_sync_meta(targets.main, r)
print 'dest_bucket=', dest_bucket.name, 'key=', key.name
dest_key = dest_bucket.copy_key('testobj-dest', bucket.name, key.name)
print
# verify dest
_verify_atomic_key_data(dest_key, file_size, 'A')
bucket.delete_key(key.name)
# confirm that the key was deleted as expected
region_sync_meta(targets.main, r)
temp_key = bucket.get_key(key.name)
assert temp_key == None
print 'removing bucket', bucket.name
conn.delete_bucket(bucket)
# confirm that the bucket was deleted as expected
region_sync_meta(targets.main, r)
e = assert_raises(boto.exception.S3ResponseError, conn.get_bucket, bucket.name)
eq(e.status, 404)
e = assert_raises(boto.exception.S3ResponseError, dest_conn.get_bucket, bucket.name)
eq(e.status, 404)
# confirm that the key was deleted as expected
dest_bucket.delete_key(dest_key.name)
temp_key = dest_bucket.get_key(dest_key.name)
assert temp_key == None
dest_conn.delete_bucket(dest_bucket)
region_sync_meta(targets.main, dest)
# ensure that dest_bucket was deleted on this host and all other hosts
e = assert_raises(boto.exception.S3ResponseError, dest_conn.get_bucket, dest_bucket.name)
eq(e.status, 404)
for (k2, r) in targets.main.iteritems():
        if r is dest:  # the destination region was already verified above
continue
conn = r.connection
e = assert_raises(boto.exception.S3ResponseError, conn.get_bucket, dest_bucket.name)
eq(e.status, 404)
def check_versioning(bucket, status):
try:
eq(bucket.get_versioning_status()['Versioning'], status)
except KeyError:
eq(status, None)
# amazon is eventual consistent, retry a bit if failed
def check_configure_versioning_retry(bucket, status, expected_string):
bucket.configure_versioning(status)
read_status = None
for i in xrange(5):
try:
read_status = bucket.get_versioning_status()['Versioning']
except KeyError:
read_status = None
if (expected_string == read_status):
break
time.sleep(1)
eq(expected_string, read_status)
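# Illustrative sketch (not called by the suite): the retry helper above is the
# canonical way these versioning tests flip the bucket flag -- request the new
# state, then poll until the eventually consistent status matches it. The
# bucket argument is assumed to come from get_new_bucket().
def _example_enable_then_suspend_versioning(bucket):
    check_configure_versioning_retry(bucket, True, "Enabled")
    check_configure_versioning_retry(bucket, False, "Suspended")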
@attr(resource='bucket')
@attr(method='create')
@attr(operation='create versioned bucket')
@attr(assertion='can create and suspend bucket versioning')
@attr('versioning')
def test_versioning_bucket_create_suspend():
bucket = get_new_bucket()
check_versioning(bucket, None)
check_configure_versioning_retry(bucket, False, "Suspended")
check_configure_versioning_retry(bucket, True, "Enabled")
check_configure_versioning_retry(bucket, True, "Enabled")
check_configure_versioning_retry(bucket, False, "Suspended")
def check_head_obj_content(key, content):
if content is not None:
eq(key.get_contents_as_string(), content)
else:
print 'check head', key
eq(key, None)
def check_obj_content(key, content):
if content is not None:
eq(key.get_contents_as_string(), content)
else:
eq(isinstance(key, boto.s3.deletemarker.DeleteMarker), True)
def check_obj_versions(bucket, objname, keys, contents):
# check to see if object is pointing at correct version
key = bucket.get_key(objname)
if len(contents) > 0:
print 'testing obj head', objname
check_head_obj_content(key, contents[-1])
i = len(contents)
for key in bucket.list_versions():
if key.name != objname:
continue
i -= 1
eq(keys[i].version_id or 'null', key.version_id)
print 'testing obj version-id=', key.version_id
check_obj_content(key, contents[i])
else:
eq(key, None)
def create_multiple_versions(bucket, objname, num_versions, k = None, c = None):
c = c or []
k = k or []
for i in xrange(num_versions):
c.append('content-{i}'.format(i=i))
key = bucket.new_key(objname)
key.set_contents_from_string(c[i])
if i == 0:
check_configure_versioning_retry(bucket, True, "Enabled")
k_pos = len(k)
i = 0
for o in bucket.list_versions():
if o.name != objname:
continue
i += 1
if i > num_versions:
break
print o, o.version_id
k.insert(k_pos, o)
print 'created obj name=', objname, 'version-id=', o.version_id
eq(len(k), len(c))
for j in xrange(num_versions):
print j, k[j], k[j].version_id
check_obj_versions(bucket, objname, k, c)
return (k, c)
def remove_obj_version(bucket, k, c, i):
# check by versioned key
i = i % len(k)
rmkey = k.pop(i)
content = c.pop(i)
if (not rmkey.delete_marker):
eq(rmkey.get_contents_as_string(), content)
# remove version
print 'removing version_id=', rmkey.version_id
bucket.delete_key(rmkey.name, version_id = rmkey.version_id)
check_obj_versions(bucket, rmkey.name, k, c)
def remove_obj_head(bucket, objname, k, c):
print 'removing obj=', objname
key = bucket.delete_key(objname)
k.append(key)
c.append(None)
eq(key.delete_marker, True)
check_obj_versions(bucket, objname, k, c)
def _do_test_create_remove_versions(bucket, objname, num_versions, remove_start_idx, idx_inc):
(k, c) = create_multiple_versions(bucket, objname, num_versions)
idx = remove_start_idx
for j in xrange(num_versions):
remove_obj_version(bucket, k, c, idx)
idx += idx_inc
def _do_remove_versions(bucket, objname, remove_start_idx, idx_inc, head_rm_ratio, k, c):
idx = remove_start_idx
r = 0
total = len(k)
for j in xrange(total):
r += head_rm_ratio
if r >= 1:
r %= 1
remove_obj_head(bucket, objname, k, c)
else:
remove_obj_version(bucket, k, c, idx)
idx += idx_inc
check_obj_versions(bucket, objname, k, c)
def _do_test_create_remove_versions_and_head(bucket, objname, num_versions, num_ops, remove_start_idx, idx_inc, head_rm_ratio):
(k, c) = create_multiple_versions(bucket, objname, num_versions)
_do_remove_versions(bucket, objname, remove_start_idx, idx_inc, head_rm_ratio, k, c)
@attr(resource='object')
@attr(method='create')
@attr(operation='create and remove versioned object')
@attr(assertion='can create access and remove appropriate versions')
@attr('versioning')
def test_versioning_obj_create_read_remove():
bucket = get_new_bucket()
objname = 'testobj'
num_vers = 5
_do_test_create_remove_versions(bucket, objname, num_vers, -1, 0)
_do_test_create_remove_versions(bucket, objname, num_vers, -1, 0)
_do_test_create_remove_versions(bucket, objname, num_vers, 0, 0)
_do_test_create_remove_versions(bucket, objname, num_vers, 1, 0)
_do_test_create_remove_versions(bucket, objname, num_vers, 4, -1)
_do_test_create_remove_versions(bucket, objname, num_vers, 3, 3)
@attr(resource='object')
@attr(method='create')
@attr(operation='create and remove versioned object and head')
@attr(assertion='can create access and remove appropriate versions')
@attr('versioning')
def test_versioning_obj_create_read_remove_head():
bucket = get_new_bucket()
objname = 'testobj'
num_vers = 5
_do_test_create_remove_versions_and_head(bucket, objname, num_vers, num_vers * 2, -1, 0, 0.5)
def is_null_key(k):
return (k.version_id is None) or (k.version_id == 'null')
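# Illustrative sketch (not called by the suite): objects written while bucket
# versioning is suspended live under the special 'null' version id, which is
# what is_null_key() detects so the helpers below can drop the stale null
# entry from their bookkeeping lists before appending the new one.
def _example_count_null_versions(bucket):
    return sum(1 for v in bucket.list_versions() if is_null_key(v))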
def delete_suspended_versioning_obj(bucket, objname, k, c):
key = bucket.delete_key(objname)
i = 0
while i < len(k):
if is_null_key(k[i]):
k.pop(i)
c.pop(i)
else:
i += 1
key.version_id = "null"
k.append(key)
c.append(None)
check_obj_versions(bucket, objname, k, c)
def overwrite_suspended_versioning_obj(bucket, objname, k, c, content):
key = bucket.new_key(objname)
key.set_contents_from_string(content)
i = 0
while i < len(k):
print 'kkk', i, k[i], k[i].version_id
if is_null_key(k[i]):
print 'null key!'
k.pop(i)
c.pop(i)
else:
i += 1
k.append(key)
c.append(content)
check_obj_versions(bucket, objname, k, c)
@attr(resource='object')
@attr(method='create')
@attr(operation='suspend versioned bucket')
@attr(assertion='suspended versioning behaves correctly')
@attr('versioning')
def test_versioning_obj_suspend_versions():
bucket = get_new_bucket()
check_versioning(bucket, None)
check_configure_versioning_retry(bucket, True, "Enabled")
num_versions = 5
objname = 'testobj'
(k, c) = create_multiple_versions(bucket, objname, num_versions)
check_configure_versioning_retry(bucket, False, "Suspended")
delete_suspended_versioning_obj(bucket, objname, k, c)
delete_suspended_versioning_obj(bucket, objname, k, c)
overwrite_suspended_versioning_obj(bucket, objname, k, c, 'null content 1')
overwrite_suspended_versioning_obj(bucket, objname, k, c, 'null content 2')
delete_suspended_versioning_obj(bucket, objname, k, c)
overwrite_suspended_versioning_obj(bucket, objname, k, c, 'null content 3')
delete_suspended_versioning_obj(bucket, objname, k, c)
check_configure_versioning_retry(bucket, True, "Enabled")
(k, c) = create_multiple_versions(bucket, objname, 3, k, c)
_do_remove_versions(bucket, objname, 0, 5, 0.5, k, c)
_do_remove_versions(bucket, objname, 0, 5, 0, k, c)
eq(len(k), 0)
eq(len(k), len(c))
@attr(resource='object')
@attr(method='create')
@attr(operation='suspend versioned bucket')
@attr(assertion='suspended versioning behaves correctly')
@attr('versioning')
def test_versioning_obj_suspend_versions_simple():
bucket = get_new_bucket()
check_versioning(bucket, None)
check_configure_versioning_retry(bucket, True, "Enabled")
num_versions = 1
objname = 'testobj'
(k, c) = create_multiple_versions(bucket, objname, num_versions)
check_configure_versioning_retry(bucket, False, "Suspended")
delete_suspended_versioning_obj(bucket, objname, k, c)
check_configure_versioning_retry(bucket, True, "Enabled")
(k, c) = create_multiple_versions(bucket, objname, 1, k, c)
for i in xrange(len(k)):
print 'JJJ: ', k[i].version_id, c[i]
_do_remove_versions(bucket, objname, 0, 0, 0.5, k, c)
_do_remove_versions(bucket, objname, 0, 0, 0, k, c)
eq(len(k), 0)
eq(len(k), len(c))
@attr(resource='object')
@attr(method='remove')
@attr(operation='create and remove versions')
@attr(assertion='everything works')
@attr('versioning')
def test_versioning_obj_create_versions_remove_all():
bucket = get_new_bucket()
check_versioning(bucket, None)
check_configure_versioning_retry(bucket, True, "Enabled")
num_versions = 10
objname = 'testobj'
(k, c) = create_multiple_versions(bucket, objname, num_versions)
_do_remove_versions(bucket, objname, 0, 5, 0.5, k, c)
_do_remove_versions(bucket, objname, 0, 5, 0, k, c)
eq(len(k), 0)
eq(len(k), len(c))
@attr(resource='object')
@attr(method='remove')
@attr(operation='create and remove versions')
@attr(assertion='everything works')
@attr('versioning')
def test_versioning_obj_create_versions_remove_special_names():
bucket = get_new_bucket()
check_versioning(bucket, None)
check_configure_versioning_retry(bucket, True, "Enabled")
num_versions = 10
objnames = ['_testobj', '_', ':', ' ']
for objname in objnames:
(k, c) = create_multiple_versions(bucket, objname, num_versions)
_do_remove_versions(bucket, objname, 0, 5, 0.5, k, c)
_do_remove_versions(bucket, objname, 0, 5, 0, k, c)
eq(len(k), 0)
eq(len(k), len(c))
@attr(resource='object')
@attr(method='multipart')
@attr(operation='create and test multipart object')
@attr(assertion='everything works')
@attr('versioning')
def test_versioning_obj_create_overwrite_multipart():
bucket = get_new_bucket()
check_configure_versioning_retry(bucket, True, "Enabled")
objname = 'testobj'
c = []
num_vers = 3
for i in xrange(num_vers):
c.append(_do_test_multipart_upload_contents(bucket, objname, 3))
k = []
for key in bucket.list_versions():
k.insert(0, key)
eq(len(k), num_vers)
check_obj_versions(bucket, objname, k, c)
_do_remove_versions(bucket, objname, 0, 3, 0.5, k, c)
_do_remove_versions(bucket, objname, 0, 3, 0, k, c)
eq(len(k), 0)
eq(len(k), len(c))
@attr(resource='object')
@attr(method='multipart')
@attr(operation='list versioned objects')
@attr(assertion='everything works')
@attr('versioning')
def test_versioning_obj_list_marker():
bucket = get_new_bucket()
check_configure_versioning_retry(bucket, True, "Enabled")
objname = 'testobj'
objname2 = 'testobj-1'
num_vers = 5
(k, c) = create_multiple_versions(bucket, objname, num_vers)
(k2, c2) = create_multiple_versions(bucket, objname2, num_vers)
k.reverse()
k2.reverse()
allkeys = k + k2
names = []
for key1, key2 in itertools.izip_longest(bucket.list_versions(), allkeys):
eq(key1.version_id, key2.version_id)
names.append(key1.name)
for i in xrange(len(allkeys)):
for key1, key2 in itertools.izip_longest(bucket.list_versions(key_marker=names[i], version_id_marker=allkeys[i].version_id), allkeys[i+1:]):
eq(key1.version_id, key2.version_id)
# with nonexisting version id, skip to next object
for key1, key2 in itertools.izip_longest(bucket.list_versions(key_marker=objname, version_id_marker='nosuchversion'), allkeys[5:]):
eq(key1.version_id, key2.version_id)
@attr(resource='object')
@attr(method='multipart')
@attr(operation='create and test versioned object copying')
@attr(assertion='everything works')
@attr('versioning')
def test_versioning_copy_obj_version():
bucket = get_new_bucket()
check_configure_versioning_retry(bucket, True, "Enabled")
num_versions = 3
objname = 'testobj'
(k, c) = create_multiple_versions(bucket, objname, num_versions)
# copy into the same bucket
for i in xrange(num_versions):
new_key_name = 'key_{i}'.format(i=i)
new_key = bucket.copy_key(new_key_name, bucket.name, k[i].name, src_version_id=k[i].version_id)
eq(new_key.get_contents_as_string(), c[i])
another_bucket = get_new_bucket()
# copy into a different bucket
for i in xrange(num_versions):
new_key_name = 'key_{i}'.format(i=i)
new_key = another_bucket.copy_key(new_key_name, bucket.name, k[i].name, src_version_id=k[i].version_id)
eq(new_key.get_contents_as_string(), c[i])
# test copy of head object
new_key = another_bucket.copy_key('new_key', bucket.name, objname)
eq(new_key.get_contents_as_string(), c[num_versions - 1])
def _count_bucket_versioned_objs(bucket):
k = []
for key in bucket.list_versions():
k.insert(0, key)
return len(k)
@attr(resource='object')
@attr(method='delete')
@attr(operation='delete multiple versions')
@attr(assertion='deletes multiple versions of an object with a single call')
@attr('versioning')
def test_versioning_multi_object_delete():
bucket = get_new_bucket()
check_configure_versioning_retry(bucket, True, "Enabled")
keyname = 'key'
key0 = bucket.new_key(keyname)
key0.set_contents_from_string('foo')
key1 = bucket.new_key(keyname)
key1.set_contents_from_string('bar')
stored_keys = []
for key in bucket.list_versions():
stored_keys.insert(0, key)
eq(len(stored_keys), 2)
result = bucket.delete_keys(stored_keys)
eq(len(result.deleted), 2)
eq(len(result.errors), 0)
eq(_count_bucket_versioned_objs(bucket), 0)
# now remove again, should all succeed due to idempotency
result = bucket.delete_keys(stored_keys)
eq(len(result.deleted), 2)
eq(len(result.errors), 0)
eq(_count_bucket_versioned_objs(bucket), 0)
@attr(resource='object')
@attr(method='delete')
@attr(operation='delete multiple versions')
@attr(assertion='deletes multiple versions of an object and delete marker with a single call')
@attr('versioning')
def test_versioning_multi_object_delete_with_marker():
bucket = get_new_bucket()
check_configure_versioning_retry(bucket, True, "Enabled")
keyname = 'key'
key0 = bucket.new_key(keyname)
key0.set_contents_from_string('foo')
key1 = bucket.new_key(keyname)
key1.set_contents_from_string('bar')
key2 = bucket.delete_key(keyname)
eq(key2.delete_marker, True)
stored_keys = []
for key in bucket.list_versions():
stored_keys.insert(0, key)
eq(len(stored_keys), 3)
result = bucket.delete_keys(stored_keys)
eq(len(result.deleted), 3)
eq(len(result.errors), 0)
eq(_count_bucket_versioned_objs(bucket), 0)
delete_markers = []
for o in result.deleted:
if o.delete_marker:
delete_markers.insert(0, o)
eq(len(delete_markers), 1)
eq(key2.version_id, delete_markers[0].version_id)
# now remove again, should all succeed due to idempotency
result = bucket.delete_keys(stored_keys)
eq(len(result.deleted), 3)
eq(len(result.errors), 0)
eq(_count_bucket_versioned_objs(bucket), 0)
@attr(resource='object')
@attr(method='delete')
@attr(operation='multi delete create marker')
@attr(assertion='returns correct marker version id')
@attr('versioning')
def test_versioning_multi_object_delete_with_marker_create():
bucket = get_new_bucket()
check_configure_versioning_retry(bucket, True, "Enabled")
keyname = 'key'
rmkeys = [ bucket.new_key(keyname) ]
eq(_count_bucket_versioned_objs(bucket), 0)
result = bucket.delete_keys(rmkeys)
eq(len(result.deleted), 1)
eq(_count_bucket_versioned_objs(bucket), 1)
delete_markers = []
for o in result.deleted:
if o.delete_marker:
delete_markers.insert(0, o)
eq(len(delete_markers), 1)
for o in bucket.list_versions():
eq(o.name, keyname)
eq(o.version_id, delete_markers[0].delete_marker_version_id)
@attr(resource='object')
@attr(method='put')
@attr(operation='change acl on an object version changes specific version')
@attr(assertion='works')
@attr('versioning')
def test_versioned_object_acl():
bucket = get_new_bucket()
check_configure_versioning_retry(bucket, True, "Enabled")
keyname = 'foo'
key0 = bucket.new_key(keyname)
key0.set_contents_from_string('bar')
key1 = bucket.new_key(keyname)
key1.set_contents_from_string('bla')
key2 = bucket.new_key(keyname)
key2.set_contents_from_string('zxc')
stored_keys = []
for key in bucket.list_versions():
stored_keys.insert(0, key)
k1 = stored_keys[1]
policy = bucket.get_acl(key_name=k1.name, version_id=k1.version_id)
default_policy = [
dict(
permission='FULL_CONTROL',
id=policy.owner.id,
display_name=policy.owner.display_name,
uri=None,
email_address=None,
type='CanonicalUser',
),
]
print repr(policy)
check_grants(policy.acl.grants, default_policy)
bucket.set_canned_acl('public-read', key_name=k1.name, version_id=k1.version_id)
policy = bucket.get_acl(key_name=k1.name, version_id=k1.version_id)
print repr(policy)
check_grants(
policy.acl.grants,
[
dict(
permission='FULL_CONTROL',
id=policy.owner.id,
display_name=policy.owner.display_name,
uri=None,
email_address=None,
type='CanonicalUser',
),
dict(
permission='READ',
id=None,
display_name=None,
uri='http://acs.amazonaws.com/groups/global/AllUsers',
email_address=None,
type='Group',
),
],
)
k = bucket.new_key(keyname)
check_grants(k.get_acl().acl.grants, default_policy)
def _do_create_object(bucket, objname, i):
k = bucket.new_key(objname)
k.set_contents_from_string('data {i}'.format(i=i))
def _do_remove_ver(bucket, obj):
bucket.delete_key(obj.name, version_id = obj.version_id)
def _do_create_versioned_obj_concurrent(bucket, objname, num):
t = []
for i in range(num):
thr = threading.Thread(target = _do_create_object, args=(bucket, objname, i))
thr.start()
t.append(thr)
return t
def _do_clear_versioned_bucket_concurrent(bucket):
t = []
for o in bucket.list_versions():
thr = threading.Thread(target = _do_remove_ver, args=(bucket, o))
thr.start()
t.append(thr)
return t
def _do_wait_completion(t):
for thr in t:
thr.join()
@attr(resource='object')
@attr(method='put')
@attr(operation='concurrent creation of objects, concurrent removal')
@attr(assertion='works')
@attr('versioning')
def test_versioned_concurrent_object_create_concurrent_remove():
bucket = get_new_bucket()
check_configure_versioning_retry(bucket, True, "Enabled")
keyname = 'myobj'
num_objs = 5
for i in xrange(5):
t = _do_create_versioned_obj_concurrent(bucket, keyname, num_objs)
_do_wait_completion(t)
eq(_count_bucket_versioned_objs(bucket), num_objs)
eq(len(bucket.get_all_keys()), 1)
t = _do_clear_versioned_bucket_concurrent(bucket)
_do_wait_completion(t)
eq(_count_bucket_versioned_objs(bucket), 0)
eq(len(bucket.get_all_keys()), 0)
@attr(resource='object')
@attr(method='put')
@attr(operation='concurrent creation and removal of objects')
@attr(assertion='works')
@attr('versioning')
def test_versioned_concurrent_object_create_and_remove():
bucket = get_new_bucket()
check_configure_versioning_retry(bucket, True, "Enabled")
keyname = 'myobj'
num_objs = 3
all_threads = []
for i in xrange(3):
t = _do_create_versioned_obj_concurrent(bucket, keyname, num_objs)
all_threads.append(t)
t = _do_clear_versioned_bucket_concurrent(bucket)
all_threads.append(t)
for t in all_threads:
_do_wait_completion(t)
t = _do_clear_versioned_bucket_concurrent(bucket)
_do_wait_completion(t)
eq(_count_bucket_versioned_objs(bucket), 0)
eq(len(bucket.get_all_keys()), 0)
| mit | 6,290,664,039,525,795,000 | 30.922219 | 425 | 0.645011 | false |
pachterlab/kma | inst/pre-process/tests/test_intron_ops.py | 1 | 19345 | # intron ops
# Copyright (C) 2015 Harold Pimentel
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
import unittest
from gtf_parser import *
from intron_ops import *
class TestIntronOps(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_reduce_to_gene(self):
gtf_dict = gtf_parse('/Users/hjp/lmcb/unittests/3gene.gtf')
gtf_list = sorted(gtf_dict.values(), key = lambda x: x.front_coordinate)
g2t = reduce_to_gene(gtf_list)
self.assertEqual(len(g2t), 3)
self.assertEqual(len(g2t['ENSG00000124193']), 2)
self.assertEqual(len(g2t['ENSG00000078399']), 5)
self.assertEqual(len(g2t['ENSG00000151303']), 1)
g2t, gene_len = reduce_to_gene(gtf_list, lambda x: len(x))
self.assertEqual(gene_len['ENSG00000124193'], 2)
self.assertEqual(gene_len['ENSG00000078399'], 5)
self.assertEqual(gene_len['ENSG00000151303'], 1)
def test_get_introns(self):
t1 = Transcript('t1', 'chr', '+', '.', None, 'g1')
t1.add_exon((3, 10))
t1.add_exon((15, 20))
introns = get_introns(t1)
self.assertEqual( len(introns), 1)
self.assertEqual( introns[0], Intron('chr', 10, 15) )
t1.add_exon((21, 30) )
introns = get_introns(t1)
self.assertEqual(introns, [Intron('chr', 10, 15),
Intron('chr', 20, 21)])
    # FIXME: introducing the Intron class broke this test... not too difficult
    # to fix.
# def test_intron_all_trans(self):
# t1 = Transcript('t1', 'chr', '+', '.', None, 'g1')
# t1.add_exon((3, 10))
# t1.add_exon((15, 20))
# t2 = Transcript('t2', 'chr', '+', '.', None, 'g1')
# t2.add_exon((3, 10))
# t2.add_exon((15, 20))
# t2.add_exon((21, 30) )
# introns = intron_all_trans([t1, t2])
# self.assertEqual(introns, [Intron('chr', 10, 15)])
# self.assertEqual(intron_all_trans([]), [])
def test_intron_intersection(self):
i1 = (3, 10)
i2 = (2, 8)
self.assertEqual(intron_intersection(i1, i2), (3,8))
i2 = (10, 12)
self.assertEqual(intron_intersection(i1, i2), None)
# def test_intron_all_junction_left(self):
# t1 = Transcript('t1', 'chr', '+', '.', None, 'g1')
# t1.add_exon((3, 10))
# t1.add_exon((15, 20))
# t2 = Transcript('t2', 'chr', '+', '.', None, 'g1')
# t2.add_exon((3, 10))
# t2.add_exon((18, 25))
# intronic_regions = intron_all_junction_left([t1, t2])
# self.assertEqual(intronic_regions, [(10, 15)])
# t3 = Transcript('t3', 'chr', '+', '.', None, 'g1')
# t3.add_exon((3, 10))
# t3.add_exon((16, 23))
# intronic_regions = intron_all_junction_left([t1, t2, t3])
# self.assertEqual(intronic_regions, [(10, 15)])
# # this intron shouldn't match on the left side
# t1.add_exon((40, 44))
# t2.add_exon((40, 44))
# t3.add_exon((40, 44))
# intronic_regions = intron_all_junction_left([t1, t2, t3])
# self.assertEqual(intronic_regions, [(10, 15)])
# # this intron should match on the left side
# t1.add_exon((49, 57))
# t2.add_exon((47, 53))
# t3.add_exon((47, 54))
# intronic_regions = intron_all_junction_left([t1, t2, t3])
# self.assertEqual(intronic_regions, [(10, 15),
# (44, 47)])
# def test_intron_all_junction_left_exon_overlap(self):
# # test case where exon overlaps introns
# t1 = Transcript('t1', 'chr', '+', '.', None, 'g1')
# t1.add_exon((3, 10))
# t1.add_exon((15, 20))
# t2 = Transcript('t2', 'chr', '+', '.', None, 'g1')
# t2.add_exon((3, 10))
# t2.add_exon((18, 25))
# t3 = Transcript('t3', 'chr', '+', '.', None, 'g1')
# t3.add_exon((3, 25))
# intronic_regions = intron_all_junction_left([t1, t2, t3])
# self.assertEqual(intronic_regions, [])
# def test_intron_all_junction_left_no_overlap(self):
# t1 = Transcript('t1', 'chr', '+', '.', None, 'g1')
# t1.add_exon((3, 10))
# t1.add_exon((15, 20))
# t1.add_exon((30, 40))
# t2 = Transcript('t2', 'chr', '+', '.', None, 'g1')
# t2.add_exon((18, 25))
# t2.add_exon((30, 40))
# t3 = Transcript('t3', 'chr', '+', '.', None, 'g1')
# t3.add_exon((18, 25))
# t3.add_exon((30, 40))
# intronic_regions = intron_all_junction_left([t1, t2, t3])
# self.assertEqual(intronic_regions, [(10, 15)])
# intronic_regions = intron_all_junction_left([t2, t1, t3])
# self.assertEqual(intronic_regions, [(10, 15)])
# def test_intron_all_junction_left_gtf(self):
# trans = gtf_parse('tests/inputs/refGene_07.23.2014_CHL1.gtf')
# trans = sorted(trans.values(), key = lambda x: x.front_coordinate)
# gene_to_trans, gene_to_introns = \
# reduce_to_gene(trans, intron_all_junction_left)
# self.assertEqual(gene_to_introns['CHL1'][0], (238746, 239325))
# self.assertEqual(gene_to_introns['CHL1'][1], (286375, 288250))
# self.assertEqual(gene_to_introns['CHL1'][2], (361550, 367641))
# self.assertEqual(gene_to_introns['CHL1'][3], (367747, 369849))
# self.assertEqual(gene_to_introns['CHL1'][4], (370037, 382476))
# self.assertEqual(gene_to_introns['CHL1'][5], (382599, 383594))
# self.assertEqual(gene_to_introns['CHL1'][6], (383765, 384666))
# self.assertEqual(gene_to_introns['CHL1'][7], (386392, 391041))
# self.assertEqual(gene_to_introns['CHL1'][8], (391226, 396322))
# self.assertEqual(gene_to_introns['CHL1'][9], (396454, 401966))
# self.assertEqual(gene_to_introns['CHL1'][10], (402107, 403381))
# self.assertEqual(gene_to_introns['CHL1'][11], (403493, 404899))
# self.assertEqual(gene_to_introns['CHL1'][12], (405066, 407632))
# self.assertEqual(gene_to_introns['CHL1'][13], (407798, 419500))
# self.assertEqual(gene_to_introns['CHL1'][14], (419625, 423861))
# self.assertEqual(gene_to_introns['CHL1'][15], (423963, 424156))
# self.assertEqual(gene_to_introns['CHL1'][16], (424354, 425498))
# self.assertEqual(gene_to_introns['CHL1'][17], (425569, 430934))
# self.assertEqual(gene_to_introns['CHL1'][18], (431157, 432383))
# self.assertEqual(gene_to_introns['CHL1'][19], (432499, 432637))
# self.assertEqual(gene_to_introns['CHL1'][20], (432842, 433357))
# self.assertEqual(gene_to_introns['CHL1'][21], (433480, 436375))
# self.assertEqual(gene_to_introns['CHL1'][22], (436555, 439909))
# self.assertEqual(gene_to_introns['CHL1'][23], (440831, 443308))
# self.assertEqual(gene_to_introns['CHL1'][24], (443381, 447177))
def test_get_introns_gtf(self):
trans = gtf_parse('tests/inputs/refGene_07.23.2014_CHL1.gtf')
introns = get_introns(trans['NM_001253388'])
self.assertEqual(len(introns), 24)
def test_transcript_union_simple2(self):
t1 = Transcript('t1', 'chr', '+', '.', None, 'g1')
t1.add_exon((3, 10))
t1.add_exon((15, 20))
t1.add_exon((30, 40))
t2 = Transcript('t2', 'chr', '+', '.', None, 'g1')
t2.add_exon((18, 25))
t2.add_exon((30, 40))
tu = transcript_union([t1, t2]).exons
self.assertEqual(tu, [(3, 10), (15, 25), (30, 40)])
tu = transcript_union([t2, t1]).exons
self.assertEqual(tu, [(3, 10), (15, 25), (30, 40)])
t2.add_exon((45, 50))
tu = transcript_union([t1, t2]).exons
self.assertEqual(tu, [(3, 10), (15, 25), (30, 40), (45, 50)])
t3 = Transcript('t3', 'chr', '+', '.', None, 'g1')
t3.add_exon((18, 25))
t3.add_exon((30, 40))
tu = transcript_union([t1, t2, t3]).exons
self.assertEqual(tu, [(3, 10), (15, 25), (30, 40), (45, 50)])
t3 = Transcript('t3', 'chr', '+', '.', None, 'g1')
t3.add_exon((12, 25))
t3.add_exon((30, 40))
tu = transcript_union([t1, t2, t3]).exons
self.assertEqual(tu, [(3, 10), (12, 25), (30, 40), (45, 50)])
t3 = Transcript('t3', 'chr', '+', '.', None, 'g1')
t3.add_exon((9, 25))
t3.add_exon((30, 40))
tu = transcript_union([t1, t2, t3]).exons
self.assertEqual(tu, [(3, 25), (30, 40), (45, 50)])
def test_transcript_union_no_intron(self):
t1 = Transcript('t1', 'chr', '+', '.', None, 'g1')
t1.add_exon((3, 10))
t1.add_exon((15, 20))
t2 = Transcript('t2', 'chr', '+', '.', None, 'g1')
t2.add_exon((10,15))
tu = transcript_union([t1, t2]).exons
self.assertEqual(tu, [(3,20)])
def test_transcript_union_internal_exon(self):
t1 = Transcript('t1', 'chr', '+', '.', None, 'g1')
t1.add_exon((3, 10))
t1.add_exon((15, 20))
t2 = Transcript('t2', 'chr', '+', '.', None, 'g1')
t2.add_exon((11,15))
tu = transcript_union([t1, t2]).exons
self.assertEqual(tu, [(3,10), (11, 20)])
def test_transcript_union_disjoint(self):
t1 = Transcript('t1', 'chr', '+', '.', None, 'g1')
t1.add_exon((3, 10))
t1.add_exon((15, 20))
t2 = Transcript('t2', 'chr', '+', '.', None, 'g1')
t2.add_exon((25, 30))
t2.add_exon((40, 45))
tu = transcript_union([t1, t2]).exons
self.assertEqual(tu, [(3,10), (15, 20), (25, 30), (40, 45)])
def test_transcript_union_gtf(self):
trans = gtf_parse('tests/inputs/refGene_07.23.2014_CHL1.gtf')
trans = sorted(trans.values(), key = lambda x: x.front_coordinate)
gene_to_trans, gene_to_union = \
reduce_to_gene(trans, transcript_union)
chl1 = gene_to_union['CHL1'].exons
self.assertEqual(chl1[0], (238278, 238746))
self.assertEqual(chl1[1], (239325, 239775))
self.assertEqual(chl1[2], (286295, 286375))
self.assertEqual(chl1[3], (288250, 290282))
self.assertEqual(chl1[4], (361365, 361550))
self.assertEqual(chl1[5], (367641, 367747))
self.assertEqual(chl1[6], (369849, 370037))
self.assertEqual(chl1[7], (382476, 382599))
self.assertEqual(chl1[8], (383594, 383765))
self.assertEqual(chl1[9], (384666, 384714))
self.assertEqual(chl1[10], (386271, 386392))
self.assertEqual(chl1[11], (391041, 391226))
self.assertEqual(chl1[12], (396322, 396454))
self.assertEqual(chl1[13], (401966, 402107))
self.assertEqual(chl1[14], (403381, 403493))
self.assertEqual(chl1[15], (404899, 405066))
self.assertEqual(chl1[16], (407632, 407798))
self.assertEqual(chl1[17], (419500, 419625))
self.assertEqual(chl1[18], (423861, 423963))
self.assertEqual(chl1[19], (424156, 424354))
self.assertEqual(chl1[20], (425498, 425569))
self.assertEqual(chl1[21], (430934, 431157))
self.assertEqual(chl1[22], (432383, 432499))
self.assertEqual(chl1[23], (432637, 432842))
self.assertEqual(chl1[24], (433357, 433480))
self.assertEqual(chl1[25], (436375, 436555))
self.assertEqual(chl1[26], (439909, 440068))
self.assertEqual(chl1[27], (440699, 440831))
self.assertEqual(chl1[28], (443308, 443381))
self.assertEqual(chl1[29], (447177, 451097))
def test_transcript_union_start(self):
t1 = Transcript('t1', 'chr', '+', '.', None, 'g1')
t1.add_exon((3, 10))
t1.add_exon((20, 25))
t1.add_exon((30, 35))
t2 = Transcript('t2', 'chr', '+', '.', None, 'g1')
t2.add_exon((12, 17))
t2.add_exon((20, 25))
tu = transcript_union([t1, t2]).exons
self.assertEqual(tu, [(3, 10), (12, 17), (20, 25), (30, 35)])
def test_transcript_union_clk1(self):
trans = gtf_parse('tests/inputs/refGene_07.23.2014_CLK1.gtf')
trans = sorted(trans.values(), key = lambda x: x.front_coordinate)
gene_to_trans, gene_to_union = \
reduce_to_gene(trans, transcript_union)
clk1 = gene_to_union['CLK1'].exons
self.assertEqual(clk1[8], (201724402, 201726189))
self.assertEqual(clk1[9], (201726424,201726585))
self.assertEqual(clk1[-1], (201729286, 201729467))
self.assertEqual(clk1[-2], (201728823, 201729284))
self.assertEqual(len(clk1), 12)
def test_intron_from_string(self):
i1_str = "chr1:30-40"
i1 = Intron.from_string(i1_str)
self.assertEqual(i1[0], 30)
self.assertEqual(i1[1], 40)
i2 = Intron("chrX", 10, 100, 3, 2)
i2_from_str = Intron.from_string(str(i2), 3, 2)
self.assertEqual(str(i2), str(i2_from_str))
self.assertEqual(i2.to_string_noext(), i2_from_str.to_string_noext())
class TestTransOps(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_iterate_overlapping_transcripts(self):
t1 = Transcript('t1', 'chr', '+', '.', None, 'g1')
t1.add_exon((3, 35))
t2 = Transcript('t2', 'chr', '+', '.', None, 'g1')
t2.add_exon((12, 17))
t2.add_exon((20, 25))
t3 = Transcript('t3', 'chr', '+', '.', None, 'g1')
t3.add_exon((100, 120))
it = get_overlapping_transcripts([t3, t2, t1])
self.assertEqual(it.next(), [t1, t2])
self.assertEqual(it.next(), [t3])
self.assertRaises(StopIteration, it.next)
class TestIntronTransCompat(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_intron_trans_compat_simple(self):
t1 = Transcript('t1', 'chr1', '+', '.', None, 'g1')
t1.add_exon((3, 35))
t1.add_exon((39, 50))
t2 = Transcript('t2', 'chr2', '+', '.', None, 'g1')
t2.add_exon((3, 35))
t2.add_exon((39, 50))
i1 = Intron('chr1', 35, 39)
compat = intron_trans_compat([i1], [t2, t1])
self.assertEqual(compat, {str(i1) : ['t1']})
def test_intron_trans_compat(self):
# TODO: more testing required !
trans = gtf_parse('tests/inputs/refGene_07.23.2014_CHL1.gtf')
trans = sorted(trans.values(), key = lambda x: x.front_coordinate)
gene_to_trans, gene_to_union = \
reduce_to_gene(trans, transcript_union)
introns = get_introns(gene_to_union['CHL1'])
i2t = intron_trans_compat(introns, gene_to_trans['CHL1'])
self.assertEqual(len(i2t), len(introns))
def test_discard_overlapping_introns_simple(self):
g1 = Transcript('g1', 'chr1', '+', '.', None, 'g1')
g1.add_exon( (0, 3) )
g1.add_exon( (7, 10) )
g1.add_exon( (12, 35) )
g1.add_exon( (40, 100) )
g2 = Transcript('g2', 'chr1', '+', '.', None, 'g2')
g2.add_exon( (0, 4) )
g2.add_exon( (6, 9) )
g2.add_exon( (15, 30) )
g3 = Transcript('g3', 'chr1', '+', '.', None, 'g3')
g3.add_exon( (80, 82) )
g3.add_exon( (87, 90) )
trans = [g2, g1, g3]
g2i = discard_overlapping_introns(trans)
self.assertEqual(g2i['g1'], [Intron('chr1',35, 40)])
self.assertEqual(len(g2i['g2']), 0)
self.assertEqual(len(g2i['g3']), 0)
g1.exons[3] = (40, 60)
g2i = discard_overlapping_introns(trans)
self.assertEqual(g2i['g1'], [Intron('chr1', 35, 40)])
self.assertEqual(len(g2i['g2']), 0)
self.assertEqual(g2i['g3'], [Intron('chr1', 82, 87)])
# TODO: test with empty introns
def test_discard_overlapping_introns_partial_overlap(self):
g1 = Transcript('g1', 'chr1', '+', '.', None, 'g1')
g1.add_exon( (0, 3) )
g1.add_exon( (7, 10) )
g1.add_exon( (12, 20) )
g2 = Transcript('g2', 'chr1', '+', '.', None, 'g2')
g2.add_exon( (70, 72) )
g2.add_exon( (74, 81) )
g2.add_exon( (83, 85) )
g3 = Transcript('g3', 'chr1', '+', '.', None, 'g3')
g3.add_exon( (80, 82) )
g3.add_exon( (87, 90) )
trans = [g2, g1, g3]
g2i = discard_overlapping_introns(trans)
self.assertEqual(g2i['g1'], [Intron('chr1', 3, 7),
Intron('chr1', 10, 12)])
self.assertEqual(g2i['g2'], [Intron('chr1', 72, 74)])
self.assertEqual(g2i['g3'], [])
def test_discard_overlapping_introns_same_coords_diff_gene(self):
g1 = Transcript('g1', 'chr1', '+', '.', None, 'g1')
g1.add_exon( (0, 30) )
g1.add_exon( (40, 70) )
g2 = Transcript('g2', 'chr1', '+', '.', None, 'g2')
g2.add_exon( (0, 30) )
g2.add_exon( (40, 70) )
trans = [g2, g1]
g2i = discard_overlapping_introns(trans)
self.assertEqual(g2i['g1'], [])
self.assertEqual(g2i['g2'], [])
def test_discard_overlapping_introns_gtfird2(self):
# In this test there are two genes that have the same exact front and
# end coordinates for the gene...
gtf_in = 'tests/inputs/GTF2IRD2.gtf'
gtf_dict = gtf_parse(gtf_in)
gtf_list = gtf_dict.values()
        # XXX: seems like GTF2IRD2B isn't getting added to this overlap... weird
g2i = discard_overlapping_introns(gtf_list)
self.assertEqual(g2i['GTF2IRD2B'], [])
def test_unionize_regions(self):
r = [(0, 5), (10, 20)]
self.assertEqual( unionize_regions(r), r )
r = [(0, 5), (3, 7), (3, 20)]
self.assertEqual( unionize_regions(r), [(0, 20)] )
r = [(0, 5), (3, 7), (10, 20)]
self.assertEqual( unionize_regions(r), [(0, 7), (10, 20)] )
r = [(0, 30), (3, 7), (10, 20)]
self.assertEqual( unionize_regions(r), [(0, 30)] )
r = [(0, 5), (5, 7)]
self.assertEqual( unionize_regions(r), [(0, 7)] )
class TestIntronClass(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_intron(self):
int1 = Intron('sup', 3, 7)
self.assertEqual(int1[0], 3)
self.assertEqual(int1[1], 7)
# self.assertRaises(Exception, int1.__getitem__, 2)
def test_repr(self):
int1 = Intron('hi', 2, 4)
self.assertEqual(str(int1), 'hi:2-4')
self.assertEqual(int1.to_string_noext(), 'hi:2-4')
int1 = Intron('hi', 2, 4, 1, 1)
self.assertEqual(str(int1), 'hi:1-5')
self.assertEqual(int1.to_string_noext(), 'hi:2-4')
def test_extension(self):
i1 = Intron('hi', 2, 4, 1, 2)
self.assertEqual(i1[0], 1)
self.assertEqual(i1[1], 6)
| gpl-2.0 | -1,933,365,010,458,152,400 | 35.777567 | 80 | 0.548204 | false |
tisnik/fabric8-analytics-common | dashboard/src/jacoco_to_codecov.py | 1 | 4579 | """Module to convert JaCoCo coverage report into the report compatible with Pycov utility."""
import csv
def format_coverage_line(text, statements, missed, coverage, missed_lines=False):
"""Format one line with code coverage report of one class or for a summary."""
format_string = "{:80} {:3d} {:3d} {:3d}%"
if missed_lines:
format_string += " N/A"
return format_string.format(text, statements, missed, coverage)
def compute_coverage(statements, covered):
"""Compute code coverage based on number of all statemts and number of covered statements."""
return 100.0 * covered / statements
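# Worked example (illustrative, not used by the converter): a class with 200
# statements of which 150 are covered yields 75.0, i.e. 75% line coverage.
def _example_compute_coverage():
    return compute_coverage(200, 150)  # -> 75.0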
class JavaClassCoverageReport:
"""Class representing code coverage report for one Java class."""
def __init__(self, record):
"""Initialize the object by using record read from the CSV file."""
self.group = record[0]
self.package = record[1]
self.class_name = record[2]
self.missed = int(record[7])
self.covered = int(record[8])
self.statements = self.covered + self.missed
self.coverage = compute_coverage(self.statements, self.covered)
def __str__(self):
"""Return readable text representation compatible with Pycov utility output."""
pc = "{package}/{class_name}".format(package=self.package, class_name=self.class_name)
return format_coverage_line(pc, self.statements, self.missed, int(self.coverage))
class ProjectCoverageReport:
"""Class to perform conversion from JaCoCo output to report compatible with Pycov utility."""
def __init__(self, csv_input_file_name):
"""Initialize the object, store the name of input (CSV) file."""
self.csv_input_file_name = csv_input_file_name
@staticmethod
def read_csv(csv_input_file_name, skip_first_line=False):
"""Read the given CSV file, parse it, and return as list of records."""
output = []
with open(csv_input_file_name, 'r') as fin:
csv_content = csv.reader(fin, delimiter=',')
if skip_first_line:
next(csv_content, None)
for row in csv_content:
output.append(row)
return output
@staticmethod
def write_horizontal_rule(fout):
"""Write horizontal rule into the output file."""
fout.write("-" * 108)
fout.write("\n")
@staticmethod
def write_coverage_report_header(fout):
"""Write header compatible with Pycov to the output file."""
fout.write("{:80} {:5} {:4} {:5} {}\n".format(
"Name", "Stmts", "Miss", "Cover", "Missing"))
ProjectCoverageReport.write_horizontal_rule(fout)
@staticmethod
def write_coverage_report_summary(fout, statements, missed, coverage):
"""Write summary compatible with Pycov to the output file."""
ProjectCoverageReport.write_horizontal_rule(fout)
fout.write(format_coverage_line("TOTAL", statements, missed, int(coverage)))
fout.write("\n")
def read_java_classes(self):
"""Read and parse into about Java classes from JaCoCo results."""
data = ProjectCoverageReport.read_csv(self.csv_input_file_name, True)
return [JavaClassCoverageReport(record) for record in data]
def convert_code_coverage_report(self, output_file_name):
"""Convert code coverage report that would be compatible with PyCov output."""
java_classes = self.read_java_classes()
statements, missed, coverage = ProjectCoverageReport.compute_total(java_classes)
with open(output_file_name, "w") as fout:
ProjectCoverageReport.write_coverage_report_header(fout)
for java_class in java_classes:
fout.write(str(java_class) + "\n")
ProjectCoverageReport.write_coverage_report_summary(fout, statements, missed, coverage)
@staticmethod
def compute_total(records):
"""Compute total/summary from all Java class coverage reports."""
statements = 0
covered = 0
missed = 0
for record in records:
statements += record.statements
covered += record.covered
missed += record.missed
coverage = compute_coverage(statements, covered)
return statements, missed, coverage
def main():
"""Just a test ATM."""
p = ProjectCoverageReport("fabric8-analytics-jenkins-plugin.coverage.csv")
p.convert_code_coverage_report("fabric8-analytics-jenkins-plugin.coverage.txt")
if __name__ == "__main__":
# execute only if run as a script
main()
| apache-2.0 | -754,818,450,584,529,800 | 39.166667 | 99 | 0.647085 | false |
fladi/sorl-thumbnail | tests/runtests.py | 1 | 1833 | #!/usr/bin/env python
from __future__ import unicode_literals
import os
import sys
from os.path import abspath, dirname, join as pjoin
import django
from django.conf import settings
def runtests(verbosity=1, interactive=True, failfast=True, settings_module='settings.default'):
here = abspath(dirname(__file__))
root = pjoin(here, os.pardir)
sys.path[0:0] = [here, root, pjoin(root, 'sorl')]
os.environ['DJANGO_SETTINGS_MODULE'] = settings_module
if django.VERSION >= (1, 7):
django.setup()
from django.test.utils import get_runner
sys.stdout.write("Running tests for '%s' \n" % settings_module)
TestRunner = get_runner(settings)
test_runner = TestRunner(
verbosity=verbosity,
interactive=interactive,
failfast=failfast
)
return test_runner.run_tests(settings.INSTALLED_APPS)
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(
description='Runs the test suite for sorl-thumbnail.'
)
parser.add_argument(
'--settings',
dest='settings_module',
action='store',
default='settings.default',
help='Specify settings module.')
parser.add_argument(
'--noinput',
dest='interactive',
action='store_false',
default=True,
help='Do not prompt the user for input of any kind.')
parser.add_argument(
'--failfast',
dest='failfast',
action='store_true',
default=False,
help='Stop running the test suite after first failed test.')
args = parser.parse_args()
failures = runtests(
verbosity=2,
interactive=args.interactive,
failfast=args.failfast,
settings_module=args.settings_module
)
if failures:
sys.exit(bool(failures))
| bsd-3-clause | 1,865,367,570,763,572,000 | 23.77027 | 95 | 0.63066 | false |
huiyiqun/check_mk | cmk_base/core.py | 1 | 9266 | #!/usr/bin/python
# -*- encoding: utf-8; py-indent-offset: 4 -*-
# +------------------------------------------------------------------+
# | ____ _ _ __ __ _ __ |
# | / ___| |__ ___ ___| | __ | \/ | |/ / |
# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / |
# | | |___| | | | __/ (__| < | | | | . \ |
# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ |
# | |
# | Copyright Mathias Kettner 2014 [email protected] |
# +------------------------------------------------------------------+
#
# This file is part of Check_MK.
# The official homepage is at http://mathias-kettner.de/check_mk.
#
# check_mk is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation in version 2. check_mk is distributed
# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE. See the GNU General Public License for more de-
# tails. You should have received a copy of the GNU General Public
# License along with GNU Make; see the file COPYING. If not, write
# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
# Boston, MA 02110-1301 USA.
"""All core related things like direct communication with the running core"""
import fcntl
import os
import socket
import subprocess
import sys
import cmk.paths
import cmk.debug
import cmk.tty as tty
import livestatus
from cmk.exceptions import MKGeneralException
import cmk_base.console as console
import cmk_base.config as config
import cmk_base.core_config as core_config
import cmk_base.core_nagios as core_nagios
from cmk_base.exceptions import MKTimeout
from cmk_base import config_cache
try:
import cmk_base.cee.core_cmc as core_cmc
except ImportError:
core_cmc = None
_restart_lock_fd = None
#.
# .--Control-------------------------------------------------------------.
# | ____ _ _ |
# | / ___|___ _ __ | |_ _ __ ___ | | |
# | | | / _ \| '_ \| __| '__/ _ \| | |
# | | |__| (_) | | | | |_| | | (_) | | |
# | \____\___/|_| |_|\__|_| \___/|_| |
# | |
# +----------------------------------------------------------------------+
# | Invoke actions affecting the core like reload/restart |
# '----------------------------------------------------------------------'
def do_reload():
do_restart(True)
# TODO: Cleanup duplicate code with automation_restart()
def do_restart(only_reload = False):
try:
backup_path = None
if try_get_activation_lock():
# TODO: Replace by MKBailOut()/MKTerminate()?
console.error("Other restart currently in progress. Aborting.\n")
sys.exit(1)
# Save current configuration
if os.path.exists(cmk.paths.nagios_objects_file):
backup_path = cmk.paths.nagios_objects_file + ".save"
console.verbose("Renaming %s to %s\n", cmk.paths.nagios_objects_file, backup_path, stream=sys.stderr)
os.rename(cmk.paths.nagios_objects_file, backup_path)
else:
backup_path = None
try:
core_config.do_create_config(with_agents=True)
except Exception, e:
# TODO: Replace by MKBailOut()/MKTerminate()?
console.error("Error creating configuration: %s\n" % e)
if backup_path:
os.rename(backup_path, cmk.paths.nagios_objects_file)
if cmk.debug.enabled():
raise
sys.exit(1)
if config.monitoring_core == "cmc" or core_nagios.do_check_nagiosconfig():
if backup_path:
os.remove(backup_path)
core_config.precompile()
do_core_action(only_reload and "reload" or "restart")
else:
# TODO: Replace by MKBailOut()/MKTerminate()?
console.error("Configuration for monitoring core is invalid. Rolling back.\n")
broken_config_path = "%s/check_mk_objects.cfg.broken" % cmk.paths.tmp_dir
file(broken_config_path, "w").write(file(cmk.paths.nagios_objects_file).read())
console.error("The broken file has been copied to \"%s\" for analysis.\n" % broken_config_path)
if backup_path:
os.rename(backup_path, cmk.paths.nagios_objects_file)
else:
os.remove(cmk.paths.nagios_objects_file)
sys.exit(1)
except Exception, e:
try:
if backup_path and os.path.exists(backup_path):
os.remove(backup_path)
except:
pass
if cmk.debug.enabled():
raise
# TODO: Replace by MKBailOut()/MKTerminate()?
console.error("An error occurred: %s\n" % e)
sys.exit(1)
def try_get_activation_lock():
global _restart_lock_fd
    # In some bizarre cases (such as cmk -RR) we need to avoid duplicate locking!
if config.restart_locking and _restart_lock_fd == None:
lock_file = cmk.paths.default_config_dir + "/main.mk"
_restart_lock_fd = os.open(lock_file, os.O_RDONLY)
# Make sure that open file is not inherited to monitoring core!
fcntl.fcntl(_restart_lock_fd, fcntl.F_SETFD, fcntl.FD_CLOEXEC)
try:
console.verbose("Waiting for exclusive lock on %s.\n" % lock_file, stream=sys.stderr)
fcntl.flock(_restart_lock_fd, fcntl.LOCK_EX |
( config.restart_locking == "abort" and fcntl.LOCK_NB or 0))
except:
return True
return False
# Action can be restart, reload, start or stop
def do_core_action(action, quiet=False):
if not quiet:
console.output("%sing monitoring core..." % action.title())
if config.monitoring_core == "nagios":
os.putenv("CORE_NOVERIFY", "yes")
command = [ "%s/etc/init.d/core" % cmk.paths.omd_root,
action ]
else:
command = [ "omd", action, "cmc" ]
p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
close_fds=True)
result = p.wait()
if result != 0:
output = p.stdout.read()
if not quiet:
console.output("ERROR: %s\n" % output)
raise MKGeneralException("Cannot %s the monitoring core: %s" % (action, output))
else:
if not quiet:
console.output(tty.ok + "\n")
#.
# .--Timeperiods---------------------------------------------------------.
# | _____ _ _ _ |
# | |_ _(_)_ __ ___ ___ _ __ ___ _ __(_) ___ __| |___ |
# | | | | | '_ ` _ \ / _ \ '_ \ / _ \ '__| |/ _ \ / _` / __| |
# | | | | | | | | | | __/ |_) | __/ | | | (_) | (_| \__ \ |
# | |_| |_|_| |_| |_|\___| .__/ \___|_| |_|\___/ \__,_|___/ |
# | |_| |
# +----------------------------------------------------------------------+
# | Fetching timeperiods from the core |
# '----------------------------------------------------------------------'
# Check if a timeperiod is currently active. We have no other way than
# doing a Livestatus query. This is not really nice, but if you have a better
# idea, please tell me...
def check_timeperiod(timeperiod):
# Let exceptions happen, they will be handled upstream.
try:
update_timeperiods_cache()
except MKTimeout:
raise
except:
if cmk.debug.enabled():
raise
            # If the query is not successful, it is better to skip this check than to fail
return True
# Note: This also returns True when the timeperiod is unknown
# The following function timeperiod_active handles this differently
return config_cache.get_dict("timeperiods_cache").get(timeperiod, True) == True
# Returns
# True : active
# False: inactive
# None : unknown timeperiod
#
# Raises an exception if e.g. a timeout or connection error appears.
# This way errors can be handled upstream.
def timeperiod_active(timeperiod):
update_timeperiods_cache()
return config_cache.get_dict("timeperiods_cache").get(timeperiod)
def update_timeperiods_cache():
# { "last_update": 1498820128, "timeperiods": [{"24x7": True}] }
# The value is store within the config cache since we need a fresh start on reload
tp_cache = config_cache.get_dict("timeperiods_cache")
if not tp_cache:
response = livestatus.LocalConnection().query("GET timeperiods\nColumns: name in")
for tp_name, tp_active in response:
tp_cache[tp_name] = bool(tp_active)
def cleanup_timeperiod_caches():
config_cache.get_dict("timeperiods_cache").clear()
| gpl-2.0 | 3,326,192,585,655,252,000 | 38.598291 | 113 | 0.506583 | false |
pfalcon/picotui | picotui/editorext.py | 1 | 5118 | #
# Extended VT100 terminal text editor, etc. widgets
# Copyright (c) 2015 Paul Sokolovsky
# Distributed under MIT License
#
import sys
import os
from .editor import *
# Edit single line, quit on Enter/Esc
class LineEditor(Editor):
def handle_cursor_keys(self, key):
if super().handle_cursor_keys(key):
self.just_started = False
return True
return False
def handle_key(self, key):
if key in (KEY_ENTER, KEY_ESC):
return key
if self.just_started:
# Overwrite initial string with new content
self.set_lines([""])
self.col = 0
self.just_started = False
return super().handle_key(key)
def edit(self, line):
self.set_lines([line])
self.col = len(line)
self.adjust_cursor_eol()
self.just_started = True
key = self.loop()
if key == KEY_ENTER:
return self.content[0]
return None
class Viewer(Editor):
def handle_key(self, key):
if key in (KEY_ENTER, KEY_ESC):
return key
if super().handle_cursor_keys(key):
return True
# Viewer with colored lines (whole line in the same color)
class LineColorViewer(Viewer):
def show_line(self, l, i):
if self.is_dict_color:
c = self.lines_c.get(i, self.def_c)
else:
try:
c = self.lines_c[i]
except IndexError:
c = self.def_c
self.attr_color(c)
super().show_line(l, i)
self.attr_reset()
def set_line_colors(self, default_color, color_list={}):
self.def_c = default_color
self.lines_c = color_list
self.is_dict_color = isinstance(color_list, dict)
# Viewer with color support (each line may consist of spans
# of different colors)
class CharColorViewer(Viewer):
def show_line(self, l, i):
# TODO: handle self.margin, self.width
length = 0
for span in l:
if isinstance(span, tuple):
span, c = span
else:
c = self.def_c
self.attr_color(c)
self.wr(span)
length += len(span)
self.attr_color(self.def_c)
self.clear_num_pos(self.width - length)
self.attr_reset()
def set_def_color(self, default_color):
self.def_c = default_color
class EditorExt(Editor):
screen_width = 80
def __init__(self, left=0, top=0, width=80, height=24):
super().__init__(left, top, width, height)
# +1 assumes there's a border around editor pane
self.status_y = top + height + 1
def get_cur_line(self):
return self.content[self.cur_line]
def line_visible(self, no):
return self.top_line <= no < self.top_line + self.height
# If line "no" is already on screen, just reposition cursor to it and
# return False. Otherwise, show needed line either at the center of
# screen or at the top, and return True.
def goto_line(self, no, col=None, center=True):
self.cur_line = no
if self.line_visible(no):
self.row = no - self.top_line
if col is not None:
self.col = col
if self.adjust_cursor_eol():
self.redraw()
self.set_cursor()
return False
if center:
c = self.height // 2
if no > c:
self.top_line = no - c
self.row = c
else:
self.top_line = 0
self.row = no
else:
self.top_line = no
self.row = 0
if col is not None:
self.col = col
self.adjust_cursor_eol()
self.redraw()
return True
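    # Illustrative sketch (not part of the upstream API): because goto_line()
    # returns True only when it had to redraw, a caller that reacts to a jump
    # can skip its own repaint when the line was already visible on screen.
    def _example_jump_to_line(self, no):
        redrawn = self.goto_line(no, col=0, center=True)
        if not redrawn:
            self.show_status("Line %d already on screen" % no)
        return redrawn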
def show_status(self, msg):
self.cursor(False)
self.goto(0, self.status_y)
self.wr(msg)
self.clear_to_eol()
self.set_cursor()
self.cursor(True)
def show_cursor_status(self):
self.cursor(False)
self.goto(0, 31)
self.wr("% 3d:% 3d" % (self.cur_line, self.col + self.margin))
self.set_cursor()
self.cursor(True)
def dialog_edit_line(self, left=None, top=8, width=40, height=3, line="", title=""):
if left is None:
            left = (self.screen_width - width) // 2  # integer division keeps terminal coordinates integral
self.dialog_box(left, top, width, height, title)
e = LineEditor(left + 1, top + 1, width - 2, height - 2)
return e.edit(line)
if __name__ == "__main__":
with open(sys.argv[1]) as f:
content = f.read().splitlines()
#content = f.readlines()
#os.write(1, b"\x1b[18t")
#key = os.read(0, 32)
#print(repr(key))
#key = os.read(0, 32)
#print(repr(key))
#1/0
e = EditorExt(left=1, top=1, width=60, height=25)
e.init_tty()
e.enable_mouse()
s = e.dialog_edit_line(10, 5, 40, 3, title="Enter name:", line="test")
e.cls()
e.deinit_tty()
print()
print(s)
1/0
# e.cls()
e.draw_box(0, 0, 62, 27)
e.set_lines(content)
e.loop()
e.deinit_tty()
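# ---------------------------------------------------------------------------
# Hedged sketch (not part of the original demo): one way LineColorViewer could
# be driven; the numeric color codes passed to set_line_colors() are
# assumptions about the attribute values Editor.attr_color() accepts.
def _color_viewer_demo(lines):
    v = LineColorViewer(1, 1, 60, 25)
    v.set_lines(lines)
    v.set_line_colors(7, {0: 2, 1: 4})  # default color 7; line 0 -> 2, line 1 -> 4
    v.init_tty()
    try:
        v.loop()
    finally:
        v.deinit_tty()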
| mit | -3,940,507,001,391,494,700 | 24.979695 | 88 | 0.540445 | false |
jfisher-usgs/DigitalLibrary | files/form_addFolder.py | 1 | 1306 | ## Script (Python) "form_addFolder"
##bind container=container
##bind context=context
##bind namespace=
##bind script=script
##bind subpath=traverse_subpath
##parameters=
##title=
##
request = container.REQUEST
REQUEST = context.REQUEST
publicStatus = ''
if not context.anonymousHasPermission(context):
publicStatus = 'DISABLED'
print context.standard_html_header(context, request)
print '<form action="' + REQUEST['URL1'] + '" method="post" enctype="multipart/form-data">'
print '<h1>New Folder</h1>'
print '<fieldset>'
print '<ol>'
print '<li>'
print '<label for="new_folder">Name</label>'
print '<input type="text" value="" name="new_folder" id="new_folder" size="30" />'
print '<input type="checkbox" name="public_folder" title="Grant anonymous users view/access permissions" ' + publicStatus + ' />Public'
print '<input type="checkbox" name="image_folder" title="Add image gallery" />Image'
print '</li>'
print '</ol>'
print '</fieldset>'
print '<fieldset class="submit">'
print '<input type="submit" name="action" value=" Add " />'
print '<input type="submit" name="action" value="Cancel" />'
print '</fieldset>'
print '</form>'
print '<script type="text/javascript">setDefaultFocus(\'new_folder\');</script>'
print context.standard_html_footer(context, request)
return printed
| gpl-2.0 | -2,139,083,281,066,213,400 | 26.787234 | 135 | 0.701378 | false |
anthropo-lab/XP | EPHEMER/insider_trading_project/insider_trading/consumers.py | 1 | 7993 | from channels import Group as channelsGroup
from channels.sessions import channel_session
import random
from .models import Group as OtreeGroup, Subsession as OtreeSubsession, Constants
import json
import channels
import logging
from otree import constants_internal
import django.test
from otree.common_internal import (get_admin_secret_code)
from threading import Event
import time
client = django.test.Client()
ADMIN_SECRET_CODE = get_admin_secret_code()
#############################################
#############################################
# Connected to websocket.connect
def ws_admin_connect(message):
print("*********CONNECT************")
channelsGroup("adminreport").add(message.reply_channel)
# Connected to websocket.receive
def ws_admin_message(message):
print("*********RECEIVE************")
# Decrypt the url: No info in the url in this app
# Decrypt the received message
jsonmessage = json.loads(message.content['text'])
subsession_pk = jsonmessage['subsession_pk']
mysubsession = OtreeSubsession.objects.get(pk=subsession_pk)
if 'order' in jsonmessage:
order = jsonmessage['order']
if order == "push_all_players_on_page":
page_name = jsonmessage['page_name']
round_nb = jsonmessage['round_nb']
for p in mysubsession.get_players():
if ((str(p.participant._current_page_name) == page_name)
& (p.participant._round_number == round_nb)):
# This player is one of those who needs to be advanced
try:
if p.participant._current_form_page_url:
resp = client.post(
p.participant._current_form_page_url,
data={
constants_internal.timeout_happened: True,
constants_internal.admin_secret_code: ADMIN_SECRET_CODE
},
follow=True
)
else:
resp = client.get(p.participant._start_url(), follow=True)
except:
logging.exception("Failed to advance participant.")
raise
assert resp.status_code < 400
p.participant.vars['participant_was_pushed'] = 'True'
p.participant.save()
channels.Group(
'auto-advance-{}'.format(p.participant.code)
).send(
{'text': json.dumps(
{'auto_advanced': True})}
)
elif order == "push_active_players_on_page":
group_pk = jsonmessage['group_pk']
mygroup = OtreeGroup.objects.get(pk=group_pk)
page_name = jsonmessage['page_name']
round_nb = jsonmessage['round_nb']
for p in mygroup.get_players():
if ((str(p.participant._current_page_name) == page_name)
& (p.participant._round_number == round_nb)
& (p.participant.vars['active_flag'] != 'Inactive')):
# This player is one of those who needs to be advanced
try:
if p.participant._current_form_page_url:
resp = client.post(
p.participant._current_form_page_url,
data={
constants_internal.timeout_happened: True,
constants_internal.admin_secret_code: ADMIN_SECRET_CODE
},
follow=True
)
else:
resp = client.get(p.participant._start_url(), follow=True)
except:
logging.exception("Failed to advance participant.")
raise
assert resp.status_code < 400
p.participant.vars['participant_was_pushed'] = 'True'
p.participant.save()
channels.Group(
'auto-advance-{}'.format(p.participant.code)
).send(
{'text': json.dumps(
{'auto_advanced': True})}
)
elif order == "push_inactive_players_on_page":
group_pk = jsonmessage['group_pk']
mygroup = OtreeGroup.objects.get(pk=group_pk)
page_name = jsonmessage['page_name']
round_nb = jsonmessage['round_nb']
for p in mygroup.get_players():
if ((str(p.participant._current_page_name) == page_name)
& (p.participant._round_number == round_nb)
& (p.participant.vars['active_flag'] == 'Inactive')):
# This player is one of those who needs to be advanced
try:
if p.participant._current_form_page_url:
resp = client.post(
p.participant._current_form_page_url,
data={
constants_internal.timeout_happened: True,
constants_internal.admin_secret_code: ADMIN_SECRET_CODE
},
follow=True
)
else:
resp = client.get(p.participant._start_url(), follow=True)
except:
logging.exception("Failed to advance participant.")
raise
assert resp.status_code < 400
p.participant.vars['participant_was_pushed'] = 'True'
p.participant.save()
channels.Group(
'auto-advance-{}'.format(p.participant.code)
).send(
{'text': json.dumps(
{'auto_advanced': True})}
)
elif order == "deactivate_all_group_on_page":
group_pk = jsonmessage['group_pk']
mygroup = OtreeGroup.objects.get(pk=group_pk)
page_name = jsonmessage['page_name']
round_nb = jsonmessage['round_nb']
for p in mygroup.get_players():
if ((str(p.participant._current_page_name) == page_name)
& (p.participant._round_number == round_nb)):
p.participant.vars['active_flag'] = 'Inactive'
p.participant.save()
elif order == "reactivate_all_group_on_page":
group_pk = jsonmessage['group_pk']
mygroup = OtreeGroup.objects.get(pk=group_pk)
page_name = jsonmessage['page_name']
round_nb = jsonmessage['round_nb']
for p in mygroup.get_players():
if ((str(p.participant._current_page_name) == page_name)
& (p.participant._round_number == round_nb)):
p.participant.vars['active_flag'] = 'Playing_No_Change_Game'
p.participant.save()
elif order == "make_grouping_phase1":
mysubsession.make_grouping_phase1()
elif order == "make_grouping_phase2":
mysubsession.make_grouping_phase2()
#############################################
# Give feedback
channelsGroup("adminreport").send({'text': json.dumps(
{"order": "refresh"})}
)
# Connected to websocket.disconnect
def ws_admin_disconnect(message):
print("*********DISCONNECT************")
channelsGroup("adminreport").discard(message.reply_channel)
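# ---------------------------------------------------------------------------
# Hedged sketch (not part of the original consumers): the JSON payload shape
# ws_admin_message() expects from the admin report page; the pk values, page
# name and round number below are placeholders.
def _example_admin_payload():
    return json.dumps({
        'subsession_pk': 1,
        'order': 'push_active_players_on_page',
        'group_pk': 1,
        'page_name': 'Decision',
        'round_nb': 1,
    })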
| gpl-3.0 | -7,776,490,304,224,844,000 | 44.936782 | 91 | 0.477793 | false |
icarito/sugar | src/jarabe/journal/listmodel.py | 1 | 10564 | # Copyright (C) 2009, Tomeu Vizoso
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import logging
import time
import json
from gi.repository import GObject
from gi.repository import Gtk
from gettext import gettext as _
from sugar3.graphics.xocolor import XoColor
from sugar3.graphics import style
from sugar3 import util
from jarabe.journal import model
from jarabe.journal import misc
DS_DBUS_SERVICE = 'org.laptop.sugar.DataStore'
DS_DBUS_INTERFACE = 'org.laptop.sugar.DataStore'
DS_DBUS_PATH = '/org/laptop/sugar/DataStore'
class ListModel(GObject.GObject, Gtk.TreeModel, Gtk.TreeDragSource):
__gtype_name__ = 'JournalListModel'
__gsignals__ = {
'ready': (GObject.SignalFlags.RUN_FIRST, None, ([])),
'progress': (GObject.SignalFlags.RUN_FIRST, None, ([])),
}
COLUMN_UID = 0
COLUMN_FAVORITE = 1
COLUMN_ICON = 2
COLUMN_ICON_COLOR = 3
COLUMN_TITLE = 4
COLUMN_TIMESTAMP = 5
COLUMN_CREATION_TIME = 6
COLUMN_FILESIZE = 7
COLUMN_PROGRESS = 8
COLUMN_BUDDY_1 = 9
COLUMN_BUDDY_2 = 10
COLUMN_BUDDY_3 = 11
COLUMN_SELECT = 12
_COLUMN_TYPES = {
COLUMN_UID: str,
COLUMN_FAVORITE: bool,
COLUMN_ICON: str,
COLUMN_ICON_COLOR: object,
COLUMN_TITLE: str,
COLUMN_TIMESTAMP: str,
COLUMN_CREATION_TIME: str,
COLUMN_FILESIZE: str,
COLUMN_PROGRESS: int,
COLUMN_BUDDY_1: object,
COLUMN_BUDDY_3: object,
COLUMN_BUDDY_2: object,
COLUMN_SELECT: bool,
}
_PAGE_SIZE = 10
def __init__(self, query):
GObject.GObject.__init__(self)
self._last_requested_index = None
self._temp_drag_file_uid = None
self._cached_row = None
self._query = query
self._all_ids = []
t = time.time()
self._result_set = model.find(query, ListModel._PAGE_SIZE)
logging.debug('init resultset: %r', time.time() - t)
self._temp_drag_file_path = None
self._selected = []
# HACK: The view will tell us that it is resizing so the model can
# avoid hitting D-Bus and disk.
self.view_is_resizing = False
# Store the changes originated in the treeview so we do not need
# to regenerate the model and stuff up the scroll position
self._updated_entries = {}
self._result_set.ready.connect(self.__result_set_ready_cb)
self._result_set.progress.connect(self.__result_set_progress_cb)
def get_all_ids(self):
return self._all_ids
def __result_set_ready_cb(self, **kwargs):
t = time.time()
self._all_ids = self._result_set.find_ids(self._query)
logging.debug('get all ids: %r', time.time() - t)
self.emit('ready')
def __result_set_progress_cb(self, **kwargs):
self.emit('progress')
def setup(self, updated_callback=None):
self._result_set.setup()
self._updated_callback = updated_callback
def stop(self):
self._result_set.stop()
def get_metadata(self, path):
return model.get(self[path][ListModel.COLUMN_UID])
def do_get_n_columns(self):
return len(ListModel._COLUMN_TYPES)
def do_get_column_type(self, index):
return ListModel._COLUMN_TYPES[index]
def do_iter_n_children(self, iterator):
if iterator is None:
return self._result_set.length
else:
return 0
def set_value(self, iterator, column, value):
index = iterator.user_data
self._result_set.seek(index)
metadata = self._result_set.read()
if column == ListModel.COLUMN_FAVORITE:
metadata['keep'] = value
if column == ListModel.COLUMN_TITLE:
metadata['title'] = value
self._updated_entries[metadata['uid']] = metadata
if self._updated_callback is not None:
model.updated.disconnect(self._updated_callback)
model.write(metadata, update_mtime=False,
ready_callback=self.__reconnect_updates_cb)
def __reconnect_updates_cb(self, metadata, filepath, uid):
logging.error('__reconnect_updates_cb')
if self._updated_callback is not None:
model.updated.connect(self._updated_callback)
def do_get_value(self, iterator, column):
if self.view_is_resizing:
return None
index = iterator.user_data
if index == self._last_requested_index:
return self._cached_row[column]
if index >= self._result_set.length:
return None
self._result_set.seek(index)
metadata = self._result_set.read()
metadata.update(self._updated_entries.get(metadata['uid'], {}))
self._last_requested_index = index
self._cached_row = []
self._cached_row.append(metadata['uid'])
self._cached_row.append(metadata.get('keep', '0') == '1')
self._cached_row.append(misc.get_icon_name(metadata))
if misc.is_activity_bundle(metadata):
xo_color = XoColor('%s,%s' % (style.COLOR_BUTTON_GREY.get_svg(),
style.COLOR_TRANSPARENT.get_svg()))
else:
xo_color = misc.get_icon_color(metadata)
self._cached_row.append(xo_color)
title = GObject.markup_escape_text(metadata.get('title',
_('Untitled')))
self._cached_row.append('<b>%s</b>' % (title, ))
try:
timestamp = float(metadata.get('timestamp', 0))
except (TypeError, ValueError):
timestamp_content = _('Unknown')
else:
timestamp_content = util.timestamp_to_elapsed_string(timestamp)
self._cached_row.append(timestamp_content)
try:
creation_time = float(metadata.get('creation_time'))
except (TypeError, ValueError):
self._cached_row.append(_('Unknown'))
else:
self._cached_row.append(
util.timestamp_to_elapsed_string(float(creation_time)))
try:
size = int(metadata.get('filesize'))
except (TypeError, ValueError):
size = None
self._cached_row.append(util.format_size(size))
try:
progress = int(float(metadata.get('progress', 100)))
except (TypeError, ValueError):
progress = 100
self._cached_row.append(progress)
buddies = []
if metadata.get('buddies'):
try:
buddies = json.loads(metadata['buddies']).values()
            except ValueError, exception:
logging.warning('Cannot decode buddies for %r: %s',
metadata['uid'], exception)
if not isinstance(buddies, list):
logging.warning('Content of buddies for %r is not a list: %r',
metadata['uid'], buddies)
buddies = []
for n_ in xrange(0, 3):
if buddies:
try:
nick, color = buddies.pop(0)
except (AttributeError, ValueError), exception:
logging.warning('Malformed buddies for %r: %s',
metadata['uid'], exception)
else:
self._cached_row.append([nick, XoColor(color)])
continue
self._cached_row.append(None)
return self._cached_row[column]
def do_iter_nth_child(self, parent_iter, n):
return (False, None)
def do_get_path(self, iterator):
treepath = Gtk.TreePath((iterator.user_data,))
return treepath
def do_get_iter(self, path):
idx = path.get_indices()[0]
iterator = Gtk.TreeIter()
iterator.user_data = idx
return (True, iterator)
def do_iter_next(self, iterator):
idx = iterator.user_data + 1
if idx >= self._result_set.length:
iterator.stamp = -1
return (False, iterator)
else:
iterator.user_data = idx
return (True, iterator)
def do_get_flags(self):
return Gtk.TreeModelFlags.ITERS_PERSIST | Gtk.TreeModelFlags.LIST_ONLY
def do_iter_children(self, iterator):
return (False, iterator)
def do_iter_has_child(self, iterator):
return False
def do_iter_parent(self, iterator):
return (False, Gtk.TreeIter())
def do_drag_data_get(self, path, selection):
uid = self[path][ListModel.COLUMN_UID]
target_atom = selection.get_target()
target_name = target_atom.name()
if target_name == 'text/uri-list':
# Only get a new temp path if we have a new file, the frame
# requests a path many times and if we give it a new path it
# ends up with a broken path
if uid != self._temp_drag_file_uid:
# Get hold of a reference so the temp file doesn't get deleted
self._temp_drag_file_path = model.get_file(uid)
self._temp_drag_file_uid = uid
logging.debug('putting %r in selection', self._temp_drag_file_path)
selection.set(target_atom, 8, self._temp_drag_file_path)
return True
elif target_name == 'journal-object-id':
# uid is unicode but Gtk.SelectionData.set() needs str
selection.set(target_atom, 8, str(uid))
return True
return False
def set_selected(self, uid, value):
if value:
self._selected.append(uid)
else:
self._selected.remove(uid)
def is_selected(self, uid):
return uid in self._selected
def get_selected_items(self):
return self._selected
def restore_selection(self, selected):
self._selected = selected
def select_all(self):
self._selected = self._all_ids[:]
def select_none(self):
self._selected = []
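# ---------------------------------------------------------------------------
# Hedged sketch (not part of the original module): wiring a ListModel into a
# Gtk.TreeView; it assumes a running Sugar datastore, and the query dict and
# column setup are placeholders.
def _example_attach_to_treeview():
    list_model = ListModel({'order_by': ['+timestamp']})
    list_model.setup()
    tree_view = Gtk.TreeView(model=list_model)
    column = Gtk.TreeViewColumn(_('Title'), Gtk.CellRendererText(),
                                markup=ListModel.COLUMN_TITLE)
    tree_view.append_column(column)
    return tree_view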
| gpl-3.0 | -7,650,490,496,236,193,000 | 32.220126 | 79 | 0.591064 | false |
MuffinMedic/znc-weblog | weblog.py | 1 | 5161 | import znc
import os
def is_safe_path(basedir, path):
return os.path.abspath(path).startswith(basedir)
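# ---------------------------------------------------------------------------
# Hedged illustration (not part of the original module): how is_safe_path()
# rejects directory traversal; the paths below are placeholders.
def _is_safe_path_examples():
    base = '/home/user/.znc/'
    assert is_safe_path(base, base + 'moddata/log/user/net/chan/2017-01-01.log')
    # ".." segments are collapsed by os.path.abspath, so escapes are rejected.
    assert not is_safe_path(base, base + '../../etc/passwd')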
class weblog(znc.Module):
module_types = [znc.CModInfo.GlobalModule]
description = "Allowings viewing of log files from the ZNC webadmin"
wiki_page = "Weblog"
def OnLoad(self, args, message):
return True
def WebRequiresLogin(self):
return True
def WebRequiresAdmin(self):
return False
def GetWebMenuTitle(self):
return "Log Viewer"
def OnWebRequest(self, sock, page, tmpl):
user = sock.GetUser()
dir = sock.GetParam('dir', False)
if page == "index":
if sock.GetRawParam('scope', True):
scope = sock.GetRawParam('scope', True)
self.setscope(scope, sock, tmpl)
try:
self.listdir(tmpl, dir, sock)
except KeyError:
row = tmpl.AddRow("ErrorLoop")
row["error"] = "No scope set. Please set one above."
elif page == "log" or page == "raw":
self.viewlog(tmpl, dir, sock, page)
self.getscopes(sock, tmpl)
return True
def listdir(self, tmpl, dir, sock):
base = self.getbase(sock)
try:
dir_list = sorted(os.listdir(base + dir))
self.breadcrumbs(tmpl, dir, False)
if len(dir_list) > 0:
for item in dir_list:
row = tmpl.AddRow("ListLoop")
rel = dir + '/' + item if dir else item
path = base + rel
if os.path.isfile(path):
url = 'log?dir=' + rel.replace('#', '%23')
size = str(os.path.getsize(path) >> 10) + " KB"
elif os.path.isdir(path):
url = '?dir=' + rel.replace('#', '%23')
size = len([name for name in os.listdir(path)])
row["scope"] = url
row["item"] = item
row["size"] = str(size)
else:
row = tmpl.AddRow("ErrorLoop")
row["error"] = "Directory empty."
except FileNotFoundError:
row = tmpl.AddRow("ErrorLoop")
row["error"] = "Directory does not exist. Please make sure you have the log module enabled and that you are attempting to access logs at the appropriate level (global, user, or network)."
def viewlog(self, tmpl, dir, sock, page):
base = self.getbase(sock)
if not is_safe_path(base, base + dir):
if page == "raw":
row = tmpl.AddRow("LogLoop")
row['log'] = "Error: invalid directory provided."
return
row = tmpl.AddRow("ErrorLoop")
row["error"] = "Invalid directory provided."
return
path = base + dir
row = tmpl.AddRow("LogLoop")
with open(path, 'r', encoding='utf8') as log:
log = log.read()
if page == "raw":
                log = log.replace('<', '&lt;').replace('>', '&gt;')
row['log'] = log
if page == "log":
self.breadcrumbs(tmpl, dir, True)
row['raw'] = 'raw?dir=' + dir.replace('#', '%23')
def breadcrumbs(self, tmpl, dir, islog):
folders = dir.split('/')
crumbs = ['<a href="">logs / </a>']
row = tmpl.AddRow("BreadcrumbLoop")
row["crumbtext"] = "logs"
row["crumburl"] = ""
for i in range(0, len(folders)):
if folders[i]:
row = tmpl.AddRow("BreadcrumbLoop")
row["crumbtext"] = folders[i]
url = '/'.join(folders[0:i+1])
url = url.replace('#', '%23')
row["crumburl"] = url
if i == len(folders) - 1 and islog:
row["islog"] = "True"
def getbase(self, sock):
base = znc.CZNC.Get().GetZNCPath()
user = sock.GetUser()
scope = self.nv[user]
if scope == "Global":
base = base + '/moddata/log/' + user + '/'
elif scope == "User":
base = base + '/users/' + user + '/moddata/log/'
else:
base = base + '/users/' + user + '/networks/' + self.nv[user] + '/moddata/log/'
return base
def getscopes(self, sock, tmpl):
user_string = sock.GetUser()
user = znc.CZNC.Get().FindUser(user_string)
networks = user.GetNetworks()
net_array = []
for network in networks:
net_array.append(network.GetName())
net_array = sorted(net_array)
net_array.insert(0, 'User'); net_array.insert(0, 'Global')
for net in net_array:
row = tmpl.AddRow("ScopeLoop")
try:
if net == self.nv[user_string]:
row["active"] = "True"
except KeyError:
pass
row["network"] = net
def setscope(self, scope, sock, tmpl):
user = sock.GetUser()
self.nv[user] = scope
row = tmpl.AddRow("MessageLoop")
row["message"] = "Scope successfully set."
| gpl-3.0 | 4,999,262,817,996,553,000 | 32.732026 | 199 | 0.496803 | false |
CalvinNeo/EasyMLPlatform | py/graphic/tree.py | 1 | 4067 | #coding:utf8
import numpy as np
import math
import pylab as pl
import matplotlib.cm as cm
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import json
class GraphTree:
def __init__(self):
self.jsonobj = {}
self.leafNode = dict(boxstyle = 'round4',fc = '0.8')
self.branchNode = dict(boxstyle = 'sawtooth',fc = '0.8')
self.arrow = dict(arrowstyle = '<-')
self.depth = 0
self.leafcount = 0
def get_depth_leafcount(self,root):
current_node = root.keys()[0] #name of choice node(string)
branch_dict = root[current_node]
maxdepth, thisdepth, thisleafcount = 0,0,0
for current_node in branch_dict.keys():
# print current_node,type(branch_dict[current_node]).__name__
if type(branch_dict[current_node]).__name__ == 'dict':
temp = self.get_depth_leafcount(branch_dict[current_node])
thisdepth = 1 + temp[0]
thisleafcount += temp[1]
else:
thisdepth = 1
thisleafcount += 1
if thisdepth > maxdepth:
maxdepth = thisdepth
return maxdepth,thisleafcount
def load(self,strjson):
self.jsonobj = dict(strjson)
self.depth,self.leafcount = self.get_depth_leafcount(self.jsonobj)
def plotMidText(self, cntrPt, parentPt, txtString):
xMid = (parentPt[0] - cntrPt[0]) / 2.0 + cntrPt[0]
yMid = (parentPt[1] - cntrPt[1]) / 2.0 + cntrPt[1]
self.ax1.text(xMid, yMid, txtString)
def plotNode(self, nodeTxt, cntrPt, parentPt, nodeType):
self.ax1.annotate(nodeTxt, xy = parentPt, xycoords = 'axes fraction', xytext = cntrPt, \
textcoords = 'axes fraction', va = 'center', ha = 'center', bbox = nodeType, arrowprops = self.arrow)
def plotTree(self, myTree, parentPt, nodeTxt):
depth, leaves = self.get_depth_leafcount(myTree)
current_node = myTree.keys()[0]
cntrPt = (self.xOff + (1.0 + leaves) / 2.0 / self.leafcount, self.yOff)
self.plotMidText(cntrPt, parentPt, nodeTxt)
self.plotNode(current_node, cntrPt, parentPt, self.branchNode)
branch_dict = myTree[current_node]
self.yOff -= 1.0 / self.depth
for current_node in branch_dict.keys():
if type(branch_dict[current_node]).__name__ == 'dict':
self.plotTree(branch_dict[current_node], cntrPt, str(current_node))
else:
self.xOff += 1.0 / self.leafcount
self.plotNode(branch_dict[current_node], (self.xOff, self.yOff), cntrPt, self.leafNode)
self.plotMidText((self.xOff, self.yOff), cntrPt, str(current_node))
self.yOff += 1.0 / self.depth
def createPlot(self, show = True, save = ''):
fig = plt.figure(1, facecolor = 'white')
fig.clf()
axprops = dict(xticks = [], yticks = [])
self.ax1 = plt.subplot(111,frameon = False, **axprops)
self.xOff, self.yOff = -0.5 / self.leafcount, 1.0
self.plotTree(self.jsonobj, (0.5,1.0), '')
import StringIO, urllib, base64
if show:
plt.show()
else:
imgdata = StringIO.StringIO()
fig.savefig(imgdata, format='png')
imgdata.seek(0) # rewind the data
uri = 'data:image/png;base64,' + urllib.quote(base64.b64encode(imgdata.buf))
imgdata.close()
return uri
def showPlot(self):
plt.show()
if __name__ == '__main__':
tr = GraphTree()
# aa = '{"no surfacing":{"0":"no","1":{"flippers":{"0":"no","1":"yes"}}}}'
# tr.load(json.loads(aa))
#JSON can't have non-string key
aa = {"aged":{"0":"no","1":{"male":{"0":"no","1":"yes"}}}}
# aa = {'water': {0: 1, 1: {'foot': {0: "'no'", 1: "'yes'"}}}}
print dict(aa)
# aa = {"no surfacing":{0:"no",1:{"flippers":{0:"no",1:"yes"}}}}
# print dict(aa)
tr.load(aa)
print tr.leafcount,tr.depth
tr.createPlot(show=True)
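    # For the tree above get_depth_leafcount() gives leafcount == 3 and
    # depth == 2 (branches "aged" then "male"; leaves "no", "no", "yes"),
    # so the print statement above shows "3 2" before the plot window opens.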
| apache-2.0 | 7,361,431,833,645,096,000 | 38.485437 | 114 | 0.572412 | false |
deplinenoise/rlaunch | vbcc-driver.py | 1 | 1297 | #! /usr/bin/python
import sys
import os
import os.path
import subprocess
import re
line_re = re.compile(r'^(warning|error) (\d+) in line (\d+) of "([^"]*)":\s*(.*)$')
def fix_fn(root_dir, fn):
# If there are path separators in the filename, assume the path is valid
if fn.find(os.sep) != -1:
return fn
if os.path.exists(fn):
return fn
full_path = os.path.join(root_dir, fn)
if os.path.exists(full_path):
return full_path
return 'bah'
def munge(root_dir, line):
m = re.match(line_re, line)
if not m:
return line.strip()
fn = fix_fn(root_dir, m.group(4))
return '%s(%s) : %s %s: %s' % (fn, m.group(3), m.group(1), m.group(2), m.group(5))
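# ---------------------------------------------------------------------------
# Hedged illustration (not part of the original script): what line_re captures
# from a typical vbcc diagnostic; the file name and message are made up.
def _line_re_example():
    m = line_re.match('error 53 in line 12 of "foo.c": redefinition of x')
    # groups are: kind, number, line, file, message
    assert m.groups() == ('error', '53', '12', 'foo.c', 'redefinition of x')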
if __name__ == '__main__':
vbcc_root = os.environ.get('VBCC')
if not vbcc_root:
sys.stderr.write('VBCC environment variable not set')
sys.exit(1)
vc_bin = os.path.join(vbcc_root, 'bin' + os.sep + 'vc')
if os.name == 'nt':
vc_bin += '.exe'
root_dir = '.'
for arg in sys.argv[1:]:
if arg.endswith('.c'):
root_dir, dummy = os.path.split(arg)
vc = subprocess.Popen(
args = sys.argv[1:],
executable = vc_bin,
universal_newlines=True,
stdin = None,
stdout = subprocess.PIPE,
stderr = subprocess.STDOUT)
for line in vc.stdout:
sys.stdout.write(munge(root_dir, line))
sys.stdout.write('\n')
| gpl-3.0 | 2,088,134,103,908,204,300 | 19.587302 | 83 | 0.621434 | false |
thefab/tornadis | tornadis/pool.py | 1 | 7269 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# This file is part of tornadis library released under the MIT license.
# See the LICENSE file for more information.
import tornado.gen
import tornado.ioloop
import tornado.locks
import logging
import functools
from collections import deque
from tornadis.client import Client
from tornadis.utils import ContextManagerFuture
from tornadis.exceptions import ClientError
LOG = logging.getLogger(__name__)
class ClientPool(object):
"""High level object to deal with a pool of redis clients."""
def __init__(self, max_size=-1, client_timeout=-1, autoclose=False,
**client_kwargs):
"""Constructor.
Args:
max_size (int): max size of the pool (-1 means "no limit").
client_timeout (int): timeout in seconds of a connection released
to the pool (-1 means "no timeout").
autoclose (boolean): automatically disconnect released connections
with lifetime > client_timeout (test made every
client_timeout/10 seconds).
client_kwargs (dict): Client constructor arguments.
"""
self.max_size = max_size
self.client_timeout = client_timeout
self.client_kwargs = client_kwargs
self.__ioloop = client_kwargs.get('ioloop',
tornado.ioloop.IOLoop.instance())
self.autoclose = autoclose
self.__pool = deque()
if self.max_size != -1:
self.__sem = tornado.locks.Semaphore(self.max_size)
else:
self.__sem = None
self.__autoclose_periodic = None
if self.autoclose and self.client_timeout > 0:
every = int(self.client_timeout) * 100
if int(tornado.version[0]) >= 5:
cb = tornado.ioloop.PeriodicCallback(self._autoclose,
every)
else:
cb = tornado.ioloop.PeriodicCallback(self._autoclose,
every, self.__ioloop)
self.__autoclose_periodic = cb
self.__autoclose_periodic.start()
def _get_client_from_pool_or_make_it(self):
try:
while True:
client = self.__pool.popleft()
if client.is_connected():
if self._is_expired_client(client):
client.disconnect()
continue
break
except IndexError:
client = self._make_client()
return (True, client)
return (False, client)
@tornado.gen.coroutine
def get_connected_client(self):
"""Gets a connected Client object.
If max_size is reached, this method will block until a new client
object is available.
Returns:
A Future object with connected Client instance as a result
(or ClientError if there was a connection problem)
"""
if self.__sem is not None:
yield self.__sem.acquire()
client = None
newly_created, client = self._get_client_from_pool_or_make_it()
if newly_created:
res = yield client.connect()
if not res:
LOG.warning("can't connect to %s", client.title)
raise tornado.gen.Return(
ClientError("can't connect to %s" % client.title))
raise tornado.gen.Return(client)
def get_client_nowait(self):
"""Gets a Client object (not necessary connected).
If max_size is reached, this method will return None (and won't block).
Returns:
A Client instance (not necessary connected) as result (or None).
"""
if self.__sem is not None:
if self.__sem._value == 0:
return None
self.__sem.acquire()
_, client = self._get_client_from_pool_or_make_it()
return client
def _autoclose(self):
newpool = deque()
try:
while True:
client = self.__pool.popleft()
if client.is_connected():
if self._is_expired_client(client):
client.disconnect()
else:
newpool.append(client)
except IndexError:
self.__pool = newpool
def _is_expired_client(self, client):
if self.client_timeout != -1 and client.is_connected():
delta = client.get_last_state_change_timedelta()
if delta.total_seconds() >= self.client_timeout:
return True
return False
def connected_client(self):
"""Returns a ContextManagerFuture to be yielded in a with statement.
Returns:
A ContextManagerFuture object.
Examples:
>>> with (yield pool.connected_client()) as client:
# client is a connected tornadis.Client instance
# it will be automatically released to the pool thanks to
# the "with" keyword
reply = yield client.call("PING")
"""
future = self.get_connected_client()
cb = functools.partial(self._connected_client_release_cb, future)
return ContextManagerFuture(future, cb)
def _connected_client_release_cb(self, future=None):
client = future.result()
self.release_client(client)
def release_client(self, client):
"""Releases a client object to the pool.
Args:
client: Client object.
"""
if isinstance(client, Client):
if not self._is_expired_client(client):
LOG.debug('Client is not expired. Adding back to pool')
self.__pool.append(client)
elif client.is_connected():
LOG.debug('Client is expired and connected. Disconnecting')
client.disconnect()
if self.__sem is not None:
self.__sem.release()
def destroy(self):
"""Disconnects all pooled client objects."""
while True:
try:
client = self.__pool.popleft()
if isinstance(client, Client):
client.disconnect()
except IndexError:
break
@tornado.gen.coroutine
def preconnect(self, size=-1):
"""(pre)Connects some or all redis clients inside the pool.
Args:
size (int): number of redis clients to build and to connect
(-1 means all clients if pool max_size > -1)
Raises:
ClientError: when size == -1 and pool max_size == -1
"""
if size == -1 and self.max_size == -1:
raise ClientError("size=-1 not allowed with pool max_size=-1")
limit = min(size, self.max_size) if size != -1 else self.max_size
clients = yield [self.get_connected_client() for _ in range(0, limit)]
for client in clients:
self.release_client(client)
def _make_client(self):
"""Makes and returns a Client object."""
kwargs = self.client_kwargs
client = Client(**kwargs)
return client
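# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): assumes a Redis
# server reachable on localhost:6379; the pool settings and the PING call are
# only illustrative. Run with: tornado.ioloop.IOLoop.instance().run_sync(_example)
@tornado.gen.coroutine
def _example():
    pool = ClientPool(max_size=5, client_timeout=60, autoclose=True,
                      host="localhost", port=6379)
    yield pool.preconnect(5)
    with (yield pool.connected_client()) as client:
        reply = yield client.call("PING")
        LOG.info("redis replied: %s", reply)
    pool.destroy()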
| mit | -2,969,162,958,521,919,500 | 35.164179 | 79 | 0.554959 | false |
Azure/azure-sdk-for-python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_07_01/operations/_virtual_network_taps_operations.py | 1 | 29661 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class VirtualNetworkTapsOperations(object):
"""VirtualNetworkTapsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2019_07_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def _delete_initial(
self,
resource_group_name, # type: str
tap_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-07-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'tapName': self._serialize.url("tap_name", tap_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkTaps/{tapName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
tap_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes the specified virtual network tap.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param tap_name: The name of the virtual network tap.
:type tap_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the ARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
tap_name=tap_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'tapName': self._serialize.url("tap_name", tap_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkTaps/{tapName}'} # type: ignore
def get(
self,
resource_group_name, # type: str
tap_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.VirtualNetworkTap"
"""Gets information about the specified virtual network tap.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param tap_name: The name of virtual network tap.
:type tap_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: VirtualNetworkTap, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2019_07_01.models.VirtualNetworkTap
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualNetworkTap"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-07-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'tapName': self._serialize.url("tap_name", tap_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('VirtualNetworkTap', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkTaps/{tapName}'} # type: ignore
def _create_or_update_initial(
self,
resource_group_name, # type: str
tap_name, # type: str
parameters, # type: "_models.VirtualNetworkTap"
**kwargs # type: Any
):
# type: (...) -> "_models.VirtualNetworkTap"
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualNetworkTap"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-07-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'tapName': self._serialize.url("tap_name", tap_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'VirtualNetworkTap')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('VirtualNetworkTap', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('VirtualNetworkTap', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkTaps/{tapName}'} # type: ignore
def begin_create_or_update(
self,
resource_group_name, # type: str
tap_name, # type: str
parameters, # type: "_models.VirtualNetworkTap"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.VirtualNetworkTap"]
"""Creates or updates a Virtual Network Tap.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param tap_name: The name of the virtual network tap.
:type tap_name: str
:param parameters: Parameters supplied to the create or update virtual network tap operation.
:type parameters: ~azure.mgmt.network.v2019_07_01.models.VirtualNetworkTap
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the ARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either VirtualNetworkTap or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2019_07_01.models.VirtualNetworkTap]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualNetworkTap"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
tap_name=tap_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('VirtualNetworkTap', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'tapName': self._serialize.url("tap_name", tap_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkTaps/{tapName}'} # type: ignore
def _update_tags_initial(
self,
resource_group_name, # type: str
tap_name, # type: str
tap_parameters, # type: "_models.TagsObject"
**kwargs # type: Any
):
# type: (...) -> "_models.VirtualNetworkTap"
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualNetworkTap"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-07-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._update_tags_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'tapName': self._serialize.url("tap_name", tap_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(tap_parameters, 'TagsObject')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('VirtualNetworkTap', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_tags_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkTaps/{tapName}'} # type: ignore
def begin_update_tags(
self,
resource_group_name, # type: str
tap_name, # type: str
tap_parameters, # type: "_models.TagsObject"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.VirtualNetworkTap"]
"""Updates an VirtualNetworkTap tags.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param tap_name: The name of the tap.
:type tap_name: str
:param tap_parameters: Parameters supplied to update VirtualNetworkTap tags.
:type tap_parameters: ~azure.mgmt.network.v2019_07_01.models.TagsObject
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the ARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either VirtualNetworkTap or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2019_07_01.models.VirtualNetworkTap]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualNetworkTap"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._update_tags_initial(
resource_group_name=resource_group_name,
tap_name=tap_name,
tap_parameters=tap_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('VirtualNetworkTap', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'tapName': self._serialize.url("tap_name", tap_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkTaps/{tapName}'} # type: ignore
def list_all(
self,
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.VirtualNetworkTapListResult"]
"""Gets all the VirtualNetworkTaps in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either VirtualNetworkTapListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2019_07_01.models.VirtualNetworkTapListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualNetworkTapListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-07-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_all.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('VirtualNetworkTapListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/virtualNetworkTaps'} # type: ignore
def list_by_resource_group(
self,
resource_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.VirtualNetworkTapListResult"]
"""Gets all the VirtualNetworkTaps in a subscription.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either VirtualNetworkTapListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2019_07_01.models.VirtualNetworkTapListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualNetworkTapListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-07-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_resource_group.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('VirtualNetworkTapListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkTaps'} # type: ignore
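# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the generated code): it assumes the
# versioned NetworkManagementClient exposed by this package and azure-identity's
# DefaultAzureCredential; the resource names are placeholders.
def _example_delete_tap(subscription_id, resource_group_name, tap_name):
    # type: (str, str, str) -> None
    from azure.identity import DefaultAzureCredential
    from azure.mgmt.network.v2019_07_01 import NetworkManagementClient
    network_client = NetworkManagementClient(DefaultAzureCredential(), subscription_id)
    # begin_delete returns an LROPoller; result() blocks until the LRO finishes.
    network_client.virtual_network_taps.begin_delete(
        resource_group_name, tap_name).result()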
| mit | 1,230,676,813,930,605,800 | 47.704433 | 191 | 0.634672 | false |
felipeZ/nonAdiabaticCoupling | nac/integrals/nonAdiabaticCoupling.py | 1 | 5341 | __all__ = ['calculate_couplings_3points', 'calculate_couplings_levine',
'compute_overlaps_for_coupling', 'correct_phases']
from compute_integrals import compute_integrals_couplings
from nac.common import (
Matrix, Tensor3D, retrieve_hdf5_data, tuplesXYZ_to_plams)
from os.path import join
import numpy as np
import os
import uuid
def calculate_couplings_3points(
dt: float, mtx_sji_t0: Matrix, mtx_sij_t0: Matrix,
        mtx_sji_t1: Matrix, mtx_sij_t1: Matrix) -> Matrix:
"""
Calculate the non-adiabatic interaction matrix using 3 geometries,
the CGFs for the atoms and molecular orbitals coefficients read
from a HDF5 File.
"""
cte = 1.0 / (4.0 * dt)
return cte * (3 * (mtx_sji_t1 - mtx_sij_t1) + (mtx_sij_t0 - mtx_sji_t0))
def calculate_couplings_levine(dt: float, w_jk: Matrix,
w_kj: Matrix) -> Matrix:
"""
Compute the non-adiabatic coupling according to:
`Evaluation of the Time-Derivative Coupling for Accurate Electronic
State Transition Probabilities from Numerical Simulations`.
Garrett A. Meek and Benjamin G. Levine.
dx.doi.org/10.1021/jz5009449 | J. Phys. Chem. Lett. 2014, 5, 2351−2356
NOTE:
In numpy sinc is defined as sin(pi * x) / (pi * x)
"""
# Diagonal matrix
w_jj = np.diag(np.diag(w_jk))
w_kk = np.diag(np.diag(w_kj))
# remove the values from the diagonal
np.fill_diagonal(w_jk, 0)
np.fill_diagonal(w_kj, 0)
# Components A + B
acos_w_jj = np.arccos(w_jj)
asin_w_jk = np.arcsin(w_jk)
a = acos_w_jj - asin_w_jk
b = acos_w_jj + asin_w_jk
A = - np.sinc(a / np.pi)
B = np.sinc(b / np.pi)
# Components C + D
acos_w_kk = np.arccos(w_kk)
asin_w_kj = np.arcsin(w_kj)
c = acos_w_kk - asin_w_kj
d = acos_w_kk + asin_w_kj
C = np.sinc(c / np.pi)
D = np.sinc(d / np.pi)
# Components E
w_lj = np.sqrt(1 - (w_jj ** 2) - (w_kj ** 2))
w_lk = -(w_jk * w_jj + w_kk * w_kj) / w_lj
asin_w_lj = np.arcsin(w_lj)
asin_w_lk = np.arcsin(w_lk)
asin_w_lj2 = asin_w_lj ** 2
asin_w_lk2 = asin_w_lk ** 2
t1 = w_lj * w_lk * asin_w_lj
x1 = np.sqrt((1 - w_lj ** 2) * (1 - w_lk ** 2)) - 1
t2 = x1 * asin_w_lk
t = t1 + t2
E_nonzero = 2 * asin_w_lj * t / (asin_w_lj2 - asin_w_lk2)
    # Use E_nonzero only where w_lj is numerically different from zero
E1 = np.where(np.abs(w_lj) > 1e-8, E_nonzero, np.zeros(A.shape))
    # Fall back to the limiting value where asin(w_lj)**2 == asin(w_lk)**2
E = np.where(np.isclose(asin_w_lj2, asin_w_lk2), w_lj ** 2, E1)
cte = 1 / (2 * dt)
return cte * (np.arccos(w_jj) * (A + B) + np.arcsin(w_kj) * (C + D) + E)
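# A hedged usage sketch (illustrative values only): w_jk and w_kj stand for
# the MO overlap matrices between consecutive time steps; a nearly diagonal
# matrix keeps the arccos/arcsin arguments inside [-1, 1].
# >>> w = np.random.rand(10, 10) * 0.1
# >>> np.fill_diagonal(w, 0.99)
# >>> coupling = calculate_couplings_levine(1.0, w, w.T.copy())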
def correct_phases(overlaps: Tensor3D, mtx_phases: Matrix) -> list:
"""
Correct the phases for all the overlaps
"""
nOverlaps = overlaps.shape[0] # total number of overlap matrices
dim = overlaps.shape[1] # Size of the square matrix
for k in range(nOverlaps):
# Extract phases
phases_t0, phases_t1 = mtx_phases[k: k + 2]
phases_t0 = phases_t0.reshape(dim, 1)
phases_t1 = phases_t1.reshape(1, dim)
mtx_phases_Sji_t0_t1 = np.dot(phases_t0, phases_t1)
# Update array with the fixed phases
overlaps[k] *= mtx_phases_Sji_t0_t1
return overlaps
def compute_overlaps_for_coupling(
        config: dict, pair_molecules: tuple, coefficients: tuple) -> Matrix:
"""
    Compute the overlap matrix used to compute the couplings
    :returns: Matrix containing the MO overlaps between the two geometries
"""
# Atomic orbitals overlap
suv = calcOverlapMtx(config, pair_molecules)
# Read Orbitals Coefficients
css0, css1 = coefficients
return np.dot(css0.T, np.dot(suv, css1))
def read_overlap_data(config: dict, mo_paths: list) -> tuple:
"""
Read the Molecular orbital coefficients and the transformation matrix
"""
mos = retrieve_hdf5_data(config.path_hdf5, mo_paths)
# Extract a subset of molecular orbitals to compute the coupling
lowest, highest = compute_range_orbitals(config)
css0, css1 = tuple(map(lambda xs: xs[:, lowest: highest], mos))
return css0, css1
def compute_range_orbitals(config: dict) -> tuple:
"""
Compute the lowest and highest index used to extract
a subset of Columns from the MOs
"""
lowest = config.nHOMO - (config.mo_index_range[0] + config.active_space[0])
highest = config.nHOMO + config.active_space[1] - config.mo_index_range[0]
return lowest, highest
def calcOverlapMtx(config: dict, molecules: tuple) -> Matrix:
"""
Parallel calculation of the overlap matrix using the libint2 library
at two different geometries: R0 and R1.
"""
mol_i, mol_j = tuple(tuplesXYZ_to_plams(x) for x in molecules)
# unique molecular paths
path_i = join(config["scratch_path"],
f"molecule_{uuid.uuid4()}.xyz")
path_j = join(config["scratch_path"],
"molecule_{uuid.uuid4()}.xyz")
# Write the molecules in atomic units
mol_i.write(path_i)
mol_j.write(path_j)
basis_name = config["cp2k_general_settings"]["basis"]
try:
integrals = compute_integrals_couplings(
path_i, path_j, config["path_hdf5"], basis_name)
finally:
os.remove(path_i)
os.remove(path_j)
return integrals
| mit | -2,166,787,619,494,257,400 | 30.040698 | 79 | 0.617719 | false |
wkerzendorf/wsynphot | wsynphot/base.py | 1 | 15987 | # defining the base filter curve classes
import os
from scipy import interpolate
from wsynphot.spectrum1d import SKSpectrum1D as Spectrum1D
import pandas as pd
from wsynphot.io.cache_filters import load_filter_index, load_transmission_data
from astropy import units as u, constants as const
from astropy import utils
import numpy as np
from wsynphot.calibration import get_vega_calibration_spectrum
def calculate_filter_flux_density(spectrum, filter):
"""
    Calculate the flux density transmitted through the filter by evaluating
    the integral
    .. math::
        f_\\textrm{filter} = \\int f_\\lambda(\\lambda)\\, S(\\lambda)\\, \\lambda\\, d\\lambda
Parameters
----------
spectrum: ~specutils.Spectrum1D
spectrum object
filter: ~wsynphot.FilterCurve
:return:
"""
filtered_spectrum = filter * spectrum
filter_flux_density = np.trapz(filtered_spectrum.flux * filtered_spectrum.wavelength,
filtered_spectrum.wavelength)
return filter_flux_density
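# A hedged usage sketch (not from the original source): the filter id is an
# illustrative SVO-style identifier and ``spectrum`` is any object exposing
# ``wavelength`` and ``flux`` attributes, e.g. the Spectrum1D used above.
# >>> filt = FilterCurve.load_filter('Generic/Bessell.V')
# >>> flux_density = calculate_filter_flux_density(spectrum, filt)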
def calculate_vega_magnitude(spectrum, filter):
filter_flux_density = calculate_filter_flux_density(spectrum, filter)
wavelength_delta = filter.calculate_wavelength_delta()
filtered_f_lambda = (filter_flux_density / wavelength_delta)
zp_vega_f_lambda = filter.zp_vega_f_lambda
return -2.5 * np.log10(filtered_f_lambda / zp_vega_f_lambda)
def calculate_ab_magnitude(spectrum, filter):
filtered_f_lambda = (calculate_filter_flux_density(spectrum, filter) /
filter.calculate_wavelength_delta())
return -2.5 * np.log10(filtered_f_lambda / filter.zp_ab_f_lambda)
def list_filters():
"""
List available filter sets along with their properties
"""
return load_filter_index()
class BaseFilterCurve(object):
"""
Basic filter curve class
Parameters
----------
wavelength: ~astropy.units.Quantity
wavelength for filter curve
transmission_lambda: numpy.ndarray
transmission_lambda for filter curve
interpolation_kind: str
allowed interpolation kinds given in scipy.interpolate.interp1d
"""
@classmethod
def load_filter(cls, filter_id=None, interpolation_kind='linear'):
"""
Parameters
----------
filter_id: str or None
if None is provided will return a DataFrame of all filters
interpolation_kind: str
see scipy.interpolation.interp1d
"""
if filter_id is None:
return list_filters()
else:
filter = load_transmission_data(filter_id)
wavelength_unit = 'angstrom'
wavelength = filter['Wavelength'].values * u.Unit(wavelength_unit)
return cls(wavelength, filter['Transmission'].values,
interpolation_kind=interpolation_kind,
filter_id=filter_id)
def __init__(self, wavelength, transmission_lambda,
interpolation_kind='linear', filter_id=None):
if not hasattr(wavelength, 'unit'):
raise ValueError('the wavelength needs to be a astropy quantity')
self.wavelength = wavelength
self.transmission_lambda = transmission_lambda
self.interpolation_object = interpolate.interp1d(self.wavelength,
self.transmission_lambda,
kind=interpolation_kind,
bounds_error=False,
fill_value=0.0)
self.filter_id = filter_id
def __mul__(self, other):
if not hasattr(other, 'flux') or not hasattr(other, 'wavelength'):
            raise ValueError('requiring a specutils.Spectrum1D-like object that '
'has attributes "flux" and "wavelength"')
#new_wavelength = np.union1d(other.wavelength.to(self.wavelength.unit).value,
# self.wavelength.value) * self.wavelength.unit
transmission = self.interpolate(other.wavelength)
return Spectrum1D.from_array(other.wavelength, transmission * other.flux)
def __rmul__(self, other):
return self.__mul__(other)
@utils.lazyproperty
def lambda_pivot(self):
"""
Calculate the pivotal wavelength as defined in Bessell & Murphy 2012
.. math::
\\lambda_\\textrm{pivot} = \\sqrt{
            \\frac{\\int S(\\lambda)\\lambda d\\lambda}{\\int \\frac{S(\\lambda)}{\\lambda} d\\lambda}}\\\\
<f_\\nu> = <f_\\lambda>\\frac{\\lambda_\\textrm{pivot}^2}{c}
"""
return np.sqrt((np.trapz(self.transmission_lambda * self.wavelength, self.wavelength)/
(np.trapz(self.transmission_lambda / self.wavelength, self.wavelength))))
@utils.lazyproperty
def wavelength_start(self):
return self.get_wavelength_start()
@utils.lazyproperty
def wavelength_end(self):
return self.get_wavelength_end()
@utils.lazyproperty
def zp_ab_f_lambda(self):
return (self.zp_ab_f_nu * const.c / self.lambda_pivot**2).to(
'erg/s/cm^2/Angstrom', u.spectral())
@utils.lazyproperty
def zp_ab_f_nu(self):
return (3631 * u.Jy).to('erg/s/cm^2/Hz')
@utils.lazyproperty
def zp_vega_f_lambda(self):
return (calculate_filter_flux_density(get_vega_calibration_spectrum(), self) /
self.calculate_wavelength_delta())
def interpolate(self, wavelength):
"""
Interpolate the filter onto new wavelength grid
Parameters
----------
wavelength: ~astropy.units.Quantity
wavelength grid to interpolate on
"""
converted_wavelength = wavelength.to(self.wavelength.unit)
return self.interpolation_object(converted_wavelength)
def _calculuate_flux_density(self, wavelength, flux):
        return calculate_filter_flux_density(
            Spectrum1D.from_array(wavelength, flux), self)
def calculate_flux_density(self, spectrum):
return calculate_filter_flux_density(spectrum, self)
def calculate_f_lambda(self, spectrum):
return (self.calculate_flux_density(spectrum) /
self.calculate_wavelength_delta())
def calculate_wavelength_delta(self):
"""
        Calculate the integral :math:`\\int S(\\lambda)\\, \\lambda\\, d\\lambda`
        :return: the transmission-weighted wavelength integral
"""
return np.trapz(self.transmission_lambda * self.wavelength,
self.wavelength)
def calculate_weighted_average_wavelength(self):
"""
Calculate integral :math:`\\frac{\\int S(\\lambda) \\lambda d\\lambda}{\\int S(\\lambda) d\\lambda}`
Returns
: ~astropy.units.Quantity
"""
return (np.trapz(self.transmission_lambda * self.wavelength,
self.wavelength) / self.calculate_wavelength_delta())
def calculate_vega_magnitude(self, spectrum):
__doc__ = calculate_vega_magnitude.__doc__
return calculate_vega_magnitude(spectrum, self)
def calculate_ab_magnitude(self, spectrum):
__doc__ = calculate_ab_magnitude.__doc__
return calculate_ab_magnitude(spectrum, self)
def convert_ab_magnitude_to_f_lambda(self, mag):
return 10**(-0.4*mag) * self.zp_ab_f_lambda
def convert_vega_magnitude_to_f_lambda(self, mag):
return 10**(-0.4*mag) * self.zp_vega_f_lambda
def plot(self, ax, scale_max=None, make_label=True, plot_kwargs={},
format_filter_id=None):
if scale_max is not None:
if hasattr(scale_max, 'unit'):
scale_max = scale_max.value
transmission = (self.transmission_lambda * scale_max
/ self.transmission_lambda.max())
else:
transmission = self.transmission_lambda
ax.plot(self.wavelength, transmission, **plot_kwargs)
ax.set_xlabel('Wavelength [{0}]'.format(
self.wavelength.unit.to_string(format='latex')))
ax.set_ylabel('Transmission [1]')
if make_label==True and self.filter_id is not None:
if format_filter_id is not None:
filter_id = format_filter_id(self.filter_id)
else:
filter_id = self.filter_id
text_x = (self.lambda_pivot).value
text_y = transmission.max()/2
ax.text(text_x, text_y, filter_id,
horizontalalignment='center', verticalalignment='center',
bbox=dict(facecolor='white', alpha=0.5))
def get_wavelength_start(self, threshold=0.01):
norm_cum_sum = (np.cumsum(self.transmission_lambda)
/ np.sum(self.transmission_lambda))
return self.wavelength[norm_cum_sum.searchsorted(threshold)]
def get_wavelength_end(self, threshold=0.01):
norm_cum_sum = (np.cumsum(self.transmission_lambda)
/ np.sum(self.transmission_lambda))
return self.wavelength[norm_cum_sum.searchsorted(1 - threshold)]
class FilterCurve(BaseFilterCurve):
def __repr__(self):
if self.filter_id is None:
filter_id = "{0:x}".format(self.__hash__())
else:
filter_id = self.filter_id
return "FilterCurve <{0}>".format(filter_id)
class FilterSet(object):
"""
A set of filters
Parameters
----------
filter_set: ~list
a list of strings or a list of filters
interpolation_kind: ~str
        scipy interpolation kinds
"""
def __init__(self, filter_set, interpolation_kind='linear'):
if hasattr(filter_set[0], 'wavelength'):
self.filter_set = filter_set
else:
self.filter_set = [FilterCurve.load_filter(filter_id,
interpolation_kind=
interpolation_kind)
for filter_id in filter_set]
def __iter__(self):
self.current_filter_idx = 0
return self
def __next__(self):
try:
item = self.filter_set[self.current_filter_idx]
except IndexError:
raise StopIteration
self.current_filter_idx += 1
return item
next = __next__
def __getitem__(self, item):
return self.filter_set.__getitem__(item)
def __repr__(self):
return "<{0} \n{1}>".format(self.__class__.__name__,
'\n'.join(
[item.filter_id
for item in self.filter_set]))
@property
def lambda_pivot(self):
return u.Quantity([item.lambda_pivot for item in self])
def calculate_f_lambda(self, spectrum):
return u.Quantity(
[item.calculate_f_lambda(spectrum) for item in self.filter_set])
def calculate_ab_magnitudes(self, spectrum):
mags = [item.calculate_ab_magnitude(spectrum)
for item in self.filter_set]
return mags
def calculate_vega_magnitudes(self, spectrum):
mags = [item.calculate_vega_magnitude(spectrum)
for item in self.filter_set]
return mags
def convert_ab_magnitudes_to_f_lambda(self, magnitudes):
if len(magnitudes) != len(self.filter_set):
raise ValueError("Filter set and magnitudes need to have the same "
"number of items")
f_lambdas = [filter.convert_ab_magnitude_to_f_lambda(mag)
for filter, mag in zip(self.filter_set, magnitudes)]
return u.Quantity(f_lambdas)
def convert_ab_magnitude_uncertainties_to_f_lambda_uncertainties(
self, magnitudes, magnitude_uncertainties):
if len(magnitudes) != len(self.filter_set):
raise ValueError("Filter set and magnitudes need to have the same "
"number of items")
f_lambda_positive_uncertainties = u.Quantity(
[filter.convert_ab_magnitude_to_f_lambda(mag + mag_uncertainty)
for filter, mag, mag_uncertainty in zip(
self.filter_set, magnitudes, magnitude_uncertainties, )])
f_lambda_negative_uncertainties = u.Quantity(
[filter.convert_ab_magnitude_to_f_lambda(mag - mag_uncertainty)
for filter, mag, mag_uncertainty in zip(
self.filter_set, magnitudes, magnitude_uncertainties)])
return np.abs(u.Quantity((f_lambda_positive_uncertainties,
f_lambda_negative_uncertainties))
- self.convert_ab_magnitudes_to_f_lambda(magnitudes))
def convert_vega_magnitude_uncertainties_to_f_lambda_uncertainties(
self, magnitudes, magnitude_uncertainties):
if len(magnitudes) != len(self.filter_set):
raise ValueError("Filter set and magnitudes need to have the same "
"number of items")
f_lambda_positive_uncertainties = u.Quantity(
[filter.convert_vega_magnitude_to_f_lambda(mag + mag_uncertainty)
for filter, mag, mag_uncertainty in zip(
self.filter_set, magnitudes, magnitude_uncertainties, )])
f_lambda_negative_uncertainties = u.Quantity(
[filter.convert_vega_magnitude_to_f_lambda(mag - mag_uncertainty)
for filter, mag, mag_uncertainty in zip(
self.filter_set, magnitudes, magnitude_uncertainties)])
return np.abs(u.Quantity((f_lambda_positive_uncertainties,
f_lambda_negative_uncertainties))
- self.convert_vega_magnitudes_to_f_lambda(magnitudes))
def convert_vega_magnitudes_to_f_lambda(self, magnitudes):
if len(magnitudes) != len(self.filter_set):
raise ValueError("Filter set and magnitudes need to have the same "
"number of items")
f_lambdas = [filter.convert_vega_magnitude_to_f_lambda(mag)
for filter, mag in zip(self.filter_set, magnitudes)]
return u.Quantity(f_lambdas)
def plot_spectrum(self, spectrum, ax, make_labels=True,
spectrum_plot_kwargs={}, filter_plot_kwargs={},
filter_color_list=None, format_filter_id=None):
"""
plot a spectrum with the given filters
spectrum:
ax:
make_labels:
:return:
"""
ax.plot(spectrum.wavelength, spectrum.flux, **spectrum_plot_kwargs)
for i, filter in enumerate(self.filter_set):
filter_scale = filter.calculate_f_lambda(spectrum)
if filter_color_list is not None:
filter_plot_kwargs['color'] = filter_color_list[i]
filter.plot(ax, scale_max=filter_scale, make_label=make_labels,
plot_kwargs=filter_plot_kwargs,
format_filter_id=format_filter_id)
class MagnitudeSet(FilterSet):
def __init__(self, filter_set, magnitudes, magnitude_uncertainties=None,
interpolation_kind='linear'):
super(MagnitudeSet, self).__init__(filter_set,
interpolation_kind=
interpolation_kind)
self.magnitudes = np.array(magnitudes)
self.magnitude_uncertainties = np.array(magnitude_uncertainties)
def __repr__(self):
mag_str = '{0} {1:.4f} +/- {2:.4f}'
mag_data = []
for i, filter in enumerate(self.filter_set):
unc = (np.nan if self.magnitude_uncertainties is None
else self.magnitude_uncertainties[i])
mag_data.append(mag_str.format(filter.filter_id,
self.magnitudes[i], unc))
return "<{0} \n{1}>".format(self.__class__.__name__,
'\n'.join(mag_data))
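# A hedged end-to-end sketch (the filter ids are illustrative and must exist
# in the local filter cache; ``spectrum`` is a Spectrum1D-like object):
# >>> fset = FilterSet(['Generic/Bessell.B', 'Generic/Bessell.V'])
# >>> mags = fset.calculate_ab_magnitudes(spectrum)
# >>> f_lambdas = fset.convert_ab_magnitudes_to_f_lambda(mags)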
| bsd-3-clause | 6,882,483,729,775,752,000 | 33.454741 | 108 | 0.585851 | false |
ChrisTimperley/PythonCGum | cgum/program.py | 1 | 5861 | from cgum.basic import *
from cgum.utility import FNULL
from pprint import pprint
import cgum.statement as statement
import cgum.expression as expression
import cgum.preprocessor as preprocessor
import cgum.typs as typs
from subprocess import Popen, CalledProcessError
import os.path
import json
import tempfile
import codecs
# TODO: Probe
class Asm(Node):
CODE = "260800"
LABEL = "Asm"
def __init__(self, pos, length, label, children):
assert label is None
super().__init__(pos, length, label, children)
class Label(Node):
CODE = "270100"
LABEL = "Label"
def __init__(self, pos, length, label, children):
assert label is None
assert len(children) in [1, 2]
assert isinstance(children[0], GenericString)
super().__init__(pos, length, label, children)
def name(self):
        return self.children()[0].to_s()
def statement(self):
children = self.children()
if len(children) == 2:
return children[1]
return None
class FunctionParameter(Node):
CODE = "220100"
LABEL = "ParameterType"
def __init__(self, pos, length, label, children):
assert label is None
assert len(children) <= 2
# Find the optional type and name of this parameter
tmp = children.copy()
self.__typ = \
tmp.pop(0) if (tmp and isinstance(tmp[0], typs.FullType)) else None
self.__name = tmp.pop(0) if tmp else None
assert self.__typ is None or isinstance(self.__typ, typs.FullType)
assert self.__name is None or isinstance(self.__name, GenericString)
super().__init__(pos, length, label, children)
def is_incomplete(self):
return self.name() is None
def typ(self):
return self.__typ.to_s() if self.__typ else None
def name(self):
return self.__name.to_s() if self.__name else None
class FunctionParameters(Node):
CODE = "200000"
LABEL = "ParamList"
def __init__(self, pos, length, label, children):
assert label is None
assert all([isinstance(c, FunctionParameter) for c in children])
super().__init__(pos, length, label, children)
def parameters(self):
        return self.children()
class FunctionDefinition(Node):
CODE = "380000"
LABEL = "Definition"
@staticmethod
def from_json(jsn):
return FunctionDefinition(jsn['pos'], name, params, block, storage, dots)
def __init__(self, pos, length, label, children):
assert len(children) >= 3 and len(children) <= 5
tmp = children.copy()
self.__storage = \
tmp.pop(0) if isinstance(tmp[0], typs.Storage) else None
self.__parameters = tmp.pop(0)
self.__dots = \
tmp.pop(0) if isinstance(tmp[0], typs.DotsParameter) else None
self.__name = tmp.pop(0)
self.__block = tmp.pop(0)
assert isinstance(self.__parameters, FunctionParameters)
assert self.__dots is None or \
isinstance(self.__dots, typs.DotsParameter)
assert self.__storage is None or \
isinstance(self.__storage, typs.Storage)
assert isinstance(self.__name, GenericString)
assert isinstance(self.__block, statement.Block)
super().__init__(pos, length, label, children)
def name(self):
return self.__name
def parameters(self):
return self.__parameters
def block(self):
return self.__block
def storage(self):
return self.__storage
def dots(self):
return self.__dots
def is_variadic(self):
return not (self.dots() is None)
# Used to mark the end of the program!
class FinalDef(Token):
CODE = "450800"
LABEL = "FinalDef"
# Represents the root AST node for a program
# For now we just get all the "components" of a program and worry about what
# kind of components they might be later.
#
# Throw away the FinalDef
class Program(Node):
CODE = "460000"
LABEL = "Program"
# Generates an AST for a given source code file, using GumTree and CGum
@staticmethod
def from_source_file(fn):
tmp_f = tempfile.NamedTemporaryFile()
Program.parse_to_json_file(fn, tmp_f)
return Program.from_json_file(tmp_f.name)
# Parses a JSON CGum AST, stored in a file at a specified location, into an
# equivalent, Python representation
@staticmethod
def from_json_file(fn):
#print("Attempting to read CGum AST from a JSON file: %s" % fn)
assert os.path.isfile(fn), "file not found"
with codecs.open(fn, 'r', 'utf-8') as f:
program = Node.from_json(json.load(f)['root'])
#print("Finished converting CGum AST from JSON into Python")
program.renumber()
return program
def __init__(self, pos, length, label, children):
assert label is None
assert len(children) >= 1
assert isinstance(children[-1], FinalDef)
children.pop()
super().__init__(pos, length, label, children)
@staticmethod
def parse_to_json_file(src_fn, jsn_f):
with tempfile.TemporaryFile() as f_err:
cmd = "gumtree parse \"%s\"" % src_fn
p = Popen(cmd, shell=True, stdin=FNULL, stdout=jsn_f, stderr=f_err)
code = p.wait()
# read the contents of the standard error
f_err.seek(0)
err = str(f_err.read())[2:-1]
# ensure the exit status was zero
if code != 0:
raise Exception("ERROR [PyCGum/parse_to_json_file]: unexpected exit code - %s" % error)
# run-time exceptions can occur whilst still returning an exit status
# of zero
elif err.startswith("java.lang.RuntimeException:"):
raise Exception("ERROR [PyCGum/parse_to_json_file]: %s" % err)
| mit | 3,467,776,033,047,293,000 | 31.743017 | 103 | 0.613888 | false |
greenonion/pytvd | tvdip.py | 1 | 8146 | """
tvdip.py
~~~~~~~~
This module is a direct port of the original [1] tvdip Matlab script into
NumPy.
[1] M.A. Little, Nick S. Jones (2010) "Sparse Bayesian Step-Filtering for High-
Throughput Analysis of Molecular Machine Dynamics", in 2010 IEEE International
Conference on Acoustics, Speech and Signal Processing, 2010, ICASSP 2010
Proceedings.
"""
import numpy as np
import scipy as Sci
from scipy import sparse
from scipy.sparse import linalg
import sys
def tvdiplmax(y):
"""Calculate the value of lambda so that if lambda >= lambdamax, the TVD
functional solved by TVDIP is minimized by the trivial constant solution
x = mean(y). This can then be used to determine a useful range of values
of lambda, for example.
Args:
y: Original signal to denoise, size N x 1.
Returns:
lambdamax: Value of lambda at which x = mean(y) is the output of the
TVDIP function.
"""
N = y.size
M = N - 1
# Construct sparse operator matrices
I1 = sparse.eye(M)
O1 = sparse.dia_matrix((M, 1))
D = sparse.hstack([I1, O1]) - sparse.hstack([O1, I1])
DDT = D.dot(D.conj().T)
Dy = D.dot(y)
lambdamax = np.absolute(linalg.spsolve(DDT, Dy)).max(0)
return lambdamax
def tvdip(y, lambdas, display=1, stoptol=1e-3, maxiter=60):
"""Performs discrete total variation denoising (TVD) using a primal-dual
interior-point solver. It minimizes the following discrete functional:
E=(1/2)||y-x||_2^2+lambda*||Dx||_1
over the variable x, given the input signal y, according to each value of
    the regularization parameter lambda > 0. D is the first difference matrix.
Uses hot-restarts from each value of lambda to speed up convergence for
subsequent values: best use of the feature is made by ensuring that the
chosen lambda values are close to each other.
Args:
y: Original signal to denoise, size N x 1.
lambdas: A vector of positive regularization parameters, size L x 1.
TVD will be applied to each value in the vector.
display: (Optional) Set to 0 to turn off progress display, 1 to turn
on. Defaults to 1.
stoptol: (Optional) Precision as determined by duality gap tolerance,
if not specified defaults to 1e-3.
maxiter: (Optional) Maximum interior-point iterations, if not specified
defaults to 60.
Returns:
x: Denoised output signal for each value of lambda, size N x L.
        E: Objective functional at minimum for each lambda, size L x 1.
s: Optimization result, 1 = solved, 0 = maximum iterations
exceeded before reaching duality gap tolerance, size L x 1.
lambdamax: Maximum value of lambda for the given y. If
lambda >= lambdamax, the output is the trivial constant solution
x = mean(y).
Example:
>>> import numpy as np
>>> import tvdip as tv
>>> # Find the value of lambda greater than which the TVD solution is
>>> # just the mean.
>>> lmax = tv.tvdiplmax(y)
>>> # Perform TV denoising for lambda across a range of values up to a
>>> # small fraction of the maximum found above.
>>> lratio = np.array([1e-4, 1e-3, 1e-2, 1e-1])
>>> x, E, status, l_max = tv.tvdip(y, lmax*lratio, True, 1e-3)
>>> plot(x[:,0])
"""
# Search tuning parameters
ALPHA = 0.01 # Backtracking linesearch parameter (0,0.5]
BETA = 0.5 # Backtracking linesearch parameter (0,1)
MAXLSITER = 20 # Max iterations of backtracking linesearch
MU = 2 # t update
N = y.size # Length of input signal y
M = N - 1 # Size of Dx
# Construct sparse operator matrices
I1 = sparse.eye(M)
O1 = sparse.dia_matrix((M, 1))
D = sparse.hstack([I1, O1]) - sparse.hstack([O1, I1])
DDT = D.dot(D.conj().T)
Dy = D.dot(y)
# Find max value of lambda
lambdamax = (np.absolute(linalg.spsolve(DDT, Dy))).max(0)
if display:
print "lambda_max=%5.2e" % lambdamax
L = lambdas.size
x = np.zeros((N, L))
s = np.zeros((L, 1))
E = np.zeros((L, 1))
# Optimization variables set up once at the start
z = np.zeros((M, 1))
mu1 = np.ones((M, 1))
mu2 = np.ones((M, 1))
# Work through each value of lambda, with hot-restart on optimization
# variables
for idx, l in enumerate(lambdas):
t = 1e-10
step = np.inf
f1 = z - l
f2 = -z - l
# Main optimization loop
s[idx] = 1
if display:
print "Solving for lambda={0:5.2e}, lambda/lambda_max={1:5.2e}".format(l, l/lambdamax)
print "Iter# primal Dual Gap"
for iters in xrange(maxiter):
DTz = (z.conj().T * D).conj().T
DDTz = D.dot(DTz)
w = Dy - (mu1 - mu2)
# Calculate objectives and primal-dual gap
pobj1 = 0.5*w.conj().T.dot(linalg.spsolve(DDT,w))+l*(np.sum(mu1+mu2))
pobj2 = 0.5*DTz.conj().T.dot(DTz)+l*np.sum(np.absolute(Dy-DDTz))
pobj = np.minimum(pobj1, pobj2)
dobj = -0.5*DTz.conj().T.dot(DTz) + Dy.conj().T.dot(z)
gap = pobj - dobj
if display:
print "{:5d} {:7.2e} {:7.2e} {:7.2e}".format(iters, pobj[0, 0],
dobj[0, 0],
gap[0, 0])
# Test duality gap stopping criterion
if gap <= stoptol:
s[idx] = 1
break
if step >= 0.2:
t = np.maximum(2*M*MU/gap, 1.2*t)
# Do Newton step
rz = DDTz - w
Sdata = (mu1/f1 + mu2/f2)
S = DDT-sparse.csc_matrix((Sdata.reshape(Sdata.size),
(np.arange(M), np.arange(M))))
r = -DDTz + Dy + (1/t)/f1 - (1/t)/f2
dz = linalg.spsolve(S, r).reshape(r.size, 1)
dmu1 = -(mu1+((1/t)+dz*mu1)/f1)
dmu2 = -(mu2+((1/t)-dz*mu2)/f2)
resDual = rz.copy()
resCent = np.vstack((-mu1*f1-1/t, -mu2*f2-1/t))
residual = np.vstack((resDual, resCent))
# Perform backtracking linesearch
negIdx1 = dmu1 < 0
negIdx2 = dmu2 < 0
step = 1
if np.any(negIdx1):
step = np.minimum(step,
0.99*(-mu1[negIdx1]/dmu1[negIdx1]).min(0))
if np.any(negIdx2):
step = np.minimum(step,
0.99*(-mu2[negIdx2]/dmu2[negIdx2]).min(0))
for _ in xrange(MAXLSITER):
newz = z + step*dz
newmu1 = mu1 + step*dmu1
newmu2 = mu2 + step*dmu2
newf1 = newz - l
newf2 = -newz - l
# Update residuals
newResDual = DDT.dot(newz) - Dy + newmu1 - newmu2
newResCent = np.vstack((-newmu1*newf1-1/t, -newmu2*newf2-1/t))
newResidual = np.vstack((newResDual, newResCent))
if (np.maximum(newf1.max(0), newf2.max(0)) < 0
and (Sci.linalg.norm(newResidual) <=
(1-ALPHA*step)*Sci.linalg.norm(residual))):
break
step = BETA * step
# Update primal and dual optimization parameters
z = newz
mu1 = newmu1
mu2 = newmu2
f1 = newf1
f2 = newf2
x[:, idx] = (y-D.conj().T.dot(z)).reshape(x.shape[0])
xval = x[:, idx].reshape(x.shape[0], 1)
E[idx] = 0.5*np.sum((y-xval)**2)+l*np.sum(np.absolute(D.dot(xval)))
# We may have a close solution that does not satisfy the duality gap
        if gap > stoptol:
s[idx] = 0
if display:
if s[idx]:
print("Solved to precision of duality gap %5.2e") % gap
else:
print("Max iterations exceeded - solution may be inaccurate")
return x, E, s, lambdamax
| gpl-2.0 | -8,849,415,337,858,050,000 | 33.811966 | 98 | 0.545912 | false |
Hitechverma/zamboni | mkt/webapps/indexers.py | 1 | 19450 | from operator import attrgetter
from django.core.urlresolvers import reverse
from django.db.models import Min
import commonware.log
from elasticsearch_dsl import F
from elasticsearch_dsl.filter import Bool
import mkt
from mkt.constants import APP_FEATURES
from mkt.constants.applications import DEVICE_GAIA
from mkt.prices.models import AddonPremium
from mkt.search.indexers import BaseIndexer
from mkt.search.utils import Search
from mkt.tags.models import attach_tags
from mkt.translations.models import attach_trans_dict
log = commonware.log.getLogger('z.addons')
class WebappIndexer(BaseIndexer):
"""Fields we don't need to expose in the results, only used for filtering
or sorting."""
hidden_fields = (
'*.raw',
'*_sort',
'popularity_*',
'trending_*',
'boost',
'owners',
'features',
# 'name' and 'description', as well as the locale variants, are only
# used for filtering. The fields that are used by the API are
# 'name_translations' and 'description_translations'.
'name',
'description',
'name_l10n_*',
'description_l10n_*',
)
"""
Bunch of ES stuff for Webapp include mappings, indexing, search.
"""
@classmethod
def search(cls, using=None):
"""
Returns a `Search` object.
We override this to use our patched version which adds statsd timing.
"""
return (Search(
using=using or cls.get_es(), index=cls.get_index(),
doc_type=cls.get_mapping_type_name())
.extra(_source={'exclude': cls.hidden_fields}))
@classmethod
def get_mapping_type_name(cls):
"""
Returns mapping type name which is used as the key in ES_INDEXES to
determine which index to use.
We override this because Webapp is a proxy model to Addon.
"""
return 'webapp'
@classmethod
def get_model(cls):
from mkt.webapps.models import Webapp
return Webapp
@classmethod
def get_mapping(cls):
doc_type = cls.get_mapping_type_name()
mapping = {
doc_type: {
# Disable _all field to reduce index size.
'_all': {'enabled': False},
'properties': {
# App fields.
'id': {'type': 'long'},
'app_slug': {'type': 'string'},
'app_type': {'type': 'byte'},
'author': {
'type': 'string',
'analyzer': 'default_icu',
'fields': {
# For exact matches. The simple analyzer allows
# for case-insensitive matching.
'raw': {'type': 'string',
'analyzer': 'exact_lowercase'},
},
},
'banner_regions': cls.string_not_indexed(),
'bayesian_rating': {'type': 'float', 'doc_values': True},
'category': cls.string_not_analyzed(),
'content_descriptors': cls.string_not_indexed(),
'content_ratings': {
'type': 'object',
'dynamic': 'true',
},
'created': {'format': 'dateOptionalTime', 'type': 'date',
'doc_values': True},
'current_version': cls.string_not_indexed(),
'default_locale': cls.string_not_indexed(),
'description': {'type': 'string',
'analyzer': 'default_icu',
'position_offset_gap': 100},
'device': {'type': 'byte'},
# The date this app was added to the escalation queue.
'escalation_date': {'format': 'dateOptionalTime',
'type': 'date', 'doc_values': True},
'features': {
'type': 'object',
'properties': dict(
('has_%s' % f.lower(), {'type': 'boolean'})
for f in APP_FEATURES)
},
'file_size': {'type': 'long'},
'guid': cls.string_not_analyzed(),
'has_public_stats': {'type': 'boolean'},
'icon_hash': cls.string_not_indexed(),
'interactive_elements': cls.string_not_indexed(),
'installs_allowed_from': cls.string_not_analyzed(),
'is_disabled': {'type': 'boolean'},
'is_escalated': {'type': 'boolean'},
'is_offline': {'type': 'boolean'},
'is_priority': {'type': 'boolean'},
'is_rereviewed': {'type': 'boolean'},
'last_updated': {'format': 'dateOptionalTime',
'type': 'date'},
'latest_version': {
'type': 'object',
'properties': {
'status': {'type': 'byte'},
'is_privileged': {'type': 'boolean'},
'has_editor_comment': {'type': 'boolean'},
'has_info_request': {'type': 'boolean'},
'nomination_date': {'type': 'date',
'format': 'dateOptionalTime'},
'created_date': {'type': 'date',
'format': 'dateOptionalTime'},
},
},
'manifest_url': cls.string_not_analyzed(),
'modified': {'format': 'dateOptionalTime',
'type': 'date'},
# Name for searching. This is a list of all the localized
# names for the app. We add "position_offset_gap" to work
# around the fact that ES stores the same list of tokens as
# if this were a single string. The offset gap adds 100
# positions between each name and ensures one string from
# one name and one string from another name won't both
# match with a phrase match query.
'name': {
'type': 'string',
'analyzer': 'default_icu',
'position_offset_gap': 100,
# For exact matches. Referenced as `name.raw`.
'fields': {
'raw': cls.string_not_analyzed(
position_offset_gap=100)
},
},
# Name for sorting.
'name_sort': cls.string_not_analyzed(doc_values=True),
# Name for suggestions.
'name_suggest': {'type': 'completion', 'payloads': True},
'owners': {'type': 'long'},
'package_path': cls.string_not_indexed(),
'premium_type': {'type': 'byte'},
'previews': {
'type': 'object',
'dynamic': 'true',
},
'price_tier': cls.string_not_indexed(),
'ratings': {
'type': 'object',
'properties': {
'average': {'type': 'float'},
'count': {'type': 'short'},
}
},
'region_exclusions': {'type': 'short'},
'reviewed': {'format': 'dateOptionalTime', 'type': 'date',
'doc_values': True},
# The date this app was added to the re-review queue.
'rereview_date': {'format': 'dateOptionalTime',
'type': 'date', 'doc_values': True},
'status': {'type': 'byte'},
'supported_locales': cls.string_not_analyzed(),
'tags': cls.string_not_analyzed(),
'upsell': {
'type': 'object',
'properties': {
'id': {'type': 'long'},
'app_slug': cls.string_not_indexed(),
'icon_url': cls.string_not_indexed(),
'name': cls.string_not_indexed(),
'region_exclusions': {'type': 'short'},
}
},
'uses_flash': {'type': 'boolean'},
'versions': {
'type': 'object',
'properties': {
'version': cls.string_not_indexed(),
'resource_uri': cls.string_not_indexed(),
}
},
}
}
}
# Attach boost field, because we are going to need search by relevancy.
cls.attach_boost_mapping(mapping)
# Attach popularity and trending.
cls.attach_trending_and_popularity_mappings(mapping)
# Add fields that we expect to return all translations.
cls.attach_translation_mappings(
mapping, ('banner_message', 'description', 'homepage',
'name', 'release_notes', 'support_email',
'support_url'))
# Add language-specific analyzers.
cls.attach_language_specific_analyzers(
mapping, ('name', 'description'))
return mapping
@classmethod
def extract_document(cls, pk=None, obj=None):
"""Extracts the ElasticSearch index document for this instance."""
from mkt.webapps.models import (AppFeatures, attach_devices,
attach_prices, attach_translations,
RatingDescriptors, RatingInteractives)
if obj is None:
obj = cls.get_model().objects.get(pk=pk)
# Attach everything we need to index apps.
for transform in (attach_devices, attach_prices, attach_tags,
attach_translations):
transform([obj])
latest_version = obj.latest_version
version = obj.current_version
geodata = obj.geodata
features = (version.features.to_dict()
if version else AppFeatures().to_dict())
try:
status = latest_version.statuses[0][1] if latest_version else None
except IndexError:
status = None
attrs = ('app_slug', 'bayesian_rating', 'created', 'default_locale',
'guid', 'icon_hash', 'id', 'is_disabled', 'is_offline',
'file_size', 'last_updated', 'modified', 'premium_type',
'status', 'uses_flash')
d = dict(zip(attrs, attrgetter(*attrs)(obj)))
d['app_type'] = obj.app_type_id
d['author'] = obj.developer_name
d['banner_regions'] = geodata.banner_regions_slugs()
d['category'] = obj.categories if obj.categories else []
d['content_ratings'] = (obj.get_content_ratings_by_body(es=True) or
None)
try:
d['content_descriptors'] = obj.rating_descriptors.to_keys()
except RatingDescriptors.DoesNotExist:
d['content_descriptors'] = []
d['current_version'] = version.version if version else None
d['device'] = getattr(obj, 'device_ids', [])
d['features'] = features
d['has_public_stats'] = obj.public_stats
try:
d['interactive_elements'] = obj.rating_interactives.to_keys()
except RatingInteractives.DoesNotExist:
d['interactive_elements'] = []
d['installs_allowed_from'] = (
version.manifest.get('installs_allowed_from', ['*'])
if version else ['*'])
d['is_priority'] = obj.priority_review
is_escalated = obj.escalationqueue_set.exists()
d['is_escalated'] = is_escalated
d['escalation_date'] = (obj.escalationqueue_set.get().created
if is_escalated else None)
is_rereviewed = obj.rereviewqueue_set.exists()
d['is_rereviewed'] = is_rereviewed
d['rereview_date'] = (obj.rereviewqueue_set.get().created
if is_rereviewed else None)
if latest_version:
d['latest_version'] = {
'status': status,
'is_privileged': latest_version.is_privileged,
'has_editor_comment': latest_version.has_editor_comment,
'has_info_request': latest_version.has_info_request,
'nomination_date': latest_version.nomination,
'created_date': latest_version.created,
}
else:
d['latest_version'] = {
'status': None,
'is_privileged': None,
'has_editor_comment': None,
'has_info_request': None,
'nomination_date': None,
'created_date': None,
}
d['manifest_url'] = obj.get_manifest_url()
d['package_path'] = obj.get_package_path()
d['name_sort'] = unicode(obj.name).lower()
d['owners'] = [au.user.id for au in
obj.addonuser_set.filter(role=mkt.AUTHOR_ROLE_OWNER)]
d['previews'] = [{'filetype': p.filetype, 'modified': p.modified,
'id': p.id, 'sizes': p.sizes}
for p in obj.previews.all()]
try:
p = obj.addonpremium.price
d['price_tier'] = p.name
except AddonPremium.DoesNotExist:
d['price_tier'] = None
d['ratings'] = {
'average': obj.average_rating,
'count': obj.total_reviews,
}
d['region_exclusions'] = obj.get_excluded_region_ids()
d['reviewed'] = obj.versions.filter(
deleted=False).aggregate(Min('reviewed')).get('reviewed__min')
# The default locale of the app is considered "supported" by default.
supported_locales = [obj.default_locale]
other_locales = (filter(None, version.supported_locales.split(','))
if version else [])
if other_locales:
supported_locales.extend(other_locales)
d['supported_locales'] = list(set(supported_locales))
d['tags'] = getattr(obj, 'tags_list', [])
if obj.upsell and obj.upsell.premium.is_published():
upsell_obj = obj.upsell.premium
d['upsell'] = {
'id': upsell_obj.id,
'app_slug': upsell_obj.app_slug,
'icon_url': upsell_obj.get_icon_url(128),
# TODO: Store all localizations of upsell.name.
'name': unicode(upsell_obj.name),
'region_exclusions': upsell_obj.get_excluded_region_ids()
}
d['versions'] = [dict(version=v.version,
resource_uri=reverse_version(v))
for v in obj.versions.all()]
# Handle localized fields.
# This adds both the field used for search and the one with
# all translations for the API.
for field in ('description', 'name'):
d.update(cls.extract_field_translations(
obj, field, include_field_for_search=True))
# This adds only the field with all the translations for the API, we
# don't need to search on those.
for field in ('homepage', 'support_email', 'support_url'):
d.update(cls.extract_field_translations(obj, field))
if version:
attach_trans_dict(version._meta.model, [version])
d.update(cls.extract_field_translations(
version, 'release_notes', db_field='releasenotes_id'))
else:
d['release_notes_translations'] = None
attach_trans_dict(geodata._meta.model, [geodata])
d.update(cls.extract_field_translations(geodata, 'banner_message'))
# Add boost, popularity, trending values.
d.update(cls.extract_popularity_trending_boost(obj))
# If the app is compatible with Firefox OS, push suggestion data in the
# index - This will be used by RocketbarView API, which is specific to
# Firefox OS.
if DEVICE_GAIA.id in d['device'] and obj.is_published():
d['name_suggest'] = {
'input': d['name'],
'output': unicode(obj.id), # We only care about the payload.
'weight': int(d['boost']),
'payload': {
'default_locale': d['default_locale'],
'icon_hash': d['icon_hash'],
'id': d['id'],
'manifest_url': d['manifest_url'],
'modified': d['modified'],
'name_translations': d['name_translations'],
'slug': d['app_slug'],
}
}
for field in ('name', 'description'):
d.update(cls.extract_field_analyzed_translations(obj, field))
return d
@classmethod
def get_indexable(cls):
"""Returns the queryset of ids of all things to be indexed."""
from mkt.webapps.models import Webapp
return Webapp.with_deleted.all()
@classmethod
def run_indexing(cls, ids, ES=None, index=None, **kw):
"""Override run_indexing to use app transformers."""
from mkt.webapps.models import Webapp
log.info('Indexing %s webapps' % len(ids))
qs = Webapp.with_deleted.filter(id__in=ids)
ES = ES or cls.get_es()
docs = []
for obj in list(qs):
try:
docs.append(cls.extract_document(obj.id, obj=obj))
except Exception as e:
log.error('Failed to index webapp {0}: {1}'
.format(obj.id, repr(e)),
# Trying to chase down a cache-machine problem.
exc_info="marketplace:" in str(e))
cls.bulk_index(docs, es=ES, index=index or cls.get_index())
@classmethod
def filter_by_apps(cls, app_ids, queryset=None):
"""
Filters the given queryset by the given app IDs.
This uses a `should` filter, which is equivalent to an "OR".
"""
queryset = queryset or cls.search()
app_ids = list(set(app_ids)) # De-dupe.
queryset = queryset.filter(Bool(should=[F('terms', id=app_ids)]))
return queryset[0:len(app_ids)]
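    # A hedged usage sketch (the app ids are placeholders): returns an ES
    # search limited to the given apps via the `should` (OR) filter above.
    # >>> results = WebappIndexer.filter_by_apps([42, 1337]).execute()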
def reverse_version(version):
"""
The try/except AttributeError allows this to be used where the input is
ambiguous, and could be either an already-reversed URL or a Version object.
"""
if version:
try:
return reverse('version-detail', kwargs={'pk': version.pk})
except AttributeError:
return version
return
| bsd-3-clause | 7,220,130,324,447,549,000 | 40.918103 | 79 | 0.48874 | false |
tobykurien/MakerDroid | assetsrc/public.mp3/fabmetheus_utilities/fabmetheus_tools/interpret_plugins/gts.py | 1 | 4561 | """
This page is in the table of contents.
The gts.py script is an import translator plugin to get a carving from an gts file.
An import plugin is a script in the interpret_plugins folder which has the function getCarving. It is meant to be run from the interpret tool. To ensure that the plugin works on platforms which do not handle file capitalization properly, give the plugin a lower case name.
The getCarving function takes the file name of an gts file and returns the carving.
The GNU Triangulated Surface (.gts) format is described at:
http://gts.sourceforge.net/reference/gts-surfaces.html#GTS-SURFACE-WRITE
Quoted from http://gts.sourceforge.net/reference/gts-surfaces.html#GTS-SURFACE-WRITE
"All the lines beginning with GTS_COMMENTS (#!) are ignored. The first line contains three unsigned integers separated by spaces. The first integer is the number of vertices, nv, the second is the number of edges, ne and the third is the number of faces, nf.
Follows nv lines containing the x, y and z coordinates of the vertices. Follows ne lines containing the two indices (starting from one) of the vertices of each edge. Follows nf lines containing the three ordered indices (also starting from one) of the edges of each face.
The format described above is the least common denominator to all GTS files. Consistent with an object-oriented approach, the GTS file format is extensible. Each of the lines of the file can be extended with user-specific attributes accessible through the read() and write() virtual methods of each of the objects written (surface, vertices, edges or faces). When read with different object classes, these extra attributes are just ignored."
This example gets a carving for the gts file Screw Holder Bottom.gts. This example is run in a terminal in the folder which contains Screw Holder Bottom.gts and gts.py.
> python
Python 2.5.1 (r251:54863, Sep 22 2007, 01:43:31)
[GCC 4.2.1 (SUSE Linux)] on linux2
Type "help", "copyright", "credits" or "license" for more information.
>>> import gts
>>> gts.getCarving('Screw Holder Bottom.gts')
[11.6000003815, 10.6837882996, 7.80209827423
..
many more lines of the carving
..
"""
from __future__ import absolute_import
#Init has to be imported first because it has code to workaround the python bug where relative imports don't work if the module is imported as a main module.
import __init__
from fabmetheus_utilities.vector3 import Vector3
from fabmetheus_utilities import gcodec
from fabmetheus_utilities.solids import triangle_mesh
__author__ = "Enrique Perez ([email protected])"
__credits__ = 'Nophead <http://hydraraptor.blogspot.com/>\nArt of Illusion <http://www.artofillusion.org/>'
__date__ = "$Date: 2008/21/04 $"
__license__ = "GPL 3.0"
def getFromGNUTriangulatedSurfaceText( gnuTriangulatedSurfaceText, triangleMesh ):
"Initialize from a GNU Triangulated Surface Text."
if gnuTriangulatedSurfaceText == '':
return None
lines = gcodec.getTextLines( gnuTriangulatedSurfaceText )
linesWithoutComments = []
for line in lines:
if len( line ) > 0:
firstCharacter = line[ 0 ]
if firstCharacter != '#' and firstCharacter != '!':
linesWithoutComments.append( line )
splitLine = linesWithoutComments[ 0 ].split()
numberOfVertices = int( splitLine[ 0 ] )
numberOfEdges = int( splitLine[ 1 ] )
numberOfFaces = int( splitLine[ 2 ] )
faceTriples = []
for vertexIndex in xrange( numberOfVertices ):
line = linesWithoutComments[ vertexIndex + 1 ]
splitLine = line.split()
vertex = Vector3( float( splitLine[ 0 ] ), float( splitLine[ 1 ] ), float( splitLine[ 2 ] ) )
triangleMesh.vertices.append( vertex )
edgeStart = numberOfVertices + 1
for edgeIndex in xrange( numberOfEdges ):
line = linesWithoutComments[ edgeIndex + edgeStart ]
splitLine = line.split()
vertexIndexes = []
for word in splitLine[ : 2 ]:
vertexIndexes.append( int( word ) - 1 )
edge = triangle_mesh.Edge().getFromVertexIndexes( edgeIndex, vertexIndexes )
triangleMesh.edges.append( edge )
faceStart = edgeStart + numberOfEdges
for faceIndex in xrange( numberOfFaces ):
line = linesWithoutComments[ faceIndex + faceStart ]
splitLine = line.split()
edgeIndexes = []
for word in splitLine[ : 3 ]:
edgeIndexes.append( int( word ) - 1 )
face = triangle_mesh.Face().getFromEdgeIndexes( edgeIndexes, triangleMesh.edges, faceIndex )
triangleMesh.faces.append( face )
return triangleMesh
def getCarving( fileName ):
"Get the carving for the gts file."
return getFromGNUTriangulatedSurfaceText( gcodec.getFileText( fileName ), triangle_mesh.TriangleMesh() )
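# A minimal illustrative GTS text (coordinates are made up): the header
# "3 3 1" declares 3 vertices, 3 edges and 1 face; edges and faces use
# 1-based indices, as parsed by getFromGNUTriangulatedSurfaceText above.
# >>> gtsText = '3 3 1\n0 0 0\n1 0 0\n0 1 0\n1 2\n2 3\n3 1\n1 2 3'
# >>> mesh = getFromGNUTriangulatedSurfaceText( gtsText, triangle_mesh.TriangleMesh() )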
| gpl-3.0 | 1,615,288,114,955,321,900 | 48.043011 | 441 | 0.754221 | false |
DarkFenX/Pyfa | eos/saveddata/targetProfile.py | 1 | 20115 | # ===============================================================================
# Copyright (C) 2014 Ryan Holmes
#
# This file is part of eos.
#
# eos is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# eos is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with eos. If not, see <http://www.gnu.org/licenses/>.
# ===============================================================================
import math
import re
from collections import OrderedDict
from logbook import Logger
from sqlalchemy.orm import reconstructor
import eos.db
pyfalog = Logger(__name__)
BUILTINS = OrderedDict([
# 0 is taken by ideal target profile, composed manually in one of TargetProfile methods
(-1, ('Uniform (25%)', 0.25, 0.25, 0.25, 0.25)),
(-2, ('Uniform (50%)', 0.50, 0.50, 0.50, 0.50)),
(-3, ('Uniform (75%)', 0.75, 0.75, 0.75, 0.75)),
(-4, ('Uniform (90%)', 0.90, 0.90, 0.90, 0.90)),
(-5, ('[T1 Resist]Shield', 0.0, 0.20, 0.40, 0.50)),
(-6, ('[T1 Resist]Armor', 0.50, 0.45, 0.25, 0.10)),
(-7, ('[T1 Resist]Hull', 0.33, 0.33, 0.33, 0.33)),
(-8, ('[T1 Resist]Shield (+T2 DCU)', 0.125, 0.30, 0.475, 0.562)),
(-9, ('[T1 Resist]Armor (+T2 DCU)', 0.575, 0.532, 0.363, 0.235)),
(-10, ('[T1 Resist]Hull (+T2 DCU)', 0.598, 0.598, 0.598, 0.598)),
(-11, ('[T2 Resist]Amarr (Shield)', 0.0, 0.20, 0.70, 0.875)),
(-12, ('[T2 Resist]Amarr (Armor)', 0.50, 0.35, 0.625, 0.80)),
(-13, ('[T2 Resist]Caldari (Shield)', 0.20, 0.84, 0.76, 0.60)),
(-14, ('[T2 Resist]Caldari (Armor)', 0.50, 0.8625, 0.625, 0.10)),
(-15, ('[T2 Resist]Gallente (Shield)', 0.0, 0.60, 0.85, 0.50)),
(-16, ('[T2 Resist]Gallente (Armor)', 0.50, 0.675, 0.8375, 0.10)),
(-17, ('[T2 Resist]Minmatar (Shield)', 0.75, 0.60, 0.40, 0.50)),
(-18, ('[T2 Resist]Minmatar (Armor)', 0.90, 0.675, 0.25, 0.10)),
(-19, ('[NPC][Asteroid]Angel Cartel', 0.54, 0.42, 0.37, 0.32)),
(-20, ('[NPC][Asteroid]Blood Raiders', 0.34, 0.39, 0.45, 0.52)),
(-21, ('[NPC][Asteroid]Guristas', 0.55, 0.35, 0.3, 0.48)),
(-22, ('[NPC][Asteroid]Rogue Drones', 0.35, 0.38, 0.44, 0.49)),
(-23, ('[NPC][Asteroid]Sanshas Nation', 0.35, 0.4, 0.47, 0.53)),
(-24, ('[NPC][Asteroid]Serpentis', 0.49, 0.38, 0.29, 0.51)),
(-25, ('[NPC][Deadspace]Angel Cartel', 0.59, 0.48, 0.4, 0.32)),
(-26, ('[NPC][Deadspace]Blood Raiders', 0.31, 0.39, 0.47, 0.56)),
(-27, ('[NPC][Deadspace]Guristas', 0.57, 0.39, 0.31, 0.5)),
(-28, ('[NPC][Deadspace]Rogue Drones', 0.42, 0.42, 0.47, 0.49)),
(-29, ('[NPC][Deadspace]Sanshas Nation', 0.31, 0.39, 0.47, 0.56)),
(-30, ('[NPC][Deadspace]Serpentis', 0.49, 0.38, 0.29, 0.56)),
(-31, ('[NPC][Mission]Amarr Empire', 0.34, 0.38, 0.42, 0.46)),
(-32, ('[NPC][Mission]Caldari State', 0.51, 0.38, 0.3, 0.51)),
(-33, ('[NPC][Mission]CONCORD', 0.47, 0.46, 0.47, 0.47)),
(-34, ('[NPC][Mission]Gallente Federation', 0.51, 0.38, 0.31, 0.52)),
(-35, ('[NPC][Mission]Khanid', 0.51, 0.42, 0.36, 0.4)),
(-36, ('[NPC][Mission]Minmatar Republic', 0.51, 0.46, 0.41, 0.35)),
(-37, ('[NPC][Mission]Mordus Legion', 0.32, 0.48, 0.4, 0.62)),
(-38, ('[NPC][Other]Sleeper', 0.61, 0.61, 0.61, 0.61)),
(-39, ('[NPC][Other]Sansha Incursion', 0.65, 0.63, 0.64, 0.65)),
(-40, ('[NPC][Burner]Cruor (Blood Raiders)', 0.8, 0.73, 0.69, 0.67)),
(-41, ('[NPC][Burner]Dramiel (Angel)', 0.35, 0.48, 0.61, 0.68)),
(-42, ('[NPC][Burner]Daredevil (Serpentis)', 0.69, 0.59, 0.59, 0.43)),
(-43, ('[NPC][Burner]Succubus (Sanshas Nation)', 0.35, 0.48, 0.61, 0.68)),
(-44, ('[NPC][Burner]Worm (Guristas)', 0.48, 0.58, 0.69, 0.74)),
(-45, ('[NPC][Burner]Enyo', 0.58, 0.72, 0.86, 0.24)),
(-46, ('[NPC][Burner]Hawk', 0.3, 0.86, 0.79, 0.65)),
(-47, ('[NPC][Burner]Jaguar', 0.78, 0.65, 0.48, 0.56)),
(-48, ('[NPC][Burner]Vengeance', 0.66, 0.56, 0.75, 0.86)),
(-49, ('[NPC][Burner]Ashimmu (Blood Raiders)', 0.8, 0.76, 0.68, 0.7)),
(-50, ('[NPC][Burner]Talos', 0.68, 0.59, 0.59, 0.43)),
(-51, ('[NPC][Burner]Sentinel', 0.58, 0.45, 0.52, 0.66)),
# Source: ticket #2067
(-52, ('[NPC][Invasion]Invading Precursor Entities', 0.422, 0.367, 0.453, 0.411)),
(-53, ('[NPC][Invasion]Retaliating Amarr Entities', 0.360, 0.310, 0.441, 0.602)),
(-54, ('[NPC][Invasion]Retaliating Caldari Entities', 0.287, 0.610, 0.487, 0.401)),
(-55, ('[NPC][Invasion]Retaliating Gallente Entities', 0.383, 0.414, 0.578, 0.513)),
(-56, ('[NPC][Invasion]Retaliating Minmatar Entities', 0.620, 0.422, 0.355, 0.399)),
(-57, ('[NPC][Abyssal][Dark Matter All Tiers]Drones', 0.439, 0.522, 0.529, 0.435)),
(-58, ('[NPC][Abyssal][Dark Matter All Tiers]Overmind', 0.626, 0.576, 0.612, 0.624)),
(-59, ('[NPC][Abyssal][Dark Matter All Tiers]Seeker', 0.082, 0.082, 0.082, 0.082)),
(-60, ('[NPC][Abyssal][Dark Matter All Tiers]Triglavian', 0.477, 0.401, 0.449, 0.37)),
(-61, ('[NPC][Abyssal][Dark Matter All Tiers]Drifter', 0.403, 0.403, 0.403, 0.403)),
(-62, ('[NPC][Abyssal][Dark Matter All Tiers]Sleeper', 0.435, 0.435, 0.435, 0.435)),
(-63, ('[NPC][Abyssal][Dark Matter All Tiers]All', 0.507, 0.477, 0.502, 0.493)),
(-64, ('[NPC][Abyssal][Electrical T1/T2]Drones', 0.323, 0.522, 0.529, 0.435)),
(-65, ('[NPC][Abyssal][Electrical T1/T2]Overmind', 0.521, 0.576, 0.612, 0.624)),
(-66, ('[NPC][Abyssal][Electrical T1/T2]Seeker', 0, 0.082, 0.082, 0.082)),
(-67, ('[NPC][Abyssal][Electrical T1/T2]Triglavian', 0.333, 0.401, 0.449, 0.37)),
(-68, ('[NPC][Abyssal][Electrical T1/T2]Drifter', 0.267, 0.403, 0.403, 0.403)),
(-69, ('[NPC][Abyssal][Electrical T1/T2]Sleeper', 0.329, 0.435, 0.435, 0.435)),
(-70, ('[NPC][Abyssal][Electrical T1/T2]All', 0.385, 0.477, 0.502, 0.493)),
(-71, ('[NPC][Abyssal][Electrical T3 (Some T5 Rooms)]Drones', 0.255, 0.522, 0.529, 0.435)),
(-72, ('[NPC][Abyssal][Electrical T3 (Some T5 Rooms)]Overmind', 0.457, 0.576, 0.612, 0.624)),
(-73, ('[NPC][Abyssal][Electrical T3 (Some T5 Rooms)]Seeker', 0, 0.082, 0.082, 0.082)),
(-74, ('[NPC][Abyssal][Electrical T3 (Some T5 Rooms)]Triglavian', 0.241, 0.401, 0.449, 0.37)),
(-75, ('[NPC][Abyssal][Electrical T3 (Some T5 Rooms)]Drifter', 0.184, 0.403, 0.403, 0.403)),
(-76, ('[NPC][Abyssal][Electrical T3 (Some T5 Rooms)]Sleeper', 0.268, 0.435, 0.435, 0.435)),
(-77, ('[NPC][Abyssal][Electrical T3 (Some T5 Rooms)]All', 0.313, 0.477, 0.502, 0.493)),
(-78, ('[NPC][Abyssal][Electrical T4/T5]Drones', 0.193, 0.522, 0.529, 0.435)),
(-79, ('[NPC][Abyssal][Electrical T4/T5]Overmind', 0.398, 0.576, 0.612, 0.624)),
(-80, ('[NPC][Abyssal][Electrical T4/T5]Seeker', 0, 0.082, 0.082, 0.082)),
(-81, ('[NPC][Abyssal][Electrical T4/T5]Triglavian', 0.183, 0.401, 0.449, 0.37)),
(-82, ('[NPC][Abyssal][Electrical T4/T5]Drifter', 0.107, 0.403, 0.403, 0.403)),
(-83, ('[NPC][Abyssal][Electrical T4/T5]Sleeper', 0.215, 0.435, 0.435, 0.435)),
(-84, ('[NPC][Abyssal][Electrical T4/T5]All', 0.25, 0.477, 0.502, 0.493)),
(-85, ('[NPC][Abyssal][Firestorm T1/T2]Drones', 0.461, 0.425, 0.541, 0.443)),
(-86, ('[NPC][Abyssal][Firestorm T1/T2]Overmind', 0.65, 0.469, 0.625, 0.633)),
(-87, ('[NPC][Abyssal][Firestorm T1/T2]Seeker', 0.084, 0, 0.084, 0.084)),
(-88, ('[NPC][Abyssal][Firestorm T1/T2]Triglavian', 0.534, 0.266, 0.484, 0.366)),
(-89, ('[NPC][Abyssal][Firestorm T1/T2]Drifter', 0.422, 0.282, 0.422, 0.422)),
(-90, ('[NPC][Abyssal][Firestorm T1/T2]Sleeper', 0.512, 0.402, 0.512, 0.512)),
(-91, ('[NPC][Abyssal][Firestorm T1/T2]All', 0.541, 0.365, 0.524, 0.504)),
(-92, ('[NPC][Abyssal][Firestorm T3 (Some T5 Rooms)]Drones', 0.461, 0.36, 0.541, 0.443)),
(-93, ('[NPC][Abyssal][Firestorm T3 (Some T5 Rooms)]Overmind', 0.65, 0.391, 0.625, 0.633)),
(-94, ('[NPC][Abyssal][Firestorm T3 (Some T5 Rooms)]Seeker', 0.084, 0, 0.084, 0.084)),
(-95, ('[NPC][Abyssal][Firestorm T3 (Some T5 Rooms)]Triglavian', 0.534, 0.161, 0.484, 0.366)),
(-96, ('[NPC][Abyssal][Firestorm T3 (Some T5 Rooms)]Drifter', 0.422, 0.196, 0.422, 0.422)),
(-97, ('[NPC][Abyssal][Firestorm T3 (Some T5 Rooms)]Sleeper', 0.512, 0.337, 0.512, 0.512)),
(-98, ('[NPC][Abyssal][Firestorm T3 (Some T5 Rooms)]All', 0.541, 0.284, 0.524, 0.504)),
(-99, ('[NPC][Abyssal][Firestorm T4/T5]Drones', 0.461, 0.305, 0.541, 0.443)),
(-100, ('[NPC][Abyssal][Firestorm T4/T5]Overmind', 0.65, 0.323, 0.625, 0.633)),
(-101, ('[NPC][Abyssal][Firestorm T4/T5]Seeker', 0.084, 0, 0.084, 0.084)),
(-102, ('[NPC][Abyssal][Firestorm T4/T5]Triglavian', 0.534, 0.082, 0.484, 0.366)),
(-103, ('[NPC][Abyssal][Firestorm T4/T5]Drifter', 0.422, 0.114, 0.422, 0.422)),
(-104, ('[NPC][Abyssal][Firestorm T4/T5]Sleeper', 0.512, 0.276, 0.512, 0.512)),
(-105, ('[NPC][Abyssal][Firestorm T4/T5]All', 0.541, 0.214, 0.524, 0.504)),
(-106, ('[NPC][Abyssal][Exotic T1/T2]Drones', 0.439, 0.522, 0.417, 0.435)),
(-107, ('[NPC][Abyssal][Exotic T1/T2]Overmind', 0.626, 0.576, 0.496, 0.624)),
(-108, ('[NPC][Abyssal][Exotic T1/T2]Seeker', 0.082, 0.082, 0, 0.082)),
(-109, ('[NPC][Abyssal][Exotic T1/T2]Triglavian', 0.477, 0.401, 0.284, 0.37)),
(-110, ('[NPC][Abyssal][Exotic T1/T2]Drifter', 0.403, 0.403, 0.267, 0.403)),
(-111, ('[NPC][Abyssal][Exotic T1/T2]Sleeper', 0.435, 0.435, 0.329, 0.435)),
(-112, ('[NPC][Abyssal][Exotic T1/T2]All', 0.507, 0.477, 0.373, 0.493)),
(-113, ('[NPC][Abyssal][Exotic T3 (Some T5 Rooms)]Drones', 0.439, 0.522, 0.351, 0.435)),
(-114, ('[NPC][Abyssal][Exotic T3 (Some T5 Rooms)]Overmind', 0.626, 0.576, 0.419, 0.624)),
(-115, ('[NPC][Abyssal][Exotic T3 (Some T5 Rooms)]Seeker', 0.082, 0.082, 0, 0.082)),
(-116, ('[NPC][Abyssal][Exotic T3 (Some T5 Rooms)]Triglavian', 0.477, 0.401, 0.176, 0.37)),
(-117, ('[NPC][Abyssal][Exotic T3 (Some T5 Rooms)]Drifter', 0.403, 0.403, 0.184, 0.403)),
(-118, ('[NPC][Abyssal][Exotic T3 (Some T5 Rooms)]Sleeper', 0.435, 0.435, 0.268, 0.435)),
(-119, ('[NPC][Abyssal][Exotic T3 (Some T5 Rooms)]All', 0.507, 0.477, 0.293, 0.493)),
(-120, ('[NPC][Abyssal][Exotic T4/T5]Drones', 0.439, 0.522, 0.293, 0.435)),
(-121, ('[NPC][Abyssal][Exotic T4/T5]Overmind', 0.626, 0.576, 0.344, 0.624)),
(-122, ('[NPC][Abyssal][Exotic T4/T5]Seeker', 0.082, 0.082, 0, 0.082)),
(-123, ('[NPC][Abyssal][Exotic T4/T5]Triglavian', 0.477, 0.401, 0.107, 0.37)),
(-124, ('[NPC][Abyssal][Exotic T4/T5]Drifter', 0.403, 0.403, 0.107, 0.403)),
(-125, ('[NPC][Abyssal][Exotic T4/T5]Sleeper', 0.435, 0.435, 0.215, 0.435)),
(-126, ('[NPC][Abyssal][Exotic T4/T5]All', 0.507, 0.477, 0.223, 0.493)),
(-127, ('[NPC][Abyssal][Gamma T1/T2]Drones', 0.449, 0.54, 0.549, 0.336)),
(-128, ('[NPC][Abyssal][Gamma T1/T2]Overmind', 0.6, 0.557, 0.601, 0.504)),
(-129, ('[NPC][Abyssal][Gamma T1/T2]Seeker', 0.085, 0.085, 0.085, 0)),
(-130, ('[NPC][Abyssal][Gamma T1/T2]Triglavian', 0.463, 0.392, 0.447, 0.193)),
(-131, ('[NPC][Abyssal][Gamma T1/T2]Drifter', 0.428, 0.428, 0.428, 0.287)),
(-132, ('[NPC][Abyssal][Gamma T1/T2]Sleeper', 0.435, 0.435, 0.435, 0.329)),
(-133, ('[NPC][Abyssal][Gamma T1/T2]All', 0.493, 0.472, 0.5, 0.362)),
(-134, ('[NPC][Abyssal][Gamma T3 (Some T5 Rooms)]Drones', 0.449, 0.54, 0.549, 0.264)),
(-135, ('[NPC][Abyssal][Gamma T3 (Some T5 Rooms)]Overmind', 0.6, 0.557, 0.601, 0.428)),
(-136, ('[NPC][Abyssal][Gamma T3 (Some T5 Rooms)]Seeker', 0.085, 0.085, 0.085, 0)),
(-137, ('[NPC][Abyssal][Gamma T3 (Some T5 Rooms)]Triglavian', 0.463, 0.392, 0.447, 0.071)),
(-138, ('[NPC][Abyssal][Gamma T3 (Some T5 Rooms)]Drifter', 0.428, 0.428, 0.428, 0.2)),
(-139, ('[NPC][Abyssal][Gamma T3 (Some T5 Rooms)]Sleeper', 0.435, 0.435, 0.435, 0.268)),
(-140, ('[NPC][Abyssal][Gamma T3 (Some T5 Rooms)]All', 0.493, 0.472, 0.5, 0.28)),
(-141, ('[NPC][Abyssal][Gamma T4/T5]Drones', 0.449, 0.54, 0.549, 0.197)),
(-142, ('[NPC][Abyssal][Gamma T4/T5]Overmind', 0.6, 0.557, 0.601, 0.356)),
(-143, ('[NPC][Abyssal][Gamma T4/T5]Seeker', 0.085, 0.085, 0.085, 0)),
(-144, ('[NPC][Abyssal][Gamma T4/T5]Triglavian', 0.463, 0.392, 0.447, 0.029)),
(-145, ('[NPC][Abyssal][Gamma T4/T5]Drifter', 0.428, 0.428, 0.428, 0.117)),
(-146, ('[NPC][Abyssal][Gamma T4/T5]Sleeper', 0.435, 0.435, 0.435, 0.215)),
(-147, ('[NPC][Abyssal][Gamma T4/T5]All', 0.493, 0.472, 0.5, 0.21))])
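# Each BUILTINS entry maps a (negative) profile ID to a tuple of
# (rawName, emAmount, thermalAmount, kineticAmount, explosiveAmount);
# __generateBuiltins() below strips the name off and passes the remaining
# values straight to TargetProfile(*data).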
class TargetProfile:
    # also determines import/export order - VERY IMPORTANT
DAMAGE_TYPES = ('em', 'thermal', 'kinetic', 'explosive')
_idealTarget = None
_builtins = None
def __init__(self, *args, **kwargs):
self.builtin = False
self.update(*args, **kwargs)
@reconstructor
def init(self):
self.builtin = False
def update(self, emAmount=0, thermalAmount=0, kineticAmount=0, explosiveAmount=0, maxVelocity=None, signatureRadius=None, radius=None):
self.emAmount = emAmount
self.thermalAmount = thermalAmount
self.kineticAmount = kineticAmount
self.explosiveAmount = explosiveAmount
self._maxVelocity = maxVelocity
self._signatureRadius = signatureRadius
self._radius = radius
@classmethod
def getBuiltinList(cls):
if cls._builtins is None:
cls.__generateBuiltins()
return list(cls._builtins.values())
@classmethod
def getBuiltinById(cls, id):
if cls._builtins is None:
cls.__generateBuiltins()
return cls._builtins.get(id)
@classmethod
def __generateBuiltins(cls):
cls._builtins = OrderedDict()
for id, data in BUILTINS.items():
rawName = data[0]
data = data[1:]
profile = TargetProfile(*data)
profile.ID = id
profile.rawName = rawName
profile.builtin = True
cls._builtins[id] = profile
@classmethod
def getIdeal(cls):
if cls._idealTarget is None:
cls._idealTarget = cls(
emAmount=0,
thermalAmount=0,
kineticAmount=0,
explosiveAmount=0,
maxVelocity=0,
signatureRadius=None,
radius=0)
cls._idealTarget.rawName = 'Ideal Target'
cls._idealTarget.ID = 0
cls._idealTarget.builtin = True
return cls._idealTarget
@property
def maxVelocity(self):
return self._maxVelocity or 0
@maxVelocity.setter
def maxVelocity(self, val):
self._maxVelocity = val
@property
def signatureRadius(self):
if self._signatureRadius is None or self._signatureRadius == -1:
return math.inf
return self._signatureRadius
@signatureRadius.setter
def signatureRadius(self, val):
if val is not None and math.isinf(val):
val = None
self._signatureRadius = val
@property
def radius(self):
return self._radius or 0
@radius.setter
def radius(self, val):
self._radius = val
@classmethod
def importPatterns(cls, text):
lines = re.split('[\n\r]+', text)
patterns = []
numPatterns = 0
# When we import damage profiles, we create new ones and update old ones. To do this, get a list of current
# patterns to allow lookup
lookup = {}
current = eos.db.getTargetProfileList()
for pattern in current:
lookup[pattern.rawName] = pattern
for line in lines:
try:
if line.strip()[0] == "#": # comments
continue
line = line.split('#', 1)[0] # allows for comments
type, data = line.rsplit('=', 1)
type, data = type.strip(), [d.strip() for d in data.split(',')]
except (KeyboardInterrupt, SystemExit):
raise
except:
pyfalog.warning("Data isn't in correct format, continue to next line.")
continue
if type not in ("TargetProfile", "TargetResists"):
continue
numPatterns += 1
name, dataRes, dataMisc = data[0], data[1:5], data[5:8]
fields = {}
for index, val in enumerate(dataRes):
val = float(val) if val else 0
if math.isinf(val):
val = 0
try:
assert 0 <= val <= 100
fields["%sAmount" % cls.DAMAGE_TYPES[index]] = val / 100
except (KeyboardInterrupt, SystemExit):
raise
except:
pyfalog.warning("Caught unhandled exception in import patterns.")
continue
if len(dataMisc) == 3:
for index, val in enumerate(dataMisc):
try:
fieldName = ("maxVelocity", "signatureRadius", "radius")[index]
except IndexError:
break
val = float(val) if val else 0
if fieldName != "signatureRadius" and math.isinf(val):
val = 0
fields[fieldName] = val
if len(fields) in (4, 7): # Avoid possible blank lines
if name.strip() in lookup:
pattern = lookup[name.strip()]
pattern.update(**fields)
eos.db.save(pattern)
else:
pattern = TargetProfile(**fields)
pattern.rawName = name.strip()
eos.db.save(pattern)
patterns.append(pattern)
eos.db.commit()
return patterns, numPatterns
EXPORT_FORMAT = "TargetProfile = %s,%.1f,%.1f,%.1f,%.1f,%.1f,%.1f,%.1f\n"
@classmethod
def exportPatterns(cls, *patterns):
out = "# Exported from pyfa\n#\n"
out += "# Values are in following format:\n"
out += "# TargetProfile = [name],[EM %],[Thermal %],[Kinetic %],[Explosive %],[Max velocity m/s],[Signature radius m],[Radius m]\n\n"
for dp in patterns:
out += cls.EXPORT_FORMAT % (
dp.rawName,
dp.emAmount * 100,
dp.thermalAmount * 100,
dp.kineticAmount * 100,
dp.explosiveAmount * 100,
dp.maxVelocity,
dp.signatureRadius,
dp.radius
)
return out.strip()
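    # Example of a single exported line produced by EXPORT_FORMAT
    # (illustrative values only, not taken from the built-in data above):
    #   TargetProfile = [Generic]Example,25.0,25.0,25.0,25.0,1200.0,120.0,40.0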
@property
def name(self):
return self.rawName
@property
def fullName(self):
categories, tail = self.__parseRawName()
return '{}{}'.format(''.join('[{}]'.format(c) for c in categories), tail)
@property
def shortName(self):
return self.__parseRawName()[1]
@property
def hierarchy(self):
return self.__parseRawName()[0]
def __parseRawName(self):
hierarchy = []
remainingName = self.rawName.strip() if self.rawName else ''
while True:
start, end = remainingName.find('['), remainingName.find(']')
if start == -1 or end == -1:
return hierarchy, remainingName
splitter = remainingName.find('|')
if splitter != -1 and splitter == start - 1:
return hierarchy, remainingName[1:]
hierarchy.append(remainingName[start + 1:end])
remainingName = remainingName[end + 1:].strip()
def __deepcopy__(self, memo):
p = TargetProfile(
self.emAmount, self.thermalAmount, self.kineticAmount, self.explosiveAmount,
self._maxVelocity, self._signatureRadius, self._radius)
p.rawName = "%s copy" % self.rawName
return p
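# --- Illustrative usage sketch (not part of the original module; assumes an
# initialised eos database layer, since importPatterns() calls eos.db) ---
# text = "TargetProfile = [Example]Dummy,25,25,25,25,1200,120,40"
# patterns, count = TargetProfile.importPatterns(text)
# print(TargetProfile.exportPatterns(*patterns))
# print(TargetProfile.getBuiltinById(-100).fullName)   # a built-in Abyssal profile
# ideal = TargetProfile.getIdeal()                      # zero-resist reference target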
| gpl-3.0 | 6,335,731,424,209,932,000 | 48.666667 | 141 | 0.554412 | false |
napalm-automation/napalm-yang | napalm_yang/models/openconfig/network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/mt_isn/neighbors/neighbor/subTLVs/subTLVs_/bandwidth_constraints/__init__.py | 1 | 12844 | # -*- coding: utf-8 -*-
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improved)
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
from . import state
class bandwidth_constraints(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/protocols/protocol/isis/levels/level/link-state-database/lsp/tlvs/tlv/mt-isn/neighbors/neighbor/subTLVs/subTLVs/bandwidth-constraints. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: This container defines bandwidth-constraints. For DS-TE, the
existing Maximum Reservable link bandwidth parameter is retained,
but its semantics is generalized and interpreted as the aggregate
bandwidth constraint across all Class-Types
"""
__slots__ = ("_path_helper", "_extmethods", "__state")
_yang_name = "bandwidth-constraints"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"isis",
"levels",
"level",
"link-state-database",
"lsp",
"tlvs",
"tlv",
"mt-isn",
"neighbors",
"neighbor",
"subTLVs",
"subTLVs",
"bandwidth-constraints",
]
def _get_state(self):
"""
Getter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/mt_isn/neighbors/neighbor/subTLVs/subTLVs/bandwidth_constraints/state (container)
YANG Description: State parameters of IS Extended Reachability sub-TLV 22.
"""
return self.__state
def _set_state(self, v, load=False):
"""
Setter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/mt_isn/neighbors/neighbor/subTLVs/subTLVs/bandwidth_constraints/state (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_state is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_state() directly.
YANG Description: State parameters of IS Extended Reachability sub-TLV 22.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """state must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=False)""",
}
)
self.__state = t
if hasattr(self, "_set"):
self._set()
def _unset_state(self):
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
state = __builtin__.property(_get_state)
_pyangbind_elements = OrderedDict([("state", state)])
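# NOTE: the module re-declares bandwidth_constraints below; the second copy is
# generated from the openconfig-network-instance-l2 variant of the same YANG path
# and rebinds the class name when the module is imported.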
from . import state
class bandwidth_constraints(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/protocols/protocol/isis/levels/level/link-state-database/lsp/tlvs/tlv/mt-isn/neighbors/neighbor/subTLVs/subTLVs/bandwidth-constraints. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: This container defines bandwidth-constraints. For DS-TE, the
existing Maximum Reservable link bandwidth parameter is retained,
but its semantics is generalized and interpreted as the aggregate
bandwidth constraint across all Class-Types
"""
__slots__ = ("_path_helper", "_extmethods", "__state")
_yang_name = "bandwidth-constraints"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"isis",
"levels",
"level",
"link-state-database",
"lsp",
"tlvs",
"tlv",
"mt-isn",
"neighbors",
"neighbor",
"subTLVs",
"subTLVs",
"bandwidth-constraints",
]
def _get_state(self):
"""
Getter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/mt_isn/neighbors/neighbor/subTLVs/subTLVs/bandwidth_constraints/state (container)
YANG Description: State parameters of IS Extended Reachability sub-TLV 22.
"""
return self.__state
def _set_state(self, v, load=False):
"""
Setter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/mt_isn/neighbors/neighbor/subTLVs/subTLVs/bandwidth_constraints/state (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_state is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_state() directly.
YANG Description: State parameters of IS Extended Reachability sub-TLV 22.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """state must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=False)""",
}
)
self.__state = t
if hasattr(self, "_set"):
self._set()
def _unset_state(self):
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
state = __builtin__.property(_get_state)
_pyangbind_elements = OrderedDict([("state", state)])
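# Illustrative usage sketch (not part of the generated bindings; uses only the
# methods defined above):
# bc = bandwidth_constraints()
# print(bc._path())   # the YANG path components returned by _path()
# print(bc.state)     # read-only container holding sub-TLV 22 state parameters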
| apache-2.0 | 1,062,767,270,325,623,600 | 37.570571 | 375 | 0.583775 | false |
sserrot/champion_relationships | venv/Lib/site-packages/ipykernel/inprocess/blocking.py | 1 | 3068 | """ Implements a fully blocking kernel client.
Useful for test suites and blocking terminal interfaces.
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2012 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING.txt, distributed as part of this software.
#-----------------------------------------------------------------------------
import sys
try:
from queue import Queue, Empty # Py 3
except ImportError:
from Queue import Queue, Empty # Py 2
# IPython imports
from traitlets import Type
# Local imports
from .channels import (
InProcessChannel,
)
from .client import InProcessKernelClient
class BlockingInProcessChannel(InProcessChannel):
def __init__(self, *args, **kwds):
super(BlockingInProcessChannel, self).__init__(*args, **kwds)
self._in_queue = Queue()
def call_handlers(self, msg):
self._in_queue.put(msg)
def get_msg(self, block=True, timeout=None):
""" Gets a message if there is one that is ready. """
if timeout is None:
            # Queue.get(timeout=None) has stupid uninterruptible
# behavior, so wait for a week instead
timeout = 604800
return self._in_queue.get(block, timeout)
def get_msgs(self):
""" Get all messages that are currently ready. """
msgs = []
while True:
try:
msgs.append(self.get_msg(block=False))
except Empty:
break
return msgs
def msg_ready(self):
""" Is there a message that has been received? """
return not self._in_queue.empty()
class BlockingInProcessStdInChannel(BlockingInProcessChannel):
def call_handlers(self, msg):
""" Overridden for the in-process channel.
        This method simply calls raw_input directly.
"""
msg_type = msg['header']['msg_type']
if msg_type == 'input_request':
_raw_input = self.client.kernel._sys_raw_input
prompt = msg['content']['prompt']
print(prompt, end='', file=sys.__stdout__)
sys.__stdout__.flush()
self.client.input(_raw_input())
class BlockingInProcessKernelClient(InProcessKernelClient):
# The classes to use for the various channels.
shell_channel_class = Type(BlockingInProcessChannel)
iopub_channel_class = Type(BlockingInProcessChannel)
stdin_channel_class = Type(BlockingInProcessStdInChannel)
def wait_for_ready(self):
# Wait for kernel info reply on shell channel
while True:
msg = self.shell_channel.get_msg(block=True)
if msg['msg_type'] == 'kernel_info_reply':
self._handle_kernel_info_reply(msg)
break
# Flush IOPub channel
while True:
try:
msg = self.iopub_channel.get_msg(block=True, timeout=0.2)
print(msg['msg_type'])
except Empty:
break
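# Illustrative usage sketch (assumed wiring, not part of this module):
# from ipykernel.inprocess import InProcessKernelManager
# km = InProcessKernelManager()
# km.start_kernel()
# kc = km.client()        # an in-process client built on the blocking channels above
# kc.start_channels()
# kc.wait_for_ready()     # drains the kernel_info reply and early IOPub messages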
| mit | 1,264,620,225,133,396,200 | 31.989247 | 78 | 0.58605 | false |
JNRowe/shell-doctest | shelldoctest/shelldoctest.py | 1 | 8293 | #!/usr/bin/env python
"""
Shell Doctest module.
:Copyright: (c) 2009, the Shell Doctest Team All rights reserved.
:license: BSD, see LICENSE for more details.
"""
import commands
import doctest
import inspect
import re
import subprocess
import sys
master = None
_EXC_WRAPPER = 'system_command("%s")'
def system_command(cmd, shell="bash"):
p = subprocess.Popen('%(shell)s -c "%(cmd)s"' % vars(),
shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
status, stdout, stderr = p.wait(), p.stdout.read().strip(), p.stderr.read().strip()
if status == 0 and stderr == "":
format = "%(stdout)s"
elif stdout != "":
format = "(%(status)d)%(stderr)s\n%(stdout)s"
else:
format = "(%(status)d)%(stderr)s"
result = format % vars()
if sys.version_info < (2, 5):
print result
return
print(result)
class ShellExample(doctest.Example):
def __init__(self, source, want, exc_msg=None, lineno=0, indent=0,
label=None,
options=None):
doctest.Example.__init__(self, source, want, exc_msg=None, lineno=lineno, indent=indent,
options=None)
self.label = label
class ShellDocTestParser(doctest.DocTestParser):
_PROMPT = "$"
_EXC_WRAPPER = _EXC_WRAPPER
_EXAMPLE_RE = re.compile(r'''
# Source consists of a PS1 line followed by zero or more PS2 lines.
(?P<source>
(?:^ (?P<indent> [ ]*)) # PS0 line: indent
(?: \[(?P<label>.+)\]\n)? # PS0 line: label
(?: (?P<user>[\w]*)@(?P<host>[\w\.-]*)\n)? # PS0 line: user@host
(?: [ ]* \$ .*) # PS1 line
(?:\n [ ]* \. [ ].*)*) # PS2 lines
\n?
# Want consists of any non-blank lines that do not start with PS1.
(?P<want> (?:(?![ ]*$) # Not a blank line
(?![ ]*\$) # Not a line starting with PS1
.*$\n? # But any other line
)*)
''', re.MULTILINE | re.VERBOSE)
def parse(self, string, name='<string>'):
string = string.expandtabs()
min_indent = self._min_indent(string)
if min_indent > 0:
string = '\n'.join([l[min_indent:] for l in string.split('\n')])
output = []
charno, lineno = 0, 0
for m in self._EXAMPLE_RE.finditer(string):
output.append(string[charno:m.start()])
lineno += string.count('\n', charno, m.start())
(source, options, want, exc_msg) = \
self._parse_example(m, name, lineno)
if not self._IS_BLANK_OR_COMMENT(source):
source = source.replace("\n","; ")
user = m.group('user')
host = m.group('host')
if host:
if user:
cmd_base = "ssh %(user)s@%(host)s '%(source)s'"
else:
cmd_base = "ssh %(host)s '%(source)s'"
source = cmd_base % vars()
output.append( ShellExample(self._EXC_WRAPPER % source.replace("\n","; "),
want, exc_msg, lineno=lineno,
label=m.group('label'),
indent=min_indent+len(m.group('indent')),
options=options) )
lineno += string.count('\n', m.start(), m.end())
charno = m.end()
output.append(string[charno:])
return output
def _parse_example(self, m, name, lineno):
indent = len(m.group('indent'))
source_lines = [sl for sl in m.group('source').split('\n') if sl.strip()[1] == " "]
self._check_prompt_blank(source_lines, indent, name, lineno)
self._check_prefix(source_lines[1:], ' '*indent + '.', name, lineno)
source = '\n'.join([sl[indent+len(self._PROMPT)+1:] for sl in source_lines])
want = m.group('want')
want_lines = want.split('\n')
if len(want_lines) > 1 and re.match(r' *$', want_lines[-1]):
del want_lines[-1]
self._check_prefix(want_lines, ' '*indent, name,
lineno + len(source_lines))
want = '\n'.join([wl[indent:] for wl in want_lines])
m = self._EXCEPTION_RE.match(want)
if m:
exc_msg = m.group('msg')
else:
exc_msg = None
options = self._find_options(source, name, lineno)
return source, options, want, exc_msg
def _check_prompt_blank(self, lines, indent, name, lineno):
for i, line in enumerate(lines):
if len(line) >= indent+len(self._PROMPT)+1 and line[indent+len(self._PROMPT)] != ' ':
raise ValueError('line %r of the docstring for %s '
'lacks blank after %s: %r' %
(lineno+i+1, name,
line[indent:indent+len(self._PROMPT)], line))
class ShellDocTestRunner(doctest.DocTestRunner):
_EXC_WRAPPER = _EXC_WRAPPER
_BEFORE, _AFTER = [len(i) for i in _EXC_WRAPPER.split("%s")]
def __init__(self, checker=None, verbose=None, verbose_level=None, optionflags=0):
doctest.DocTestRunner.__init__(self, checker=checker, verbose=verbose, optionflags=optionflags)
self._verbose_level = verbose_level
def report_start(self, out, test, example):
source = example.source[self._BEFORE:-(self._AFTER+1)] + "\n"
if self._verbose_level > 1:
out('Label:%s\n' % example.label)
if self._verbose:
if example.want:
out('Trying:\n' + doctest._indent(source) +
'Expecting:\n' + doctest._indent(example.want))
else:
out('Trying:\n' + doctest._indent(source) +
'Expecting nothing\n')
def _failure_header(self, test, example):
out = [self.DIVIDER]
if test.filename:
if test.lineno is not None and example.lineno is not None:
lineno = test.lineno + example.lineno + 1
else:
lineno = '?'
out.append('File "%s", line %s, in %s' %
(test.filename, lineno, test.name))
else:
out.append('Line %s, in %s' % (example.lineno+1, test.name))
out.append('Failed example:')
source = example.source[self._BEFORE:-(self._AFTER+1)] + "\n"
out.append(doctest._indent(source))
return '\n'.join(out)
def testmod(m=None, name=None, globs=None, verbose=None,
report=True, optionflags=doctest.ELLIPSIS, extraglobs=None,
raise_on_error=False, exclude_empty=False,
verbose_level=None, filters=None,
):
if globs == None:
globs = dict()
globs.update({"system_command": system_command})
global master
if m is None:
m = sys.modules.get('__main__')
if not inspect.ismodule(m):
raise TypeError("testmod: module required; %r" % (m,))
if name is None:
name = m.__name__
finder = doctest.DocTestFinder(parser=ShellDocTestParser(), exclude_empty=exclude_empty)
if raise_on_error:
runner = doctest.DebugRunner(verbose=verbose, optionflags=optionflags)
else:
runner = ShellDocTestRunner(verbose=verbose, verbose_level=verbose_level, optionflags=optionflags)
tests = finder.find(m, name, globs=globs, extraglobs=extraglobs)
if filters:
_tests = list()
z = dict([(k,v) for v,k in enumerate(filters)])
for test in tests:
test.examples = sorted(filter(lambda x: x.label in filters, test.examples),
cmp=lambda x,y: cmp(z[x.label], z[y.label]))
_tests.append(test)
tests = _tests
for test in tests:
runner.run(test)
if report:
runner.summarize()
if master is None:
master = runner
else:
master.merge(runner)
if sys.version_info < (2, 6):
return runner.failures, runner.tries
return doctest.TestResults(runner.failures, runner.tries)
if __name__ == "__main__":
testmod()
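# Illustrative example of the syntax this parser accepts (assumed, not part of
# the original file): an optional "[label]" line, then a "$ " prompt line and
# the expected output, as matched by ShellDocTestParser._EXAMPLE_RE.
#
#     def greet():
#         """
#         [hello]
#         $ echo hello
#         hello
#         """
#
# testmod(verbose_level=2) would then run `echo hello` through system_command()
# and compare the captured output against the expected text.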
| bsd-3-clause | 179,705,790,688,534,100 | 40.054455 | 106 | 0.526227 | false |
alfa-addon/addon | plugin.video.alfa/channels/vi2.py | 1 | 11788 | # -*- coding: utf-8 -*-
# -*- Channel Vi2.co -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-
import sys
PY3 = False
if sys.version_info[0] >= 3: PY3 = True; unicode = str; unichr = chr; long = int
import re
import base64
from channelselector import get_thumb
from core import httptools
from core import jsontools
from core import scrapertools
from core import servertools
from core import tmdb
from lib import jsunpack
from core.item import Item
from channels import filtertools
from channels import autoplay
from platformcode import config, logger
IDIOMAS = {'Latino': 'LAT', 'Español':'CAST', 'Subtitulado': 'VOSE', 'VO': 'VO'}
list_language = list(IDIOMAS.values())
list_quality = ['Full HD 1080p',
'HDRip',
'DVDScreener',
'720p',
'Ts Screener hq',
'HD Real 720p',
'DVDRip',
'BluRay-1080p',
'BDremux-1080p']
list_servers = [
'directo',
'openload',
'rapidvideo',
'jawcloud',
'cloudvideo',
'upvid',
'vevio',
'gamovideo'
]
host = 'http://vi2.co'
def mainlist(item):
logger.info()
autoplay.init(item.channel, list_servers, list_quality)
itemlist = []
itemlist.append(Item(channel=item.channel, title='Peliculas', action='select_menu', type='peliculas',
thumbnail= get_thumb('movies', auto=True)))
# itemlist.append(Item(channel=item.channel, title='Series', url=host+'serie', action='select_menu', type='series',
# thumbnail= get_thumb('tvshows', auto=True)))
autoplay.show_option(item.channel, itemlist)
return itemlist
def select_menu(item):
logger.info()
itemlist=[]
url = host + '/%s/es/' % item.type
itemlist.append(Item(channel=item.channel, title='Streaming', action='sub_menu',
thumbnail=get_thumb('all', auto=True), type=item.type))
itemlist.append(Item(channel=item.channel, title='Torrent', action='sub_menu',
thumbnail=get_thumb('all', auto=True), type=item.type))
itemlist.append(Item(channel=item.channel, title='Generos', action='section', url=url,
thumbnail=get_thumb('genres', auto=True), type='all'))
itemlist.append(Item(channel=item.channel, title='Por Año', action='section', url=url,
thumbnail=get_thumb('year', auto=True), type='all'))
return itemlist
def sub_menu(item):
logger.info()
itemlist = []
url = host + '/%s/es/ajax/1/' % item.type
link_type = item.title.lower()
if link_type == 'streaming':
link_type = 'flash'
movies_options = ['Todas', 'Castellano', 'Latino', 'VOSE']
tv_options = ['Ultimas', 'Ultimas Castellano', 'Ultimas Latino', 'Ultimas VOSE']
if item.type == 'peliculas':
title = movies_options
thumb_1 = 'all'
else:
thumb_1 = 'last'
title = tv_options
itemlist.append(Item(channel=item.channel, title=title[0], url=url+'?q=%s' % link_type,
action='list_all', thumbnail=get_thumb(thumb_1, auto=True), type=item.type,
link_type=link_type))
itemlist.append(Item(channel=item.channel, title=title[1],
url=url + '?q=%s+espanol' % link_type, action='list_all',
thumbnail=get_thumb('cast', auto=True), type=item.type, send_lang='Español',
link_type=link_type))
itemlist.append(Item(channel=item.channel, title=title[2],
url=url + '?q=%s+latino' % link_type, action='list_all',
thumbnail=get_thumb('lat', auto=True), type=item.type, send_lang='Latino',
link_type=link_type))
itemlist.append(Item(channel=item.channel, title=title[3],
url=url + '?q=%s+subtitulado' % link_type, action='list_all',
thumbnail=get_thumb('vose', auto=True), type=item.type, send_lang='VOSE',
link_type=link_type))
itemlist.append(Item(channel=item.channel, title="Buscar", action="search", url=url + '?q=',
thumbnail=get_thumb("search", auto=True), type=item.type, link_type=link_type))
return itemlist
def get_source(url, referer=None):
logger.info()
if referer is None:
data = httptools.downloadpage(url).data
else:
data = httptools.downloadpage(url, headers={'Referer':referer}).data
data = re.sub(r'\n|\r|\t| |<br>|\s{2,}', "", data)
return data
def section(item):
logger.info()
itemlist=[]
excluded = ['latino', 'español', 'subtitulado', 'v.o.', 'streaming', 'torrent']
full_data = get_source(item.url)
data = scrapertools.find_single_match(full_data, 'toptags-container(.*?)<div class="android-more-section">')
patron = 'href="([^"]+)">([^<]+)<'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle in matches:
title = scrapedtitle
url = host+scrapedurl.replace('/?','/ajax/1/?')
if (item.title=='Generos' and title.lower() not in excluded and not title.isdigit()) or (item.title=='Por Año' and title.isdigit()):
itemlist.append(Item(channel=item.channel, url=url, title=title, action='list_all', type=item.type))
return itemlist
def list_all(item):
from core import jsontools
logger.info()
itemlist = []
listed =[]
quality=''
infoLabels = {}
json_data= jsontools.load(get_source(item.url))
data = json_data['render']
data = re.sub(r'\n|\r|\t| |<br>|\s{2,}', "", data)
#if item.type == 'peliculas':
patron = '<img\s*class="cover"[^>]+src="([^"]+)"\s*data-id="\d+"\s*'
patron +='alt="Ver\s*([^\(]+)(.*?)">\s*'
patron += '<div\s*class="mdl-card__menu">\s*<a\s*class="clean-link"\s*href="([^"]+)">'
patron += '.*?<span\s*class="link-size">([^<]*)<'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedthumbnail, scrapedtitle, extra_info, scrapedurl , size in matches:
if item.send_lang != '':
lang = item.send_lang
else:
lang = ''
year='-'
extra_info = extra_info.replace('(', '|').replace('[','|').replace(')','').replace(']','')
extra_info = extra_info.split('|')
for info in extra_info:
info = info.strip()
if 'Rip' in info or '1080' in info or '720' in info or 'Screener' in info:
quality = info
if 'ingl' in info.lower():
info = 'VO'
if info in IDIOMAS:
lang = info
elif info.isdigit():
year = info
if lang in IDIOMAS:
lang = IDIOMAS[lang]
title = '%s' % scrapedtitle.strip()
if not config.get_setting('unify'):
if year.isdigit():
title = '%s [%s]' % (title, year)
if quality != '':
title = '%s [%s]' % (title, quality)
if lang != '':
title = '%s [%s]' % (title, lang)
thumbnail = host+scrapedthumbnail
url = host+scrapedurl
if item.type == 'series':
season, episode = scrapertools.find_single_match(scrapedtitle, '(\d+)x(\d+)')
infoLabels['season'] = season
infoLabels['episode'] = episode
else:
infoLabels['year'] = year
if title not in listed:
new_item = Item(channel=item.channel,
title=title,
url=url,
action='findvideos',
thumbnail=thumbnail,
type=item.type,
language = lang,
quality=quality,
link_type=item.link_type,
torrent_data= size,
infoLabels = infoLabels
)
if item.type == 'peliculas' or item.type == 'all':
new_item.contentTitle = scrapedtitle
else:
scrapedtitle = scrapedtitle.split(' - ')
new_item.contentSerieName = scrapedtitle[0]
itemlist.append(new_item)
listed.append(title)
tmdb.set_infoLabels(itemlist, seekTmdb=True)
itemlist.sort(key=lambda it: it.title)
    # Pagination
if json_data['next']:
actual_page = scrapertools.find_single_match(item.url, 'ajax/(\d+)/')
next_page =int(actual_page) + 1
url_next_page = item.url.replace('ajax/%s' % actual_page, 'ajax/%s' % next_page)
itemlist.append(item.clone(title="Siguiente >>", url=url_next_page, type=item.type,
action='list_all', send_lang=item.send_lang))
return itemlist
def findvideos(item):
logger.info()
import base64
itemlist = []
server = ''
data = get_source(item.url)
pre_url = scrapertools.find_single_match(data, 'class="inside-link" href="([^"]+)".*?<button type="button"')
data = get_source(host+pre_url)
patron = 'data-video="([^"]+)"'
matches = re.compile(patron, re.DOTALL).findall(data)
lang = item.language
quality = item.quality
for url in matches:
title = ''
link_type = ''
server = ''
url = base64.b64decode(url.encode('utf8')).decode('utf8')
if 'torrent' in url:
if item.link_type == 'torrent' or item.type == 'all':
server = 'torrent'
link_type = 'torrent'
title = ' [%s]' % item.torrent_data
elif 'torrent' not in url:
link_type = 'flash'
if link_type == item.link_type.lower() or item.type == 'all':
itemlist.append(Item(channel=item.channel, url=url, title='%s'+title, action='play', server=server,
language=lang, quality=quality, infoLabels=item.infoLabels))
itemlist = servertools.get_servers_itemlist(itemlist, lambda x: x.title % x.server.capitalize())
# Requerido para FilterTools
itemlist = filtertools.get_links(itemlist, item, list_language)
# Requerido para AutoPlay
autoplay.start(itemlist, item)
itemlist = sorted(itemlist, key=lambda it: it.language)
if item.contentType != 'episode':
if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
itemlist.append(
Item(channel=item.channel, title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]', url=item.url,
action="add_pelicula_to_library", extra="findvideos", contentTitle=item.contentTitle))
return itemlist
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
url = '%spelicula+%s+%s&o=2' % (item.url, texto, item.link_type)
    # Global search patch (only works for streaming movies)
if not item.url:
item.type = 'peliculas'
item.link_type = 'flash'
ajax = '%s/%s/es/ajax/1/' % (host, item.type)
url = '%s?q=%s+%s+%s&o=2' % (ajax, item.type, texto, item.link_type)
item.url = url
try:
return list_all(item)
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
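# Typical flow inside the Alfa framework (illustrative summary, not executable here):
# mainlist() builds the root menus, list_all() walks the AJAX listings and tags each
# Item with language/quality, findvideos() base64-decodes the "data-video" links and
# hands them to servertools/autoplay, and search() rewrites the query URL before
# delegating back to list_all().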
| gpl-3.0 | -5,339,963,962,902,820,000 | 34.138037 | 140 | 0.54155 | false |
alduxvm/pyMultiWii | pymultiwii/__init__.py | 1 | 17209 | #!/usr/bin/env python
"""multiwii.py: Handles Multiwii Serial Protocol."""
__author__ = "Aldo Vargas"
__copyright__ = "Copyright 2017 Altax.net"
__license__ = "GPL"
__version__ = "1.6"
__maintainer__ = "Aldo Vargas"
__email__ = "[email protected]"
__status__ = "Development"
import serial, time, struct
class MultiWii:
"""Multiwii Serial Protocol message ID"""
""" notice: just attitude, rc channels and raw imu, set raw rc are implemented at the moment """
IDENT = 100
STATUS = 101
RAW_IMU = 102
SERVO = 103
MOTOR = 104
RC = 105
RAW_GPS = 106
COMP_GPS = 107
ATTITUDE = 108
ALTITUDE = 109
ANALOG = 110
RC_TUNING = 111
PID = 112
BOX = 113
MISC = 114
MOTOR_PINS = 115
BOXNAMES = 116
PIDNAMES = 117
WP = 118
BOXIDS = 119
RC_RAW_IMU = 121
SET_RAW_RC = 200
SET_RAW_GPS = 201
SET_PID = 202
SET_BOX = 203
SET_RC_TUNING = 204
ACC_CALIBRATION = 205
MAG_CALIBRATION = 206
SET_MISC = 207
RESET_CONF = 208
SET_WP = 209
SWITCH_RC_SERIAL = 210
IS_SERIAL = 211
DEBUG = 254
VTX_CONFIG = 88
VTX_SET_CONFIG = 89
EEPROM_WRITE = 250
REBOOT = 68
"""Class initialization"""
def __init__(self, serPort):
"""Global variables of data"""
self.PIDcoef = {'rp':0,'ri':0,'rd':0,'pp':0,'pi':0,'pd':0,'yp':0,'yi':0,'yd':0}
self.rcChannels = {'roll':0,'pitch':0,'yaw':0,'throttle':0,'elapsed':0,'timestamp':0}
self.rawIMU = {'ax':0,'ay':0,'az':0,'gx':0,'gy':0,'gz':0,'mx':0,'my':0,'mz':0,'elapsed':0,'timestamp':0}
self.motor = {'m1':0,'m2':0,'m3':0,'m4':0,'elapsed':0,'timestamp':0}
self.attitude = {'angx':0,'angy':0,'heading':0,'elapsed':0,'timestamp':0}
self.altitude = {'estalt':0,'vario':0,'elapsed':0,'timestamp':0}
self.message = {'angx':0,'angy':0,'heading':0,'roll':0,'pitch':0,'yaw':0,'throttle':0,'elapsed':0,'timestamp':0}
self.vtxConfig = {'device':0, 'band':0, 'channel':0, 'power':0, 'pit':0, 'unknown':0}
        self.temp = ()
        self.temp2 = ()
self.elapsed = 0
self.PRINT = 1
self.ser = serial.Serial()
self.ser.port = serPort
self.ser.baudrate = 115200
self.ser.bytesize = serial.EIGHTBITS
self.ser.parity = serial.PARITY_NONE
self.ser.stopbits = serial.STOPBITS_ONE
self.ser.timeout = 0
self.ser.xonxoff = False
self.ser.rtscts = False
self.ser.dsrdtr = False
self.ser.writeTimeout = 2
"""Time to wait until the board becomes operational"""
wakeup = 2
try:
self.ser.open()
if self.PRINT:
print ("Waking up board on "+self.ser.port+"...")
for i in range(1,wakeup):
if self.PRINT:
print (wakeup-i)
time.sleep(1)
else:
time.sleep(1)
except Exception as error:
print ("\n\nError opening "+self.ser.port+" port.\n"+str(error)+"\n\n")
"""Function for sending a command to the board"""
def sendCMD(self, data_length, code, data, data_format):
checksum = 0
total_data = ['$'.encode('utf-8'), 'M'.encode('utf-8'), '<'.encode('utf-8'), data_length, code] + data
for i in struct.pack('<2B' + data_format, *total_data[3:len(total_data)]):
checksum = checksum ^ i
total_data.append(checksum)
try:
b = None
b = self.ser.write(struct.pack('<3c2B'+ data_format + 'B', *total_data))
except Exception as error:
print ("\n\nError in sendCMD.")
print ("("+str(error)+")\n\n")
pass
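    # Wire format assembled by sendCMD above (MultiWii Serial Protocol v1):
    # the ASCII preamble '$M<', one byte of payload length, one byte of command
    # code, the payload packed according to data_format, and finally an XOR
    # checksum computed over the length, code and payload bytes.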
"""Function for sending a command to the board and receive attitude"""
"""
Modification required on Multiwii firmware to Protocol.cpp in evaluateCommand:
case MSP_SET_RAW_RC:
s_struct_w((uint8_t*)&rcSerial,16);
rcSerialCount = 50; // 1s transition
s_struct((uint8_t*)&att,6);
break;
"""
def sendCMDreceiveATT(self, data_length, code, data):
checksum = 0
total_data = ['$'.encode('utf-8'), 'M'.encode('utf-8'), '<'.encode('utf-8'), data_length, code] + data
for i in struct.pack('<2B%dH' % len(data), *total_data[3:len(total_data)]):
checksum = checksum ^ i
total_data.append(checksum)
try:
start = time.time()
b = None
b = self.ser.write(struct.pack('<3c2B%dHB' % len(data), *total_data))
while True:
header = self.ser.read().decode('utf-8')
if header == '$':
header = header+self.ser.read(2).decode('utf-8')
break
datalength = struct.unpack('<b', self.ser.read())[0]
code = struct.unpack('<b', self.ser.read())
data = self.ser.read(datalength)
temp = struct.unpack('<'+'h'*int(datalength/2),data)
self.ser.flushInput()
self.ser.flushOutput()
elapsed = time.time() - start
self.attitude['angx']=float(temp[0]/10.0)
self.attitude['angy']=float(temp[1]/10.0)
self.attitude['heading']=float(temp[2])
self.attitude['elapsed']=round(elapsed,3)
self.attitude['timestamp']="%0.2f" % (time.time(),)
return self.attitude
except Exception as error:
print ("\n\nError in sendCMDreceiveATT.")
print ("("+str(error)+")\n\n")
pass
"""Function to arm / disarm """
"""
Modification required on Multiwii firmware to Protocol.cpp in evaluateCommand:
case MSP_SET_RAW_RC:
s_struct_w((uint8_t*)&rcSerial,16);
rcSerialCount = 50; // 1s transition
s_struct((uint8_t*)&att,6);
break;
"""
def arm(self):
timer = 0
start = time.time()
while timer < 0.5:
data = [1500,1500,2000,1000]
self.sendCMD(8,MultiWii.SET_RAW_RC,data)
time.sleep(0.05)
timer = timer + (time.time() - start)
start = time.time()
def disarm(self):
timer = 0
start = time.time()
while timer < 0.5:
data = [1500,1500,1000,1000]
self.sendCMD(8,MultiWii.SET_RAW_RC,data)
time.sleep(0.05)
timer = timer + (time.time() - start)
start = time.time()
def setPID(self,pd):
nd=[]
        for i in range(1, len(pd), 2):
nd.append(pd[i]+pd[i+1]*256)
data = pd
print ("PID sending:", data)
self.sendCMD(30,MultiWii.SET_PID,data)
self.sendCMD(0,MultiWii.EEPROM_WRITE,[])
def setVTX(self,band,channel,power):
band_channel = ((band-1) << 3)|(channel-1)
t = None
while t == None :
t = self.getData(MultiWii.VTX_CONFIG)
different = (self.vtxConfig['band'] != band) | (self.vtxConfig['channel'] != channel) | (self.vtxConfig['power'] != power)
data = [band_channel,power,self.vtxConfig['pit']]
while different :
self.sendCMD(4,MultiWii.VTX_SET_CONFIG,data, 'H2B')
time.sleep(1)
self.sendCMD(0,MultiWii.EEPROM_WRITE,[],'')
self.ser.close()
time.sleep(3)
self.ser.open()
time.sleep(3)
t = None
while t == None :
t = self.getData(MultiWii.VTX_CONFIG)
print(t)
different = (self.vtxConfig['band'] != band) | (self.vtxConfig['channel'] != channel) | (self.vtxConfig['power'] != power)
"""Function to receive a data packet from the board"""
def getData(self, cmd):
try:
start = time.time()
self.sendCMD(0,cmd,[],'')
while True:
header = self.ser.read().decode('utf-8')
if header == '$':
header = header+self.ser.read(2).decode('utf-8')
break
datalength = struct.unpack('<b', self.ser.read())[0]
code = struct.unpack('<b', self.ser.read())
data = self.ser.read(datalength)
self.ser.flushInput()
self.ser.flushOutput()
elapsed = time.time() - start
if cmd == MultiWii.ATTITUDE:
temp = struct.unpack('<'+'h'*int(datalength/2),data)
self.attitude['angx']=float(temp[0]/10.0)
self.attitude['angy']=float(temp[1]/10.0)
self.attitude['heading']=float(temp[2])
self.attitude['elapsed']=round(elapsed,3)
self.attitude['timestamp']="%0.2f" % (time.time(),)
return self.attitude
elif cmd == MultiWii.ALTITUDE:
temp = struct.unpack('<'+'h'*int(datalength/2),data)
self.altitude['estalt']=float(temp[0])
self.altitude['vario']=float(temp[1])
self.altitude['elapsed']=round(elapsed,3)
self.altitude['timestamp']="%0.2f" % (time.time(),)
return self.altitude
elif cmd == MultiWii.RC:
temp = struct.unpack('<'+'h'*int(datalength/2),data)
self.rcChannels['roll']=temp[0]
self.rcChannels['pitch']=temp[1]
self.rcChannels['yaw']=temp[2]
self.rcChannels['throttle']=temp[3]
self.rcChannels['elapsed']=round(elapsed,3)
self.rcChannels['timestamp']="%0.2f" % (time.time(),)
return self.rcChannels
elif cmd == MultiWii.RAW_IMU:
temp = struct.unpack('<'+'h'*int(datalength/2),data)
self.rawIMU['ax']=float(temp[0])
self.rawIMU['ay']=float(temp[1])
self.rawIMU['az']=float(temp[2])
self.rawIMU['gx']=float(temp[3])
self.rawIMU['gy']=float(temp[4])
self.rawIMU['gz']=float(temp[5])
self.rawIMU['mx']=float(temp[6])
self.rawIMU['my']=float(temp[7])
self.rawIMU['mz']=float(temp[8])
self.rawIMU['elapsed']=round(elapsed,3)
self.rawIMU['timestamp']="%0.2f" % (time.time(),)
return self.rawIMU
elif cmd == MultiWii.MOTOR:
temp = struct.unpack('<'+'h'*int(datalength/2),data)
self.motor['m1']=float(temp[0])
self.motor['m2']=float(temp[1])
self.motor['m3']=float(temp[2])
self.motor['m4']=float(temp[3])
self.motor['elapsed']="%0.3f" % (elapsed,)
self.motor['timestamp']="%0.2f" % (time.time(),)
return self.motor
elif cmd == MultiWii.PID:
temp = struct.unpack('<'+'h'*int(datalength/2),data)
dataPID=[]
if len(temp)>1:
d=0
for t in temp:
dataPID.append(t%256)
dataPID.append(t/256)
for p in [0,3,6,9]:
dataPID[p]=dataPID[p]/10.0
dataPID[p+1]=dataPID[p+1]/1000.0
                self.PIDcoef['rp'] = dataPID[0]
                self.PIDcoef['ri'] = dataPID[1]
                self.PIDcoef['rd'] = dataPID[2]
                self.PIDcoef['pp'] = dataPID[3]
                self.PIDcoef['pi'] = dataPID[4]
                self.PIDcoef['pd'] = dataPID[5]
                self.PIDcoef['yp'] = dataPID[6]
                self.PIDcoef['yi'] = dataPID[7]
                self.PIDcoef['yd'] = dataPID[8]
return self.PIDcoef
elif cmd == MultiWii.VTX_CONFIG:
if datalength > 1:
temp = struct.unpack('<bbbbb',data)
self.vtxConfig['device'] = temp[0]
self.vtxConfig['band'] = temp[1]
self.vtxConfig['channel'] = temp[2]
self.vtxConfig['power'] = temp[3]
self.vtxConfig['pit'] = temp[4]
self.vtxConfig['unknown'] = 0
return self.vtxConfig
else:
temp = struct.unpack('<b',data)
self.vtxConfig['unknown'] = temp[0]
return self.vtxConfig
else:
return "No return error!"
except Exception as error:
print (error)
pass
"""Function to receive a data packet from the board. Note: easier to use on threads"""
def getDataInf(self, cmd):
while True:
try:
start = time.clock()
self.sendCMD(0,cmd,[])
while True:
header = self.ser.read().decode('utf-8')
if header == '$':
header = header+self.ser.read(2).decode('utf-8')
break
datalength = struct.unpack('<b', self.ser.read())[0]
code = struct.unpack('<b', self.ser.read())
data = self.ser.read(datalength)
temp = struct.unpack('<'+'h'*int(datalength/2),data)
elapsed = time.clock() - start
self.ser.flushInput()
self.ser.flushOutput()
if cmd == MultiWii.ATTITUDE:
self.attitude['angx']=float(temp[0]/10.0)
self.attitude['angy']=float(temp[1]/10.0)
self.attitude['heading']=float(temp[2])
self.attitude['elapsed']="%0.3f" % (elapsed,)
self.attitude['timestamp']="%0.2f" % (time.time(),)
elif cmd == MultiWii.RC:
self.rcChannels['roll']=temp[0]
self.rcChannels['pitch']=temp[1]
self.rcChannels['yaw']=temp[2]
self.rcChannels['throttle']=temp[3]
self.rcChannels['elapsed']="%0.3f" % (elapsed,)
self.rcChannels['timestamp']="%0.2f" % (time.time(),)
elif cmd == MultiWii.RAW_IMU:
self.rawIMU['ax']=float(temp[0])
self.rawIMU['ay']=float(temp[1])
self.rawIMU['az']=float(temp[2])
self.rawIMU['gx']=float(temp[3])
self.rawIMU['gy']=float(temp[4])
self.rawIMU['gz']=float(temp[5])
self.rawIMU['elapsed']="%0.3f" % (elapsed,)
self.rawIMU['timestamp']="%0.2f" % (time.time(),)
elif cmd == MultiWii.MOTOR:
self.motor['m1']=float(temp[0])
self.motor['m2']=float(temp[1])
self.motor['m3']=float(temp[2])
self.motor['m4']=float(temp[3])
self.motor['elapsed']="%0.3f" % (elapsed,)
self.motor['timestamp']="%0.2f" % (time.time(),)
except Exception as error:
print(error)
pass
"""Function to ask for 2 fixed cmds, attitude and rc channels, and receive them. Note: is a bit slower than others"""
def getData2cmd(self, cmd):
try:
start = time.time()
self.sendCMD(0,self.ATTITUDE,[])
while True:
header = self.ser.read().decode('utf-8')
if header == '$':
header = header+self.ser.read(2).decode('utf-8')
break
datalength = struct.unpack('<b', self.ser.read())[0]
code = struct.unpack('<b', self.ser.read())
data = self.ser.read(datalength)
temp = struct.unpack('<'+'h'*int(datalength/2),data)
self.ser.flushInput()
self.ser.flushOutput()
self.sendCMD(0,self.RC,[])
while True:
header = self.ser.read().decode('utf-8')
if header == '$':
header = header+self.ser.read(2).decode('utf-8')
break
datalength = struct.unpack('<b', self.ser.read())[0]
code = struct.unpack('<b', self.ser.read())
data = self.ser.read(datalength)
temp2 = struct.unpack('<'+'h'*int(datalength/2),data)
elapsed = time.time() - start
self.ser.flushInput()
self.ser.flushOutput()
if cmd == MultiWii.ATTITUDE:
self.message['angx']=float(temp[0]/10.0)
self.message['angy']=float(temp[1]/10.0)
self.message['heading']=float(temp[2])
self.message['roll']=temp2[0]
self.message['pitch']=temp2[1]
self.message['yaw']=temp2[2]
self.message['throttle']=temp2[3]
self.message['elapsed']=round(elapsed,3)
self.message['timestamp']="%0.2f" % (time.time(),)
return self.message
else:
return "No return error!"
except Exception as error:
print (error)
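# --- Illustrative usage sketch (the serial port name is an assumption) ---
# if __name__ == "__main__":
#     board = MultiWii("/dev/ttyUSB0")
#     try:
#         while True:
#             print(board.getData(MultiWii.ATTITUDE))
#     except KeyboardInterrupt:
#         board.ser.close()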
| gpl-3.0 | -4,195,867,136,472,903,000 | 39.302108 | 134 | 0.490964 | false |
ryfeus/lambda-packs | pytorch/source/caffe2/python/transformations_test.py | 1 | 12467 | # Copyright (c) 2016-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from hypothesis import given
import hypothesis.strategies as st
import numpy as np
from caffe2.python.transformations import Transformer
from caffe2.python import core, workspace
from caffe2.python import test_util as tu
transformer = Transformer()
class TestTransformations(tu.TestCase):
def _base_test_net(self):
net = core.Net("net")
net.Conv(["X", "w", "b"], ["Y"], stride=1, pad=0, kernel=3, order="NCHW")
return net
def _add_nnpack(self, net):
transformer.AddNNPACK(net)
assert tu.str_compare(net.Proto().op[0].engine, "NNPACK")
def _fuse_nnpack_convrelu(self, net, expected_result_num_ops,
expected_activation_arg=True):
self._add_nnpack(net)
transformer.FuseNNPACKConvRelu(net)
self.assertEquals(tu.numOps(net), expected_result_num_ops)
has_activation_arg = False
for arg in net.Proto().op[0].arg:
if tu.str_compare(arg.name, "activation"):
assert tu.str_compare(arg.s, "Relu")
has_activation_arg = True
if expected_activation_arg:
assert has_activation_arg
else:
assert not has_activation_arg
def test_transformer_AddNNPACK(self):
net = self._base_test_net()
net.Relu(["Y"], ["Y2"])
self._add_nnpack(net)
def test_transformer_FuseNNPACKConvRelu(self):
net = self._base_test_net()
net.Relu(["Y"], ["Y2"])
self._fuse_nnpack_convrelu(net, 1)
def test_noFuseNNPACKConvRelu(self):
net = self._base_test_net()
net.Relu(["Y"], ["Y2"])
net.Relu(["Y"], ["Y3"])
self._fuse_nnpack_convrelu(net, 3, expected_activation_arg=False)
def test_transformer_FuseNNPACKConvReluNoInplace(self):
net = self._base_test_net()
net.Relu(["Y"], ["X"])
self._fuse_nnpack_convrelu(net, 1)
assert net.Proto().op[0].output[0] != net.Proto().op[0].input[0]
def test_transformer_FuseNNPACKConvReluInplaceRelu(self):
net = self._base_test_net()
net.Relu(["Y"], ["Y"])
self._fuse_nnpack_convrelu(net, 1)
assert net.Proto().op[0].output[0] != net.Proto().op[0].input[0]
def test_transformer_FuseNNPACKConvReluPingPongNaming(self):
net = self._base_test_net()
net.Relu(["Y"], ["X"])
net.Conv(["X", "w", "b"], ["Y"], stride=1, pad=0, kernel=3, order="NCHW")
self._fuse_nnpack_convrelu(net, 2)
assert net.Proto().op[0].output[0] != net.Proto().op[0].input[0]
assert net.Proto().op[1].output[0] != net.Proto().op[1].input[0]
def test_transformer_FuseNNPACKConvReluFollowedByMultipleInputOp(self):
net = self._base_test_net()
net.Relu(["Y"], ["Y2"])
net.Conv(["Y2", "w", "b"], ["Y"], stride=1, pad=0, kernel=3, order="NCHW")
net.Relu(["Y"], ["Y2"])
self._fuse_nnpack_convrelu(net, 2)
assert net.Proto().op[0].output[0] != net.Proto().op[0].input[0]
assert net.Proto().op[1].output[0] != net.Proto().op[1].input[0]
def test_transformer_FuseNNPACKConvReluInplaceFollowedByMultipleInputOp(self):
net = self._base_test_net()
net.Relu(["Y"], ["Y"])
net.Conv(["Y", "w", "b"], ["Y2"], stride=1, pad=0, kernel=3, order="NCHW")
net.Relu(["Y2"], ["Y2"])
self._fuse_nnpack_convrelu(net, 2)
assert net.Proto().op[0].output[0] != net.Proto().op[0].input[0]
assert net.Proto().op[1].output[0] != net.Proto().op[1].input[0]
def test_transformer_SinkMaxPool(self):
net = self._base_test_net()
net.MaxPool(["Y"], ["Y1"], kernel=3)
net.Relu(["Y1"], ["Y1"])
transformer.SinkMaxPool(net)
assert tu.str_compare(net.Proto().op[1].type, "Relu")
assert tu.str_compare(net.Proto().op[2].type, "MaxPool")
@given(
size=st.integers(7, 10),
input_channels=st.integers(1, 10),
seed=st.integers(0, 65535),
order=st.sampled_from(["NCHW", "NHWC"]),
epsilon=st.floats(min_value=1e-5, max_value=1e-2),
)
def test_transformer_FuseConvBN(self, size, input_channels, seed, order, epsilon):
workspace.ResetWorkspace()
net = core.Net("net")
c = input_channels
h = size
w = size
k = 3
net.Conv(["X", "w", "b"], ["Y"], stride=1, pad=0, kernel=k, order=order)
net.SpatialBN(
["Y", "scale", "bias", "mean", "var"],
["Y2"],
is_test=True,
order=order,
epsilon=epsilon,
)
np.random.seed(seed)
if order == "NCHW":
tu.randBlobFloat32("X", 1, c, h, w)
tu.randBlobFloat32("w", c, c, k, k)
else:
tu.randBlobFloat32("X", 1, h, w, c)
tu.randBlobFloat32("w", c, k, k, c)
tu.randBlobsFloat32(["b", "scale", "bias", "mean"], c)
# This is necessary because 1/sqrt(var) is used and if var is too small
# we get floating point artifacts that cause test failures
tu.randBlobFloat32("var", c, offset=0.5)
workspace.RunNetOnce(net)
preTransformOutput = workspace.FetchBlob("Y2").flatten()
workspace.FeedBlob("Y2", np.zeros((1, 1)))
transformer.FuseConvBN(net)
# Ensure fusion
assert tu.numOps(net) == 1
workspace.RunNetOnce(net)
postTransformOutput = workspace.FetchBlob("Y2").flatten()
# Check that there is no numerical difference
assert np.allclose(
preTransformOutput,
postTransformOutput,
rtol=5e-02,
atol=1e-03
)
@given(
size=st.integers(7, 10),
input_channels=st.integers(1, 10),
seed=st.integers(0, 65535),
order=st.sampled_from(["NCHW", "NHWC"]),
epsilon=st.floats(min_value=1e-5, max_value=1e-2),
)
def test_transformer_FuseConvBNNoConvBias(self, size, input_channels, seed, order, epsilon):
workspace.ResetWorkspace()
net = core.Net("net")
c = input_channels
h = size
w = size
k = 3
net.Conv(["X", "w"], ["Y"], stride=1, pad=0, kernel=k, order=order)
net.SpatialBN(
["Y", "scale", "bias", "mean", "var"],
["Y2"],
is_test=True,
order=order,
epsilon=epsilon,
)
np.random.seed(seed)
if order == "NCHW":
tu.randBlobFloat32("X", 1, c, h, w)
tu.randBlobFloat32("w", c, c, k, k)
else:
tu.randBlobFloat32("X", 1, h, w, c)
tu.randBlobFloat32("w", c, k, k, c)
tu.randBlobsFloat32(["scale", "bias", "mean"], c)
# This is necessary because 1/sqrt(var) is used and if var is too small
# we get floating point artifacts that cause test failures
tu.randBlobFloat32("var", c, offset=0.5)
workspace.RunNetOnce(net)
preTransformOutput = workspace.FetchBlob("Y2").flatten()
workspace.FeedBlob("Y2", np.zeros((1, 1)))
transformer.FuseConvBN(net)
# Ensure fusion
assert tu.numOps(net) == 1
workspace.RunNetOnce(net)
postTransformOutput = workspace.FetchBlob("Y2").flatten()
# Check that there is no numerical difference
assert np.allclose(
preTransformOutput,
postTransformOutput,
rtol=5e-02,
atol=1e-03
)
@given(
size=st.integers(7, 10),
input_channels=st.integers(1, 10),
seed=st.integers(0, 65535),
order=st.sampled_from(["NCHW", "NHWC"]),
epsilon=st.floats(min_value=1e-5, max_value=1e-2),
)
def test_transformer_FuseConvBNNoConvBiasDuplicatedName(self, size, input_channels, seed, order, epsilon):
workspace.ResetWorkspace()
net = core.Net("net")
c = input_channels
h = size
w = size
k = 3
net.Conv(["X", "w"], ["Y"], stride=1, pad=0, kernel=k, order=order)
net.SpatialBN(
["Y", "scale", "_bias0", "mean", "var"],
["Y2"],
is_test=True,
order=order,
epsilon=epsilon,
)
np.random.seed(seed)
if order == "NCHW":
tu.randBlobFloat32("X", 1, c, h, w)
tu.randBlobFloat32("w", c, c, k, k)
else:
tu.randBlobFloat32("X", 1, h, w, c)
tu.randBlobFloat32("w", c, k, k, c)
tu.randBlobsFloat32(["scale", "_bias0", "mean"], c)
# This is necessary because 1/sqrt(var) is used and if var is too small
# we get floating point artifacts that cause test failures
tu.randBlobFloat32("var", c, offset=0.5)
workspace.RunNetOnce(net)
preTransformOutput = workspace.FetchBlob("Y2").flatten()
workspace.FeedBlob("Y2", np.zeros((1, 1)))
transformer.FuseConvBN(net)
# Ensure fusion
assert tu.numOps(net) == 1
workspace.RunNetOnce(net)
postTransformOutput = workspace.FetchBlob("Y2").flatten()
print("pre")
print(preTransformOutput)
print("after")
print(postTransformOutput)
# Check that there is no numerical difference
assert np.allclose(
preTransformOutput,
postTransformOutput,
rtol=5e-02,
atol=1e-03
)
@given(
size=st.integers(7, 10),
input_channels=st.integers(1, 10),
kt=st.integers(3, 5),
kh=st.integers(3, 5),
kw=st.integers(3, 5),
seed=st.integers(0, 65535),
epsilon=st.floats(min_value=1e-5, max_value=1e-2),
)
def test_transformer_FuseConv3DBN(
self, size, input_channels, kt, kh, kw, seed, epsilon
):
workspace.ResetWorkspace()
net = core.Net("net")
c = input_channels
t = size
h = size
w = size
net.Conv(
["X", "w", "b"],
["Y"],
kernels=[kt, kh, kw],
)
net.SpatialBN(
["Y", "scale", "bias", "mean", "var"],
["Y2"],
is_test=True,
epsilon=epsilon,
)
np.random.seed(seed)
tu.randBlobFloat32("X", 1, c, t, h, w)
tu.randBlobFloat32("w", c, c, kt, kh, kw)
tu.randBlobsFloat32(["b", "scale", "bias", "mean"], c)
# This is necessary because 1/sqrt(var) is used and if var is too small
# we get floating point artifacts that cause test failures
tu.randBlobFloat32("var", c, offset=0.5)
workspace.RunNetOnce(net)
preTransformOutput = workspace.FetchBlob("Y2").flatten()
workspace.FeedBlob("Y2", np.zeros((1, 1)))
transformer.FuseConvBN(net)
# Ensure fusion
assert tu.numOps(net) == 1
workspace.RunNetOnce(net)
postTransformOutput = workspace.FetchBlob("Y2").flatten()
# Check that there is no numerical difference
assert np.allclose(
preTransformOutput,
postTransformOutput,
rtol=1e-02,
atol=1e-04
)
def test_converterEnforceUnusedInputs(self):
net = core.Net("net")
net.Relu(["X"], ["Y"])
net.Proto().external_input.extend(["fake"])
# This should now work
transformer.AddNNPACK(net) # just testing the converter
def test_converterEnforceUnusedOutputs(self):
net = core.Net("net")
net.Relu(["X"], ["Y"])
net.Proto().external_output.extend(["fake"])
with self.assertRaises(Exception):
transformer.AddNNPACK(net) # just testing the converter
| mit | -8,150,191,426,778,888,000 | 35.346939 | 110 | 0.566295 | false |
mio-to/cnambot | simulations/cnambot/cyr_demo02.py | 1 | 2268 | from morse.builder import *
# Land robot
morsy = Morsy()
morsy.translate(2.25, -0.75)
pose = Pose()
pose.translate(x=-0.0, z=0.0)
morsy.append(pose)
gripper = Gripper()
gripper.translate(x=0.2000, z=0.5000)
gripper.properties(Angle=360.0, Distance=0.5);
morsy.append(gripper)
camera = VideoCamera()
camera.translate(x=0.2000, z=0.9000)
camera.properties(cam_width=128, cam_height=128);
morsy.append(camera)
waypoint = Waypoint()
waypoint.translate(x=0.0, y=0.0, z=0.0)
waypoint.properties(AngleTolerance=0.1, Speed=50.0)
morsy.append(waypoint)
motion = MotionVW()
morsy.append(motion)
motion.add_service('socket')
gripper.add_service('socket')
pose.add_service('socket')
camera.add_interface('socket')
waypoint.add_interface('socket')
duke = Morsy()
duke.translate(-18.25, -0.25)
pose = Pose()
pose.translate(x=-0.0, z=0.0)
duke.append(pose)
gripper = Gripper()
gripper.translate(x=0.2000, z=0.5000)
gripper.properties(Angle=360.0, Distance=0.5);
duke.append(gripper)
camera = VideoCamera()
camera.translate(x=0.2000, z=0.9000)
camera.properties(cam_width=128, cam_height=128);
duke.append(camera)
waypoint = Waypoint()
waypoint.translate(x=0.0, y=0.0, z=0.0)
waypoint.properties(AngleTolerance=0.1, Speed=50.0)
duke.append(waypoint)
motion = MotionVW()
duke.append(motion)
motion.add_service('socket')
gripper.add_service('socket')
pose.add_service('socket')
camera.add_interface('socket')
waypoint.add_interface('socket')
lara = Morsy()
lara.translate(-8.75, -9.25)
pose = Pose()
pose.translate(x=-0.0, z=0.0)
lara.append(pose)
gripper = Gripper()
gripper.translate(x=0.2000, z=0.5000)
gripper.properties(Angle=360.0, Distance=0.5);
lara.append(gripper)
camera = VideoCamera()
camera.translate(x=0.2000, z=0.9000)
camera.properties(cam_width=128, cam_height=128);
lara.append(camera)
waypoint = Waypoint()
waypoint.translate(x=0.0, y=0.0, z=0.0)
waypoint.properties(AngleTolerance=0.1, Speed=50.0)
lara.append(waypoint)
motion = MotionVW()
lara.append(motion)
motion.add_service('socket')
gripper.add_service('socket')
pose.add_service('socket')
camera.add_interface('socket')
waypoint.add_interface('socket')
env = Environment('/home/orocos/Desktop/cnambot/code/environment_blender/indoor-5')
env.set_camera_rotation([1.0470, 0, 0.7854])
| gpl-2.0 | -8,526,240,466,677,877,000 | 21.019417 | 83 | 0.735009 | false |
Aravinthu/odoo | addons/mrp_repair/models/mrp_repair.py | 4 | 32545 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from datetime import datetime
from odoo import api, fields, models, _
from odoo.addons import decimal_precision as dp
from odoo.exceptions import UserError, ValidationError
from odoo.tools import float_compare
class StockMove(models.Model):
_inherit = 'stock.move'
repair_id = fields.Many2one('mrp.repair')
class Repair(models.Model):
_name = 'mrp.repair'
_description = 'Repair Order'
_inherit = ['mail.thread', 'mail.activity.mixin']
_order = 'create_date desc'
@api.model
def _default_stock_location(self):
warehouse = self.env['stock.warehouse'].search([], limit=1)
if warehouse:
return warehouse.lot_stock_id.id
return False
name = fields.Char(
'Repair Reference',
default=lambda self: self.env['ir.sequence'].next_by_code('mrp.repair'),
copy=False, required=True,
states={'confirmed': [('readonly', True)]})
product_id = fields.Many2one(
'product.product', string='Product to Repair',
readonly=True, required=True, states={'draft': [('readonly', False)]})
product_qty = fields.Float(
'Product Quantity',
default=1.0, digits=dp.get_precision('Product Unit of Measure'),
readonly=True, required=True, states={'draft': [('readonly', False)]})
product_uom = fields.Many2one(
'product.uom', 'Product Unit of Measure',
readonly=True, required=True, states={'draft': [('readonly', False)]})
partner_id = fields.Many2one(
'res.partner', 'Customer',
index=True, states={'confirmed': [('readonly', True)]},
help='Choose partner for whom the order will be invoiced and delivered.')
address_id = fields.Many2one(
'res.partner', 'Delivery Address',
domain="[('parent_id','=',partner_id)]",
states={'confirmed': [('readonly', True)]})
default_address_id = fields.Many2one('res.partner', compute='_compute_default_address_id')
state = fields.Selection([
('draft', 'Quotation'),
('cancel', 'Cancelled'),
('confirmed', 'Confirmed'),
('under_repair', 'Under Repair'),
('ready', 'Ready to Repair'),
('2binvoiced', 'To be Invoiced'),
('invoice_except', 'Invoice Exception'),
('done', 'Repaired')], string='Status',
copy=False, default='draft', readonly=True, track_visibility='onchange',
help="* The \'Draft\' status is used when a user is encoding a new and unconfirmed repair order.\n"
"* The \'Confirmed\' status is used when a user confirms the repair order.\n"
"* The \'Ready to Repair\' status is used to start to repairing, user can start repairing only after repair order is confirmed.\n"
"* The \'To be Invoiced\' status is used to generate the invoice before or after repairing done.\n"
"* The \'Done\' status is set when repairing is completed.\n"
"* The \'Cancelled\' status is used when user cancel repair order.")
location_id = fields.Many2one(
'stock.location', 'Current Location',
default=_default_stock_location,
index=True, readonly=True, required=True,
states={'draft': [('readonly', False)], 'confirmed': [('readonly', True)]})
location_dest_id = fields.Many2one(
'stock.location', 'Delivery Location',
readonly=True, required=True,
states={'draft': [('readonly', False)], 'confirmed': [('readonly', True)]})
lot_id = fields.Many2one(
'stock.production.lot', 'Lot/Serial',
domain="[('product_id','=', product_id)]",
help="Products repaired are all belonging to this lot", oldname="prodlot_id")
guarantee_limit = fields.Date('Warranty Expiration', states={'confirmed': [('readonly', True)]})
operations = fields.One2many(
'mrp.repair.line', 'repair_id', 'Parts',
copy=True, readonly=True, states={'draft': [('readonly', False)]})
pricelist_id = fields.Many2one(
'product.pricelist', 'Pricelist',
default=lambda self: self.env['product.pricelist'].search([], limit=1).id,
help='Pricelist of the selected partner.')
partner_invoice_id = fields.Many2one('res.partner', 'Invoicing Address')
invoice_method = fields.Selection([
("none", "No Invoice"),
("b4repair", "Before Repair"),
("after_repair", "After Repair")], string="Invoice Method",
default='none', index=True, readonly=True, required=True,
states={'draft': [('readonly', False)]},
help='Selecting \'Before Repair\' or \'After Repair\' will allow you to generate invoice before or after the repair is done respectively. \'No invoice\' means you don\'t want to generate invoice for this repair order.')
invoice_id = fields.Many2one(
'account.invoice', 'Invoice',
copy=False, readonly=True, track_visibility="onchange")
move_id = fields.Many2one(
'stock.move', 'Move',
copy=False, readonly=True, track_visibility="onchange",
help="Move created by the repair order")
fees_lines = fields.One2many(
'mrp.repair.fee', 'repair_id', 'Operations',
copy=True, readonly=True, states={'draft': [('readonly', False)]})
internal_notes = fields.Text('Internal Notes')
quotation_notes = fields.Text('Quotation Notes')
company_id = fields.Many2one(
'res.company', 'Company',
default=lambda self: self.env['res.company']._company_default_get('mrp.repair'))
invoiced = fields.Boolean('Invoiced', copy=False, readonly=True)
repaired = fields.Boolean('Repaired', copy=False, readonly=True)
amount_untaxed = fields.Float('Untaxed Amount', compute='_amount_untaxed', store=True)
amount_tax = fields.Float('Taxes', compute='_amount_tax', store=True)
amount_total = fields.Float('Total', compute='_amount_total', store=True)
tracking = fields.Selection('Product Tracking', related="product_id.tracking")
@api.one
@api.depends('partner_id')
def _compute_default_address_id(self):
if self.partner_id:
self.default_address_id = self.partner_id.address_get(['contact'])['contact']
@api.one
@api.depends('operations.price_subtotal', 'invoice_method', 'fees_lines.price_subtotal', 'pricelist_id.currency_id')
def _amount_untaxed(self):
total = sum(operation.price_subtotal for operation in self.operations)
total += sum(fee.price_subtotal for fee in self.fees_lines)
self.amount_untaxed = self.pricelist_id.currency_id.round(total)
@api.one
@api.depends('operations.price_unit', 'operations.product_uom_qty', 'operations.product_id',
'fees_lines.price_unit', 'fees_lines.product_uom_qty', 'fees_lines.product_id',
'pricelist_id.currency_id', 'partner_id')
def _amount_tax(self):
val = 0.0
for operation in self.operations:
if operation.tax_id:
tax_calculate = operation.tax_id.compute_all(operation.price_unit, self.pricelist_id.currency_id, operation.product_uom_qty, operation.product_id, self.partner_id)
for c in tax_calculate['taxes']:
val += c['amount']
for fee in self.fees_lines:
if fee.tax_id:
tax_calculate = fee.tax_id.compute_all(fee.price_unit, self.pricelist_id.currency_id, fee.product_uom_qty, fee.product_id, self.partner_id)
for c in tax_calculate['taxes']:
val += c['amount']
self.amount_tax = val
@api.one
@api.depends('amount_untaxed', 'amount_tax')
def _amount_total(self):
self.amount_total = self.pricelist_id.currency_id.round(self.amount_untaxed + self.amount_tax)
_sql_constraints = [
('name', 'unique (name)', 'The name of the Repair Order must be unique!'),
]
@api.onchange('product_id')
def onchange_product_id(self):
self.guarantee_limit = False
self.lot_id = False
if self.product_id:
self.product_uom = self.product_id.uom_id.id
@api.onchange('product_uom')
def onchange_product_uom(self):
res = {}
if not self.product_id or not self.product_uom:
return res
if self.product_uom.category_id != self.product_id.uom_id.category_id:
res['warning'] = {'title': _('Warning'), 'message': _('The Product Unit of Measure you chose has a different category than in the product form.')}
self.product_uom = self.product_id.uom_id.id
return res
@api.onchange('location_id')
def onchange_location_id(self):
self.location_dest_id = self.location_id.id
@api.onchange('partner_id')
def onchange_partner_id(self):
if not self.partner_id:
self.address_id = False
self.partner_invoice_id = False
self.pricelist_id = self.env['product.pricelist'].search([], limit=1).id
else:
addresses = self.partner_id.address_get(['delivery', 'invoice', 'contact'])
self.address_id = addresses['delivery'] or addresses['contact']
self.partner_invoice_id = addresses['invoice']
self.pricelist_id = self.partner_id.property_product_pricelist.id
@api.multi
def button_dummy(self):
# TDE FIXME: this button is very interesting
return True
@api.multi
def action_repair_cancel_draft(self):
if self.filtered(lambda repair: repair.state != 'cancel'):
raise UserError(_("Repair must be canceled in order to reset it to draft."))
self.mapped('operations').write({'state': 'draft'})
return self.write({'state': 'draft'})
def action_validate(self):
self.ensure_one()
precision = self.env['decimal.precision'].precision_get('Product Unit of Measure')
available_qty = self.env['stock.quant']._get_available_quantity(self.product_id, self.location_id, self.lot_id, strict=True)
if float_compare(available_qty, self.product_qty, precision_digits=precision) >= 0:
return self.action_repair_confirm()
else:
return {
'name': _('Insufficient Quantity'),
'view_type': 'form',
'view_mode': 'form',
'res_model': 'stock.warn.insufficient.qty.repair',
'view_id': self.env.ref('mrp_repair.stock_warn_insufficient_qty_repair_form_view').id,
'type': 'ir.actions.act_window',
'context': {
'default_product_id': self.product_id.id,
'default_location_id': self.location_id.id,
'default_repair_id': self.id
},
'target': 'new'
}
@api.multi
def action_repair_confirm(self):
""" Repair order state is set to 'To be invoiced' when invoice method
is 'Before repair' else state becomes 'Confirmed'.
@param *arg: Arguments
@return: True
"""
if self.filtered(lambda repair: repair.state != 'draft'):
raise UserError(_("Can only confirm draft repairs."))
before_repair = self.filtered(lambda repair: repair.invoice_method == 'b4repair')
before_repair.write({'state': '2binvoiced'})
to_confirm = self - before_repair
to_confirm_operations = to_confirm.mapped('operations')
to_confirm_operations.write({'state': 'confirmed'})
to_confirm.write({'state': 'confirmed'})
return True
@api.multi
def action_repair_cancel(self):
if self.filtered(lambda repair: repair.state == 'done'):
raise UserError(_("Cannot cancel completed repairs."))
if any(repair.invoiced for repair in self):
raise UserError(_('Repair order is already invoiced.'))
self.mapped('operations').write({'state': 'cancel'})
return self.write({'state': 'cancel'})
@api.multi
def action_send_mail(self):
self.ensure_one()
template_id = self.env.ref('mrp_repair.mail_template_mrp_repair_quotation').id
ctx = {
'default_model': 'mrp.repair',
'default_res_id': self.id,
'default_use_template': bool(template_id),
'default_template_id': template_id,
'default_composition_mode': 'comment'
}
return {
'type': 'ir.actions.act_window',
'view_type': 'form',
'view_mode': 'form',
'res_model': 'mail.compose.message',
'target': 'new',
'context': ctx,
}
@api.multi
def print_repair_order(self):
return self.env.ref('mrp_repair.action_report_mrp_repair_order').report_action(self)
def action_repair_invoice_create(self):
for repair in self:
repair.action_invoice_create()
if repair.invoice_method == 'b4repair':
repair.action_repair_ready()
elif repair.invoice_method == 'after_repair':
repair.write({'state': 'done'})
return True
@api.multi
def action_invoice_create(self, group=False):
""" Creates invoice(s) for repair order.
@param group: It is set to true when group invoice is to be generated.
@return: Invoice Ids.
"""
res = dict.fromkeys(self.ids, False)
invoices_group = {}
InvoiceLine = self.env['account.invoice.line']
Invoice = self.env['account.invoice']
for repair in self.filtered(lambda repair: repair.state not in ('draft', 'cancel') and not repair.invoice_id):
if not repair.partner_id.id and not repair.partner_invoice_id.id:
raise UserError(_('You have to select a Partner Invoice Address in the repair form!'))
comment = repair.quotation_notes
if repair.invoice_method != 'none':
if group and repair.partner_invoice_id.id in invoices_group:
invoice = invoices_group[repair.partner_invoice_id.id]
invoice.write({
'name': invoice.name + ', ' + repair.name,
'origin': invoice.origin + ', ' + repair.name,
'comment': (comment and (invoice.comment and invoice.comment + "\n" + comment or comment)) or (invoice.comment and invoice.comment or ''),
})
else:
if not repair.partner_id.property_account_receivable_id:
raise UserError(_('No account defined for partner "%s".') % repair.partner_id.name)
invoice = Invoice.create({
'name': repair.name,
'origin': repair.name,
'type': 'out_invoice',
'account_id': repair.partner_id.property_account_receivable_id.id,
'partner_id': repair.partner_invoice_id.id or repair.partner_id.id,
'currency_id': repair.pricelist_id.currency_id.id,
'comment': repair.quotation_notes,
'fiscal_position_id': repair.partner_id.property_account_position_id.id
})
invoices_group[repair.partner_invoice_id.id] = invoice
repair.write({'invoiced': True, 'invoice_id': invoice.id})
for operation in repair.operations:
if operation.type == 'add':
if group:
name = repair.name + '-' + operation.name
else:
name = operation.name
if operation.product_id.property_account_income_id:
account_id = operation.product_id.property_account_income_id.id
elif operation.product_id.categ_id.property_account_income_categ_id:
account_id = operation.product_id.categ_id.property_account_income_categ_id.id
else:
raise UserError(_('No account defined for product "%s".') % operation.product_id.name)
invoice_line = InvoiceLine.create({
'invoice_id': invoice.id,
'name': name,
'origin': repair.name,
'account_id': account_id,
'quantity': operation.product_uom_qty,
'invoice_line_tax_ids': [(6, 0, [x.id for x in operation.tax_id])],
'uom_id': operation.product_uom.id,
'price_unit': operation.price_unit,
'price_subtotal': operation.product_uom_qty * operation.price_unit,
'product_id': operation.product_id and operation.product_id.id or False
})
operation.write({'invoiced': True, 'invoice_line_id': invoice_line.id})
for fee in repair.fees_lines:
if group:
name = repair.name + '-' + fee.name
else:
name = fee.name
if not fee.product_id:
raise UserError(_('No product defined on Fees!'))
if fee.product_id.property_account_income_id:
account_id = fee.product_id.property_account_income_id.id
elif fee.product_id.categ_id.property_account_income_categ_id:
account_id = fee.product_id.categ_id.property_account_income_categ_id.id
else:
raise UserError(_('No account defined for product "%s".') % fee.product_id.name)
invoice_line = InvoiceLine.create({
'invoice_id': invoice.id,
'name': name,
'origin': repair.name,
'account_id': account_id,
'quantity': fee.product_uom_qty,
'invoice_line_tax_ids': [(6, 0, [x.id for x in fee.tax_id])],
'uom_id': fee.product_uom.id,
'product_id': fee.product_id and fee.product_id.id or False,
'price_unit': fee.price_unit,
'price_subtotal': fee.product_uom_qty * fee.price_unit
})
fee.write({'invoiced': True, 'invoice_line_id': invoice_line.id})
invoice.compute_taxes()
res[repair.id] = invoice.id
return res
@api.multi
def action_created_invoice(self):
self.ensure_one()
return {
'name': _('Invoice created'),
'type': 'ir.actions.act_window',
'view_mode': 'form',
'res_model': 'account.invoice',
'view_id': self.env.ref('account.invoice_form').id,
'target': 'current',
'res_id': self.invoice_id.id,
}
def action_repair_ready(self):
self.mapped('operations').write({'state': 'confirmed'})
return self.write({'state': 'ready'})
@api.multi
def action_repair_start(self):
""" Writes repair order state to 'Under Repair'
@return: True
"""
if self.filtered(lambda repair: repair.state not in ['confirmed', 'ready']):
raise UserError(_("Repair must be confirmed before starting reparation."))
self.mapped('operations').write({'state': 'confirmed'})
return self.write({'state': 'under_repair'})
@api.multi
def action_repair_end(self):
""" Writes repair order state to 'To be invoiced' if invoice method is
After repair else state is set to 'Ready'.
@return: True
"""
if self.filtered(lambda repair: repair.state != 'under_repair'):
raise UserError(_("Repair must be under repair in order to end reparation."))
for repair in self:
repair.write({'repaired': True})
vals = {'state': 'done'}
vals['move_id'] = repair.action_repair_done().get(repair.id)
if not repair.invoiced and repair.invoice_method == 'after_repair':
vals['state'] = '2binvoiced'
repair.write(vals)
return True
@api.multi
def action_repair_done(self):
""" Creates stock move for operation and stock move for final product of repair order.
@return: Move ids of final products
"""
if self.filtered(lambda repair: not repair.repaired):
raise UserError(_("Repair must be repaired in order to make the product moves."))
res = {}
Move = self.env['stock.move']
for repair in self:
moves = self.env['stock.move']
for operation in repair.operations:
move = Move.create({
'name': repair.name,
'product_id': operation.product_id.id,
'product_uom_qty': operation.product_uom_qty,
'product_uom': operation.product_uom.id,
'partner_id': repair.address_id.id,
'location_id': operation.location_id.id,
'location_dest_id': operation.location_dest_id.id,
'move_line_ids': [(0, 0, {'product_id': operation.product_id.id,
'lot_id': operation.lot_id.id,
'product_uom_qty': 0, # bypass reservation here
'product_uom_id': operation.product_uom.id,
'qty_done': operation.product_uom_qty,
'package_id': False,
'result_package_id': False,
'location_id': operation.location_id.id, #TODO: owner stuff
'location_dest_id': operation.location_dest_id.id,})],
'repair_id': repair.id,
'origin': repair.name,
})
moves |= move
operation.write({'move_id': move.id, 'state': 'done'})
move = Move.create({
'name': repair.name,
'product_id': repair.product_id.id,
'product_uom': repair.product_uom.id or repair.product_id.uom_id.id,
'product_uom_qty': repair.product_qty,
'partner_id': repair.address_id.id,
'location_id': repair.location_id.id,
'location_dest_id': repair.location_dest_id.id,
'move_line_ids': [(0, 0, {'product_id': repair.product_id.id,
'lot_id': repair.lot_id.id,
'product_uom_qty': 0, # bypass reservation here
'product_uom_id': repair.product_uom.id or repair.product_id.uom_id.id,
'qty_done': repair.product_qty,
'package_id': False,
'result_package_id': False,
'location_id': repair.location_id.id, #TODO: owner stuff
'location_dest_id': repair.location_dest_id.id,})],
'repair_id': repair.id,
'origin': repair.name,
})
consumed_lines = moves.mapped('move_line_ids')
produced_lines = move.move_line_ids
moves |= move
moves._action_done()
produced_lines.write({'consume_line_ids': [(6, 0, consumed_lines.ids)]})
res[repair.id] = move.id
return res
class RepairLine(models.Model):
_name = 'mrp.repair.line'
_description = 'Repair Line'
name = fields.Char('Description', required=True)
repair_id = fields.Many2one(
'mrp.repair', 'Repair Order Reference',
index=True, ondelete='cascade')
type = fields.Selection([
('add', 'Add'),
('remove', 'Remove')], 'Type', required=True)
product_id = fields.Many2one('product.product', 'Product', required=True)
invoiced = fields.Boolean('Invoiced', copy=False, readonly=True)
price_unit = fields.Float('Unit Price', required=True, digits=dp.get_precision('Product Price'))
price_subtotal = fields.Float('Subtotal', compute='_compute_price_subtotal', digits=0)
tax_id = fields.Many2many(
'account.tax', 'repair_operation_line_tax', 'repair_operation_line_id', 'tax_id', 'Taxes')
product_uom_qty = fields.Float(
'Quantity', default=1.0,
digits=dp.get_precision('Product Unit of Measure'), required=True)
product_uom = fields.Many2one(
'product.uom', 'Product Unit of Measure',
required=True)
invoice_line_id = fields.Many2one(
'account.invoice.line', 'Invoice Line',
copy=False, readonly=True)
location_id = fields.Many2one(
'stock.location', 'Source Location',
index=True, required=True)
location_dest_id = fields.Many2one(
'stock.location', 'Dest. Location',
index=True, required=True)
move_id = fields.Many2one(
'stock.move', 'Inventory Move',
copy=False, readonly=True)
lot_id = fields.Many2one('stock.production.lot', 'Lot/Serial')
state = fields.Selection([
('draft', 'Draft'),
('confirmed', 'Confirmed'),
('done', 'Done'),
('cancel', 'Cancelled')], 'Status', default='draft',
copy=False, readonly=True, required=True,
help='The status of a repair line is set automatically to the one of the linked repair order.')
@api.constrains('lot_id', 'product_id')
def constrain_lot_id(self):
for line in self.filtered(lambda x: x.product_id.tracking != 'none' and not x.lot_id):
raise ValidationError(_("Serial number is required for operation line with product '%s'") % (line.product_id.name))
@api.one
@api.depends('price_unit', 'repair_id', 'product_uom_qty', 'product_id', 'repair_id.invoice_method')
def _compute_price_subtotal(self):
taxes = self.tax_id.compute_all(self.price_unit, self.repair_id.pricelist_id.currency_id, self.product_uom_qty, self.product_id, self.repair_id.partner_id)
self.price_subtotal = taxes['total_excluded']
@api.onchange('type', 'repair_id')
def onchange_operation_type(self):
""" On change of operation type it sets source location, destination location
and to invoice field.
@param product: Changed operation type.
@param guarantee_limit: Guarantee limit of current record.
@return: Dictionary of values.
"""
if not self.type:
self.location_id = False
self.location_dest_id = False
elif self.type == 'add':
self.onchange_product_id()
args = self.repair_id.company_id and [('company_id', '=', self.repair_id.company_id.id)] or []
warehouse = self.env['stock.warehouse'].search(args, limit=1)
self.location_id = warehouse.lot_stock_id
self.location_dest_id = self.env['stock.location'].search([('usage', '=', 'production')], limit=1).id
else:
self.price_unit = 0.0
self.tax_id = False
self.location_id = self.env['stock.location'].search([('usage', '=', 'production')], limit=1).id
self.location_dest_id = self.env['stock.location'].search([('scrap_location', '=', True)], limit=1).id
@api.onchange('repair_id', 'product_id', 'product_uom_qty')
def onchange_product_id(self):
""" On change of product it sets product quantity, tax account, name,
uom of product, unit price and price subtotal. """
partner = self.repair_id.partner_id
pricelist = self.repair_id.pricelist_id
if not self.product_id or not self.product_uom_qty:
return
if self.product_id:
if partner:
self.name = self.product_id.with_context(lang=partner.lang).display_name
else:
self.name = self.product_id.display_name
self.product_uom = self.product_id.uom_id.id
if self.type != 'remove':
if partner and self.product_id:
self.tax_id = partner.property_account_position_id.map_tax(self.product_id.taxes_id, self.product_id, partner).ids
warning = False
if not pricelist:
warning = {
'title': _('No Pricelist!'),
'message':
_('You have to select a pricelist in the Repair form !\n Please set one before choosing a product.')}
else:
price = pricelist.get_product_price(self.product_id, self.product_uom_qty, partner)
if price is False:
warning = {
'title': _('No valid pricelist line found !'),
'message':
_("Couldn't find a pricelist line matching this product and quantity.\nYou have to change either the product, the quantity or the pricelist.")}
else:
self.price_unit = price
if warning:
return {'warning': warning}
class RepairFee(models.Model):
_name = 'mrp.repair.fee'
_description = 'Repair Fees Line'
repair_id = fields.Many2one(
'mrp.repair', 'Repair Order Reference',
index=True, ondelete='cascade', required=True)
name = fields.Char('Description', index=True, required=True)
product_id = fields.Many2one('product.product', 'Product')
product_uom_qty = fields.Float('Quantity', digits=dp.get_precision('Product Unit of Measure'), required=True, default=1.0)
price_unit = fields.Float('Unit Price', required=True)
product_uom = fields.Many2one('product.uom', 'Product Unit of Measure', required=True)
price_subtotal = fields.Float('Subtotal', compute='_compute_price_subtotal', digits=0)
tax_id = fields.Many2many('account.tax', 'repair_fee_line_tax', 'repair_fee_line_id', 'tax_id', 'Taxes')
invoice_line_id = fields.Many2one('account.invoice.line', 'Invoice Line', copy=False, readonly=True)
invoiced = fields.Boolean('Invoiced', copy=False, readonly=True)
@api.one
@api.depends('price_unit', 'repair_id', 'product_uom_qty', 'product_id')
def _compute_price_subtotal(self):
taxes = self.tax_id.compute_all(self.price_unit, self.repair_id.pricelist_id.currency_id, self.product_uom_qty, self.product_id, self.repair_id.partner_id)
self.price_subtotal = taxes['total_excluded']
@api.onchange('repair_id', 'product_id', 'product_uom_qty')
def onchange_product_id(self):
""" On change of product it sets product quantity, tax account, name,
uom of product, unit price and price subtotal. """
if not self.product_id:
return
partner = self.repair_id.partner_id
pricelist = self.repair_id.pricelist_id
if partner and self.product_id:
self.tax_id = partner.property_account_position_id.map_tax(self.product_id.taxes_id, self.product_id, partner).ids
if self.product_id:
self.name = self.product_id.display_name
self.product_uom = self.product_id.uom_id.id
warning = False
if not pricelist:
warning = {
'title': _('No Pricelist!'),
'message':
_('You have to select a pricelist in the Repair form !\n Please set one before choosing a product.')}
else:
price = pricelist.get_product_price(self.product_id, self.product_uom_qty, partner)
if price is False:
warning = {
'title': _('No valid pricelist line found !'),
'message':
_("Couldn't find a pricelist line matching this product and quantity.\nYou have to change either the product, the quantity or the pricelist.")}
else:
self.price_unit = price
if warning:
return {'warning': warning}
| agpl-3.0 | -5,396,696,964,052,102,000 | 48.161631 | 227 | 0.570103 | false |
arsfeld/conduit | conduit/modules/ShutterflyModule/ShutterflyModule.py | 1 | 3532 | """
Shutterfly Data Sink
"""
import logging
log = logging.getLogger("modules.Shutterfly")
import conduit
import conduit.utils as Utils
from conduit.datatypes import Rid
import conduit.dataproviders.Image as Image
import conduit.Exceptions as Exceptions
import conduit.datatypes.Photo as Photo
Utils.dataprovider_add_dir_to_path(__file__, "shutterfly")
from shutterfly import Shutterfly
from gettext import gettext as _
MODULES = {
"ShutterflySink" : {"type" : "dataprovider"},
}
class ShutterflySink(Image.ImageSink):
_name_ = _("Shutterfly")
_description_ = _("Synchronize your Shutterfly photos")
_module_type_ = "sink"
_icon_ = "shutterfly"
_configurable_ = True
def __init__(self, *args):
Image.ImageSink.__init__(self)
self.username = ""
self.password = ""
self.album = ""
self.sapi = None
self.salbum = None
self.sphotos = None
def _get_raw_photo_url(self, photoInfo):
return photoInfo.url
def _get_photo_info(self, id):
if self.sphotos.has_key(id):
return self.sphotos[id]
else:
return None
def _get_photo_formats(self):
return ("image/jpeg", )
def refresh(self):
Image.ImageSink.refresh(self)
self.sapi = Shutterfly(self.username, self.password)
albums = self.sapi.getAlbums()
if not albums.has_key(self.album):
self.salbum = self.sapi.createAlbum(self.album)
else:
self.salbum = albums[self.album]
self.sphotos = self.salbum.getPhotos()
def get_all(self):
return self.sphotos.keys()
def get(self, LUID):
#Image.ImageSink.get(self, LUID)
sphoto = self.sphotos[LUID]
f = Photo.Photo(URI=sphoto.url)
f.set_open_URI(sphoto.url)
f.set_UID(LUID)
return f
def delete(self, LUID):
"""
Delete a photo by ID
Deleting a photo invalidates album length and photo index values.
We must reload the photos (or do something else...)
"""
if not self.sphotos.has_key(LUID):
log.warn("Photo does not exist")
return
try:
self.salbum.deletePhoto(self.sphotos[LUID])
except Exception, e:
raise Exceptions.SyncronizeError("Shutterfly Delete Error - Try Again.")
self.sphotos = self.salbum.getPhotos()
def _upload_photo(self, uploadInfo):
"""
Upload to album
"""
try:
ret = self.salbum.uploadPhoto(uploadInfo.url, uploadInfo.mimeType, uploadInfo.name)
return Rid(ret.id)
except Exception, e:
raise Exceptions.SyncronizeError("Shutterfly Upload Error.")
def configure(self, window):
"""
Configures the ShutterflySink
"""
widget = Utils.dataprovider_glade_get_widget(
__file__,
"shutterfly.glade",
"ShutterflySinkConfigDialog")
# Get configuration widgets
username = widget.get_widget("username")
password = widget.get_widget("password")
album = widget.get_widget("album")
# Load the widgets with presets
username.set_text(self.username)
password.set_text(self.password)
album.set_text(self.album)
dlg = widget.get_widget("ShutterflySinkConfigDialog")
response = Utils.run_dialog(dlg, window)
if response == True:
self.username = username.get_text()
self.password = password.get_text()
self.album = album.get_text()
dlg.destroy()
def get_configuration(self):
return {
"username" : self.username,
"password" : self.password,
"album" : self.album
}
def is_configured(self, isSource, isTwoWay):
if len(self.username) < 1:
return False
if len(self.password) < 1:
return False
if len(self.album) < 1:
return False
return True
def get_UID(self):
return self.username+":"+self.album
| gpl-2.0 | -7,614,566,365,720,435,000 | 22.084967 | 86 | 0.693658 | false |
ybayle/ReproducibleResearchIEEE2017 | src/svmbff.py | 1 | 22789 | # -*- coding: utf-8 -*-
#!/usr/bin/python
#
# Author Yann Bayle
# E-mail [email protected]
# License MIT
# Created 13/10/2016
# Updated 20/01/2017
# Version 1.0.0
#
"""
Description of svmbff.py
======================
bextract -mfcc -zcrs -ctd -rlf -flx -ws 1024 -as 898 -sv -fe filename.mf -w out.arff
:Example:
python svmbff.py
"""
import os
import csv
import sys
import time
import utils
import shutil
import argparse
import multiprocessing
from statistics import stdev
from scipy.io import arff
from sklearn.metrics import precision_recall_curve, precision_score, recall_score, classification_report, f1_score, accuracy_score
begin = int(round(time.time() * 1000))
def validate_arff(filename):
"""Description of validate_arff
    Check that filename exists on the path and is a regular file.
    If the file is a valid ARFF file, return its absolute path;
    otherwise move it to the empty/invalid directory and return False.
"""
# Check if file exists
if os.path.isfile(filename) and os.path.exists(filename):
filename = os.path.abspath(filename)
else:
return False
# If does not satisfy min size, move to "empty" folder
if os.stat(filename).st_size < 8100:
tmp_path = filename.split("/")
empty_dirname = "/".join(tmp_path[:-1]) + "/empty/"
if not os.path.exists(empty_dirname):
os.makedirs(empty_dirname)
shutil.move(filename, empty_dirname + tmp_path[-1])
return False
# # If filename does not match with feature name, move to "invalid" folder
# name_file = filename.split("/")[-1][:12]
# with open(filename) as filep:
# for i, line in enumerate(filep):
# if i == 70:
# # 71th line
# name_feat = line.split(" ")[2][1:13]
# break
# if name_file != name_feat:
# tmp_path = filename.split("/")
# invalid_dirname = "/".join(tmp_path[:-1]) + "/invalid/"
# if not os.path.exists(invalid_dirname):
# os.makedirs(invalid_dirname)
# shutil.move(filename, invalid_dirname + tmp_path[-1])
# return False
# If everything went well, return filename absolute path
return filename
def merge_arff(indir, outfilename):
"""Description of merge_arff
    The bextract program from Marsyas generates one output file per audio file.
    This function merges them all into a single file and checks that the
    analysed files are valid, i.e. not empty.
"""
utils.print_success("Preprocessing ARFFs")
indir = utils.abs_path_dir(indir)
filenames = os.listdir(indir)
outfn = open(outfilename, 'w')
cpt_invalid_fn = 0
# Write first lines of ARFF template file
for filename in filenames:
if os.path.isfile(indir + filename):
new_fn = validate_arff(indir + filename)
if new_fn:
with open(new_fn, 'r') as template:
nb_line = 74
for line in template:
if not nb_line:
break
nb_line -= 1
outfn.write(line)
break
else:
cpt_invalid_fn += 1
    # Append all arff files to the output file
cur_file_num = 1
for filename in filenames:
if os.path.isfile(indir + filename):
new_fn = validate_arff(indir + filename)
if new_fn:
cur_file_num = cur_file_num + 1
utils.print_progress_start("Analysing file\t" + str(cur_file_num))
fname = open(new_fn, 'r')
outfn.write("".join(fname.readlines()[74:77]))
fname.close()
else:
cpt_invalid_fn += 1
utils.print_progress_end()
outfn.close()
# os.system("rm " + indir + "*.arff")
if cpt_invalid_fn:
utils.print_warning(str(cpt_invalid_fn) + " ARFF files with errors found")
return outfilename
def add_groundtruth(feature_fn, groundtruth_fn, output_fn):
"""Description of add_groundtruth
    Write the groundtruth merged with the corresponding features to the output file.
    .. todo:: Error with old_tag not corresponding to filename...
"""
utils.print_success("Adding groundtruth")
feature_fn = utils.abs_path_file(feature_fn)
groundtruth_fn = utils.abs_path_file(groundtruth_fn)
if os.path.isfile(output_fn) and os.path.exists(output_fn):
utils.print_warning("Overwritting existing output file: " +
utils.abs_path_file(output_fn))
# TODO Read groundtruth file in memory
tmp_gt = csv.reader(open(groundtruth_fn, "r"))
groundtruths = {}
for row in tmp_gt:
groundtruths[row[0]] = row[1]
tags = []
output = open(output_fn, "w")
# switch if test set preprocessing
# separator = "_"
separator = "."
with open(feature_fn, "r") as feat:
line_num = 0
tmp_line = ""
for line in feat:
line_num += 1
if line_num > 74:
if line[0] != "%":
# Alter feature line with correct tag
cur_line = line.split(",")
old_tag = cur_line[-1].split(separator)[0]
if old_tag in groundtruths:
new_tag = groundtruths[old_tag]
output.write(tmp_line + ",".join(cur_line[:-1]) + "," + new_tag +"\n")
tmp_line = ""
tags.append(new_tag)
else:
# TODO
# File not in groundtruth
tmp_line = ""
# utils.print_warning("Error with " + old_tag)
else:
tmp_line += line
elif line_num == 2:
output.write("@relation train_test.arff\n")
# output.write("@relation MARSYAS_KEA\n")
elif line_num == 71:
# Alter line 71 containing all tag gathered along the way
# TODO enhance
output.write("@attribute output {i,s}\n")
else:
# Write header
output.write(line)
output.close()
def split_number(number, nb_folds):
"""Description of split_number
    Return a list of nb_folds integers whose sum equals number;
    all the cells hold the same value, plus or minus 1.
"""
    if not isinstance(number, int) or not isinstance(nb_folds, int):
utils.print_error("Variable must be integer")
if number < nb_folds:
utils.print_error("Number of folds > Number of data available")
min_num = int(number/nb_folds)
folds = [min_num] * nb_folds
for num in range(0, number-(min_num*nb_folds)):
folds[num] = folds[num] + 1
return folds
def create_folds(filelist, nb_folds, folds_dir, invert_train_test=False):
"""Description of create_folds
"""
utils.print_success("Creating folds")
if nb_folds < 1:
utils.print_error("Wrong number of folds provided")
# folds_dir = "/".join(filelist.split("/")[:-1])
if nb_folds == 1:
# Train and test set are the same
folds_dir = folds_dir + "01_fold/"
utils.create_dir(folds_dir)
os.system("cp " + filelist + " " + folds_dir + "/train_test.arff")
else:
# Create train and test set
folds_dir = folds_dir + str(nb_folds).zfill(2) + "_folds/"
utils.create_dir(folds_dir)
# TODO
# Read filelist
# Extract name and tag
# Separate different tag
# create folds
data, meta = arff.loadarff(filelist)
tags = {}
for row in data:
tag = row[-1].decode("ascii")
if tag in tags:
tags[tag] += 1
else:
tags[tag] = 1
tags_folds = {}
tags_folds_index = {}
for tag in tags:
tags_folds[tag] = split_number(tags[tag], nb_folds)
tags_folds_index[tag] = 0
# Create empty folds
folds = {}
# Init empty folds
for index in range(0, nb_folds):
folds[index] = ""
# Fill folds with data
with open(filelist, "r") as filelist_pointer:
arff_header = ""
tmp = ""
for i, line in enumerate(filelist_pointer):
utils.print_progress_start("\t" + str(i))
                # Lines after the 75-line ARFF header contain the data
if i > 74:
# Process ARFF data
if "% " in line:
# Memorize line
tmp += line
else:
# Get line 3 and add it to corresponding fold
tag = line.split(",")[-1][:-1]
num_fold = tags_folds_index[tag]
if tags_folds[tag][num_fold] == 0:
tags_folds_index[tag] += 1
tags_folds[tag][tags_folds_index[tag]] -= 1
folds[tags_folds_index[tag]] += tmp + line
tmp = ""
else:
# Save ARFF header lines
arff_header += line
            utils.print_progress_end()
        # At this point the data has been split up into different parts
        # Use these parts to create the train/test split
if invert_train_test:
# Test is bigger than train
fn_with_min_data = "/train_"
fn_with_max_data = "/test_"
else:
# Train is bigger than test
fn_with_min_data = "/test_"
fn_with_max_data = "/train_"
for index_test in range(0, nb_folds):
filep = open(folds_dir + fn_with_min_data + str(index_test+1).zfill(2) + ".arff", "a")
filep.write(arff_header + folds[index_test])
filep.close()
filep = open(folds_dir + fn_with_max_data + str(index_test+1).zfill(2) + ".arff", "a")
filep.write(arff_header)
for index_train in range(0, nb_folds):
if index_train != index_test:
filep.write(folds[index_train])
filep.close()
return folds_dir
def process_results(in_fn, out_fn):
in_fn = utils.abs_path_file(in_fn)
out_fp = open(out_fn, "w")
with open(in_fn, "r") as filep:
for index, line in enumerate(filep):
if index % 2:
row = line[:-1].split("\t")
out_fp.write(row[0].split("_")[0] + "," + row[2] + "\n")
out_fp.close()
def experiment_2_3():
process_results("src/tmp/svmbff/SVMBFF.csv", "predictions/SVMBFF.csv")
def run_kea(train_file, test_file, out_file, verbose=False):
"""Description of run_kea
Launch kea classification on specified file
"""
kea_cmd = 'kea -m tags -w ' + train_file + ' -tw ' + test_file + ' -pr ' + out_file
if not verbose:
kea_cmd += "> /dev/null 2>&1"
os.system(kea_cmd)
train_dir = train_file.split(os.sep)
train_dir = os.sep.join(train_dir[:-1])
# os.system("rm " + train_dir + "/*affinities*")
test_dir = test_file.split(os.sep)
test_dir = os.sep.join(test_dir[:-1])
# os.system("rm " + test_dir + "/*affinities*")
def run_kea_on_folds(folds_dir):
"""Description of run_kea_on_folds
Wrapper for kea on folds
"""
folds_dir = utils.abs_path_dir(folds_dir)
out_file = folds_dir + "/results.txt"
if os.path.exists(folds_dir + "/train_test.arff"):
train_file = folds_dir + "/train_test.arff"
test_file = train_file
run_kea(train_file, test_file, out_file)
else:
nb_folds = len([name for name in os.listdir(folds_dir) if os.path.isfile(os.path.join(folds_dir, name))])
# Run on multiple train/test
for index in range(1, int(nb_folds/2)+1):
utils.print_progress_start("Train/Test on fold " + str(index))
train_file = folds_dir + "/train_" + str(index).zfill(2) + ".arff"
test_file = folds_dir + "/test_" + str(index).zfill(2) + ".arff"
out_file = folds_dir + "/results_" + str(index).zfill(2) + ".arff"
run_kea(train_file, test_file, out_file)
utils.print_progress_end()
utils.print_warning("TODO multiprocessing")
# # Parallel computing on each TrainTestFolds
# printTitle("Parallel train & test of folds")
# partialRunTrainTestOnFold = partial(runTrainTestOnFold, args=args)
# pool = multiprocessing.Pool()
# pool.map(partialRunTrainTestOnFold, range(nb_folds)) #make our results with a map call
# pool.close() #we are not adding any more processes
# pool.join() #tell it to wait until all threads are done before going on
def extract_feat_train():
dirs = ["/media/sf_SharedFolder/DataSets/Jamendo/Yann/song/",
"/media/sf_SharedFolder/DataSets/ccmixter_corpus/instru/",
"/media/sf_SharedFolder/DataSets/MedleyDB/MedleyDB/instru/vrai/"]
outdir= "res/"
for indir in dirs:
extensions = ["wav", "mp3"]
filenames = [fn for fn in os.listdir(indir)
if any(fn.endswith(ext) for ext in extensions)]
for index, filename in enumerate(filenames):
dirName = indir.split("/")[-2] + ".mf"
with open(dirName, "w") as filep:
filep.write(indir + filename + "\n")
outfilename = outdir + filename[:-3].replace(" ", "_") + "arff"
bextract_cmd = "bextract -mfcc -zcrs -ctd -rlf -flx -ws 1024 -as 898 -sv -fe " + dirName + " -w " + outfilename
os.system(bextract_cmd)
def read_gts(filename):
filename = utils.abs_path_file(filename)
groundtruths = {}
i = 0
with open(filename, "r") as filep:
for index, line in enumerate(filep):
if index > 73:
if i == 0:
i += 1
name = line.split("/")[-1][:-1]
elif i == 1:
i += 1
elif i == 2:
i = 0
groundtruths[name] = line.split(",")[-1][:-1]
return groundtruths
def read_preds(filename):
    filename = utils.abs_path_file(filename)
predictions = {}
i = 0
with open(filename, "r") as filep:
for index, line in enumerate(filep):
if index % 2:
line = line.split("\t")
name = line[0].split("/")[-1]
pred = float(line[-1])
if pred > 0.5:
predictions[name] = "s"
else:
predictions[name] = "i"
return predictions
def figure2():
# folds_dir = create_folds("results/dataset.arff", 5)
# run_kea_on_folds(folds_dir)
# read results arff file and print accuracy and f-measure
gts_filen = "results/dataset.arff"
gts = read_gts(gts_filen)
folds_dir = "results/05_folds/"
res_files = [name for name in os.listdir(folds_dir) if os.path.isfile(os.path.join(folds_dir, name)) and "results" in name]
acc = []
f1 = []
for res in res_files:
predictions = []
groundtruths = []
preds = read_preds(folds_dir + res)
for name in preds:
if name in gts:
groundtruths.append(gts[name])
predictions.append(preds[name])
acc.append(accuracy_score(groundtruths, predictions))
predictions = [1 if i=="s" else 0 for i in predictions]
groundtruths = [1 if i=="s" else 0 for i in groundtruths]
f1.append(f1_score(groundtruths, predictions, average='weighted'))
# Print average ± standard deviation
print("Accuracy " + str(sum(acc)/float(len(acc))) + " ± " + str(stdev(acc)))
print("F-Measure " + str(sum(f1)/float(len(f1))) + " ± " + str(stdev(f1)))
dir_stats = utils.create_dir("stats/")
with open(dir_stats + "table1_accuracy.csv", "a") as filep:
filep.write("SVMBFF")
for val in acc:
filep.write("," + str(val))
filep.write("\n")
with open(dir_stats + "table1_f1.csv", "a") as filep:
filep.write("SVMBFF")
for val in f1:
filep.write("," + str(val))
filep.write("\n")
# with open(dir_stats + "table1_accuracy.csv", "a") as filep:
# for val in acc:
# filep.write("SVMBFF," + str(val) + "\n")
# with open(dir_stats + "table1_f1.csv", "a") as filep:
# for val in f1:
# filep.write("SVMBFF," + str(val) + "\n")
def extract_features(tracks_dir="tracks/", feat_dir="features/"):
utils.print_success("Extracting features")
tracks_fn = os.listdir(tracks_dir)
utils.create_dir(feat_dir)
feat_dir = utils.create_dir(feat_dir + "svmbff")
bextract = "bextract -mfcc -zcrs -ctd -rlf -flx -ws 1024 -as 898 -sv -fe "
for index, filename in enumerate(tracks_fn):
utils.print_progress_start(str(index) + "/" + str(len(tracks_fn)) + " " + filename)
track_path = filename + ".mf"
with open(track_path, "w") as filep:
filep.write(tracks_dir + filename + "\n")
new_fn = filename.split(".")[0] + ".arff"
try:
os.system(bextract + track_path + " -w " + new_fn + "> /dev/null 2>&1")
except:
utils.print_info("You have to make marsyas available systemwide, tips:")
utils.print_info("http://marsyas.info/doc/manual/marsyas-user/Step_002dby_002dstep-building-instructions.html#Step_002dby_002dstep-building-instructions")
utils.print_info("http://stackoverflow.com/a/21173918")
utils.print_error("Program exit")
# print(new_fn)
# print(feat_dir + " " + new_fn)
os.rename(new_fn, feat_dir + new_fn)
# os.rename("MARSYAS_EMPTY" + new_fn, feat_dir + new_fn)
os.system("rm " + track_path)
utils.print_progress_end()
os.system("rm bextract_single.mf")
def table1_exp1(folds_dir):
utils.print_success("Experiment 1 in Table 1")
fn_gts = "groundtruths/database1.csv"
gts = utils.read_groundtruths(fn_gts)
res_files = [name for name in os.listdir(folds_dir) if os.path.isfile(os.path.join(folds_dir, name)) and "results" in name]
acc = []
f1 = []
for res in res_files:
predictions = []
groundtruths = []
preds = read_preds(folds_dir + res)
for name in preds:
name_gts = name.split(".")[0]
if name_gts in gts:
groundtruths.append(gts[name_gts])
predictions.append(preds[name])
acc.append(accuracy_score(groundtruths, predictions))
predictions = [1 if i=="s" else 0 for i in predictions]
groundtruths = [1 if i=="s" else 0 for i in groundtruths]
f1.append(f1_score(groundtruths, predictions, average='binary'))
# Print average ± standard deviation
utils.print_info("Accuracy " + str(sum(acc)/float(len(acc))) + " ± " + str(stdev(acc)))
utils.print_info("F-Measure " + str(sum(f1)/float(len(f1))) + " ± " + str(stdev(f1)))
dir_res = utils.create_dir("stats/")
with open(dir_res + "table1_accuracy.csv", "a") as filep:
for val in acc:
filep.write("SVMBFF," + str(val) + "\n")
with open(dir_res + "table1_f1.csv", "a") as filep:
for val in f1:
filep.write("SVMBFF," + str(val) + "\n")
def experiment_1(folder="."):
utils.print_success("SVMBFF Experiment 1 (approx. 1 minutes)")
# Variables
folder = utils.abs_path_dir(folder)
dir_tmp = utils.create_dir(folder + "src/tmp/")
dir_svmbff = utils.create_dir(dir_tmp + "svmbff/")
dir_tracks = folder + "tracks/"
dir_feat = folder + "features/svmbff/"
fn_feats_db1 = dir_svmbff + "svmbff_database1.arff"
feats_gts_db1 = folder + "features/" + "svmbff_database1.arff"
groundtruths = folder + "groundtruths/database1.csv"
extract_features(dir_tracks)
merge_arff(dir_feat, fn_feats_db1)
add_groundtruth(fn_feats_db1, groundtruths, feats_gts_db1)
os.remove(fn_feats_db1)
dir_folds = create_folds(feats_gts_db1, 5, dir_svmbff)
run_kea_on_folds(dir_folds)
table1_exp1(dir_folds)
def main():
utils.print_success("SVMBFF (approx. 2 minutes)")
experiment_1(folder="../")
if __name__ == "__main__":
PARSER = argparse.ArgumentParser(description="Validate list of ISRCs")
PARSER.add_argument(
"-i",
"--input_dir",
help="input directory containing all ARFF file from Marsyas bextract",
type=str,
default="data",
metavar="input_dir")
PARSER.add_argument(
"-o",
"--output_file",
help="output file",
type=str,
default="feat_with_groundtruth.txt",
metavar="output_file")
PARSER.add_argument(
"-g",
"--groundtruth_file",
help="groundtruth file",
type=str,
default="groundtruth.txt",
metavar="groundtruth_file")
PARSER.add_argument(
"-n",
"--nb_folds",
default=1,
type=int,
metavar="nb_folds",
help="classification folds number, must be >= 1, default = 1")
main()
# figure2()
# indir1 = "res/"
# indir2 = "/media/sf_DATA/Datasets/Simbals/new/201611/arff/"
# merge_arff(indir2, "test2.arff")
# utils.print_success("Kea classification")
# # Variable declaration
# input_dir = PARSER.parse_args().input_dir
# res_dir = "analysis"
# utils.create_dir(res_dir)
# if input_dir[-1] == "/":
# input_dir = input_dir[:-1]
# proj_dir = res_dir + "/" + input_dir.split("/")[-1]
# utils.create_dir(proj_dir)
# feat_without_groundtruth = proj_dir + "/feat_without_groundtruth.arff"
# feat_with_groundtruth = proj_dir + "/" + PARSER.parse_args().output_file
# # Functions call
# merge_arff(input_dir, feat_without_groundtruth)
# add_groundtruth(feat_without_groundtruth,
# PARSER.parse_args().groundtruth_file,
# feat_with_groundtruth)
# os.remove(feat_without_groundtruth)
# folds_dir = create_folds(feat_with_groundtruth, PARSER.parse_args().nb_folds, invert_train_test=True)
# folds_dir = create_folds("results/train_kea.arff", 5)
# run_kea_on_folds(folds_dir)
    # # 2 merge all arff files into a train/test file (generate train/test folds/set,
    # # reuse vqmm) from the source files of another folder, copy everything
    # # into the svmbff folder. no-overlap train/Test
    # # 3 run kea on all the train/test pairs
    # # 4 display the results
# utils.print_success("Finished in " + str(int(round(time.time() * 1000)) - begin) + "ms")
# """
# kea -m tags -w ' + train_file + ' -tw ' + test_file + ' -pr ' + out_file
# """
| mit | 6,587,038,869,646,503,000 | 37.594915 | 166 | 0.56146 | false |
Southpaw-TACTIC/Team | src/python/Lib/site-packages/PySide/examples/declarative/scrolling.py | 1 | 2311 | #!/usr/bin/env python
# Copyright (C) 2010 Nokia Corporation and/or its subsidiary(-ies).
# All rights reserved.
# Contact: PySide Team ([email protected])
#
# This file is part of the examples of PySide: Python for Qt.
#
# You may use this file under the terms of the BSD license as follows:
#
# "Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Nokia Corporation and its Subsidiary(-ies) nor
# the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
from PySide.QtCore import QUrl
from PySide.QtGui import QPushButton, QApplication
from PySide.QtDeclarative import QDeclarativeView
# This example uses a QML file to show a scrolling list containing
# all the items listed into dataList.
dataList = ["Item 1", "Item 2", "Item 3", "Item 4"]
app = QApplication([])
view = QDeclarativeView()
ctxt = view.rootContext()
ctxt.setContextProperty("myModel", dataList)
url = QUrl('view.qml')
view.setSource(url)
view.show()
app.exec_()
| epl-1.0 | 1,676,431,145,130,090,000 | 39.54386 | 72 | 0.762441 | false |
j-marjanovic/myhdl | myhdl/_always_comb.py | 1 | 4522 | # This file is part of the myhdl library, a Python package for using
# Python as a Hardware Description Language.
#
# Copyright (C) 2003-2009 Jan Decaluwe
#
# The myhdl library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation; either version 2.1 of the
# License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
""" Module with the always_comb function. """
from __future__ import absolute_import
import sys
import inspect
from types import FunctionType
import re
import ast
from myhdl import AlwaysCombError
from myhdl._Signal import _Signal, _isListOfSigs
from myhdl._util import _isGenFunc, _dedent
from myhdl._Waiter import _Waiter, _SignalWaiter, _SignalTupleWaiter
from myhdl._instance import _Instantiator
from myhdl._always import _Always
from myhdl._resolverefs import _AttrRefTransformer
from myhdl._visitors import _SigNameVisitor
class _error:
pass
_error.ArgType = "always_comb argument should be a classic function"
_error.NrOfArgs = "always_comb argument should be a function without arguments"
_error.Scope = "always_comb argument should be a local function"
_error.SignalAsInout = "signal (%s) used as inout in always_comb function argument"
_error.EmbeddedFunction = "embedded functions in always_comb function argument not supported"
_error.EmptySensitivityList= "sensitivity list is empty"
def always_comb(func):
if not isinstance( func, FunctionType):
raise AlwaysCombError(_error.ArgType)
if _isGenFunc(func):
raise AlwaysCombError(_error.ArgType)
if func.__code__.co_argcount > 0:
raise AlwaysCombError(_error.NrOfArgs)
c = _AlwaysComb(func)
return c
# class _AlwaysComb(_Instantiator):
class _AlwaysComb(_Always):
# def __init__(self, func, symdict):
# self.func = func
# self.symdict = symdict
# s = inspect.getsource(func)
# # remove decorators
# s = re.sub(r"@.*", "", s)
# s = s.lstrip()
# tree = compiler.parse(s)
# v = _SigNameVisitor(symdict)
# compiler.walk(tree, v)
# self.inputs = v.inputs
# self.outputs = v.outputs
# senslist = []
# for n in self.inputs:
# s = self.symdict[n]
# if isinstance(s, Signal):
# senslist.append(s)
# else: # list of sigs
# senslist.extend(s)
# self.senslist = tuple(senslist)
# self.gen = self.genfunc()
# if len(self.senslist) == 0:
# raise AlwaysCombError(_error.EmptySensitivityList)
# if len(self.senslist) == 1:
# W = _SignalWaiter
# else:
# W = _SignalTupleWaiter
# self.waiter = W(self.gen)
def __init__(self, func):
senslist = []
super(_AlwaysComb, self).__init__(func, senslist)
s = inspect.getsource(func)
s = _dedent(s)
tree = ast.parse(s)
# print ast.dump(tree)
v = _AttrRefTransformer(self)
v.visit(tree)
v = _SigNameVisitor(self.symdict)
v.visit(tree)
self.inputs = v.results['input']
self.outputs = v.results['output']
inouts = v.results['inout'] | self.inputs.intersection(self.outputs)
if inouts:
raise AlwaysCombError(_error.SignalAsInout % inouts)
if v.results['embedded_func']:
raise AlwaysCombError(_error.EmbeddedFunction)
for n in self.inputs:
s = self.symdict[n]
if isinstance(s, _Signal):
senslist.append(s)
elif _isListOfSigs(s):
senslist.extend(s)
self.senslist = tuple(senslist)
if len(self.senslist) == 0:
raise AlwaysCombError(_error.EmptySensitivityList)
def genfunc(self):
senslist = self.senslist
if len(senslist) == 1:
senslist = senslist[0]
func = self.func
while 1:
func()
yield senslist
| lgpl-2.1 | -1,478,581,630,403,665,000 | 33 | 93 | 0.638655 | false |
enricobacis/cineca-scopus | src/cineca3.py | 1 | 3928 | #!/usr/bin/env python
#coding: utf-8
from contextlib import closing
from operator import itemgetter
from datetime import datetime
from argparse import ArgumentParser
from unicodecsv import DictWriter
from utils import read_cineca_file, csv_to_db
import sqlite3
import json
import re
FIELDS = ['Ateneo', 'Facoltà', 'Fascia', 'Genere', 'S.C.',
'Servizio prestato in altro ateneo', 'Struttura di afferenza',
'author', 'identifier', 'eid', 'title', 'aggregationType',
'citedby-count', 'publicationName', 'isbn', 'issn', 'volume',
'issueIdentifier', 'pageRange', 'pageNum', 'coverDate',
'coverDisplayDate', 'doi', 'numAuthors']
QUERY = 'SELECT entries FROM articles WHERE author = ? AND ateneo = ?'
def pagenum(pageRange):
try:
page = list(map(int, pageRange.split('-')))
return 1 if len(page) == 1 else page[1] - page[0]
except:
return None
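# Example behaviour of pagenum() (illustrative, not part of the original file):
#   pagenum('100-115') -> 15    (last page minus first page)
#   pagenum('7')       -> 1     (single-page entry)
#   pagenum('vii-x')   -> None  (non-numeric ranges fall through to None)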
def process(entry):
for key, value in list(entry.items()):
if ':' in key:
del entry[key]
key = key.partition(':')[2]
entry[key] = value
    match = re.match(r'Author list of (\d+)', entry.get('message', ''))
if match: entry['numAuthors'] = int(match.group(1))
else: entry['numAuthors'] = len(entry.get('author', [])) or None
# eid and identifier default to 0
entry['eid'] = entry.get('eid', 0)
entry['identifier'] = entry.get('identifier', 0)
# validate coverDate (or default to 1900-01-01)
date = entry.get('coverDate', '')
try:
datesplit = list(map(int, date.split('-')))
if len(datesplit) == 3 and datesplit[1] == 0:
date = '%d-%d-%s' % (datesplit[0], datesplit[1]+1, datesplit[2])
datetime.strptime(date, '%Y-%m-%d')
except: entry['coverDate'] = '1900-01-01'
entry['author'] = entry['Cognome e Nome']
entry['pageNum'] = pagenum(entry.get('pageRange', None))
return entry
def mergedicts(*dicts):
return {k:v for d in dicts for k,v in d.items()}
if __name__ == '__main__':
from config import FILENAME, DBFILE, OUTFILE, PRODUCTSDB
parser = ArgumentParser('convert scopus db to csv')
parser.add_argument('--input', default=FILENAME, help='cineca input file')
parser.add_argument('--db', default=DBFILE, help='database file')
parser.add_argument('--output', default=OUTFILE, help='output csv file')
parser.add_argument('--outdb', default=PRODUCTSDB, help='output db file')
args = parser.parse_args()
with open(args.output, 'wb') as outfile:
csvreader = [row.to_dict() for row in read_cineca_file(args.input)]
authors = [(row['Cognome e Nome'], row['Ateneo'], row) for row in csvreader]
authors.sort(key=itemgetter(0, 1))
csvwriter = DictWriter(outfile, FIELDS, extrasaction='ignore', encoding='utf-8')
csvwriter.writeheader()
with sqlite3.connect(args.db) as connection:
with closing(connection.cursor()) as cursor:
for author, ateneo, authordata in authors:
entries = cursor.execute(QUERY, (author,ateneo)).fetchall()
if not entries:
print('Empty entry added for %s' % author)
csvwriter.writerow(process(authordata))
else:
inserted = set()
for entry in json.loads(entries[0][0]):
ID = entry.get('dc:identifier', '')
print('%s\t%s' % (author, ID))
if ID in inserted:
print(' * duplicate ignored *')
else:
inserted.add(ID)
csvwriter.writerow(process(mergedicts(authordata, entry)))
print('\n[*] Converting csv to sqlite3db ...')
csv_to_db(args.output, args.outdb, 'products')
| mit | -8,302,143,502,810,306,000 | 37.881188 | 90 | 0.578813 | false |
j-rock/cs598ps | src/py/main.py | 1 | 4059 | import sys
import time
from cssigps.offlineclassifier import *
from cssigps.dataset import *
from cssigps.feature import *
from cssigps.experiments import *
from get_dropbox_path import *
def print_usage():
"""
Print the usage for the main script.
"""
print("USAGE: use the run.sh or the main.py directly.")
print("")
print(" run.sh <EXPERIMENT_NUMBER>")
print(" python main.py <EXPERIMENT_NUMBER>")
if __name__ == '__main__':
# decide which experiment to run based on the command line or user-input
response = ""
if len(sys.argv) >= 2:
response=sys.argv[1]
if response in ["-h","--help"]:
print_usage()
quit()
else:
prompt = "Which experiment would you like to run? [0-2]"
response = raw_input(prompt)
# run experiment
if response == "0":
path=get_dropbox_path()+"old-test/"
run_experiment_0(path)
elif response == "1":
run_experiment_1(include_none=True)
elif response == "2":
run_experiment_2()
elif response == "3":
run_experiment_3()
elif response == "4":
run_experiment_4()
elif response == "5":
path=get_dropbox_path()+"vowels-test/"
run_offline_svm(path)
elif response == "S":
# run single class classifier
c = sys.argv[2]
f = sys.argv[3]
classes=["NONE"]
path=get_dropbox_path()+"yes-no-test/"
factory = FBankFeature()
# select the class
if c == "Y":
path=get_dropbox_path()+"yes-no-test/"
classes.append("Y")
elif c=="N":
path=get_dropbox_path()+"yes-no-test/"
classes.append("N")
elif c=="A":
path=get_dropbox_path()+"vowels-test/"
classes=["A","NONE"]
elif c=="E":
path=get_dropbox_path()+"vowels-test/"
classes=["E","NONE"]
elif c=="I":
path=get_dropbox_path()+"vowels-test/"
classes=["I","NONE"]
elif c=="O":
path=get_dropbox_path()+"vowels-test/"
classes=["O","NONE"]
elif c=="U":
path=get_dropbox_path()+"vowels-test/"
classes=["U","NONE"]
else:
print("class argument invalid")
quit()
# select the feature
if f == "fbank":
factory=FBankFeature()
elif f == "m" or f == "magnitude":
factory=MagnitudeFeature()
elif f == "t" or f == "template":
factory=MultiTemplateFeature(SampleSet(find_testsamples(path),classes=classes).class_rep())
else:
print("feature argument invalid")
samples = find_testsamples(path)
sample_set = SampleSet(samples,classes=classes)
sample_set.stats()
run_sample_experiment(sample_set,feat_factory=factory)
elif response == "M":
# run multi class classifier
c = sys.argv[2]
f = sys.argv[3]
classes=["NONE"]
path=get_dropbox_path()+"yes-no-test/"
factory = FBankFeature()
# select the class
if c == "Y":
path=get_dropbox_path()+"yes-no-test/"
classes=["Y","N","NONE"]
elif c=="A":
path=get_dropbox_path()+"vowels-test/"
classes=["A","E","I","O","U","NONE"]
else:
print("class argument invalid")
quit()
samples = find_testsamples(path)
sample_set = SampleSet(samples,classes=classes)
sample_set.stats()
# select the feature
if f == "fbank":
factory=FBankFeature()
elif f == "m" or f == "magnitude":
factory=MagnitudeFeature()
elif f == "t" or f == "template":
factory=MultiTemplateFeature(SampleSet(find_testsamples(path),classes=classes).class_rep())
else:
print("feature argument invalid")
run_sample_experiment(sample_set,feat_factory=factory)
else:
print("Invalid option. Aborting..")
| mit | -7,437,332,897,349,050,000 | 29.75 | 103 | 0.537078 | false |
jtaghiyar/kronos | setup.py | 1 | 1513 | '''
Created on Jul 10, 2014
@author: jtaghiyar
'''
import codecs
import os
import re
from setuptools import setup
def read(*paths):
here = os.path.dirname(os.path.abspath(__file__))
with open(os.path.join(here, *paths)) as f:
return f.read()
def get_version():
version_file = read("kronos", "kronos_version.py")
version_match = re.search(r"^kronos_version = ['\"]([^'\"]*)['\"]",
version_file, re.M)
if version_match:
return version_match.group(1)
raise RuntimeError("Unable to find version string.")
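# For reference (assumption about the expected file layout, not part of the
# original setup script): get_version() expects kronos/kronos_version.py to
# contain a line such as
#
#     kronos_version = '2.3.0'
#
# and returns the quoted string captured by the regular expression above.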
long_description = read('README.md')
setup(name='kronos_pipeliner',
version=get_version(),
description='A workflow assembler for genome analytics and informatics',
long_description=long_description,
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Science/Research",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 2.7",
"Topic :: Scientific/Engineering :: Bio-Informatics",
],
keywords='pipeline workflow bioinformatics kronos',
author='M. Jafar Taghiyar',
author_email='[email protected]',
url='https://github.com/jtaghiyar/kronos',
license='MIT',
packages=['kronos', 'templates'],
entry_points={'console_scripts':['kronos=kronos:main']},
install_requires = ['pyyaml>=3.11', 'ruffus==2.4.1']
)
| mit | -666,503,168,781,725,600 | 30.520833 | 78 | 0.621943 | false |
wdv4758h/ZipPy | edu.uci.python.benchmark/src/micro/boolean-logic.py | 1 | 1621 | # zwei 04/24/2014
# micro benchmark: method call polymorphic inspired by richards
import time
iteration = 50000
class Task(object):
def __init__(self, p, w, h):
self.packet_pending = p
self.task_waiting = w
self.task_holding = h
self.link = None
def isTaskHoldingOrWaiting(self):
return self.task_holding or (not self.packet_pending and self.task_waiting)
def isTaskHoldingOrWaiting(task_holding, packet_pending, task_waiting):
return task_holding or (not packet_pending and task_waiting)
TASK_LIST = [Task(False, False, True),
Task(False, True, False),
Task(True, True, False),
Task(True, False, True)]
def setupTaskQueue():
prev = None
for t in TASK_LIST:
t.link = prev
prev = t
return t
TASK_QUEUE = setupTaskQueue()
def dostuff():
total = 0
for i in range(iteration):
t = TASK_QUEUE
while t is not None:
if (t.isTaskHoldingOrWaiting()):
total += 1
t = t.link
return total
def noObjectDoStuff():
p = True
w = False
h = True
total = 0
for i in range(iteration):
h = isTaskHoldingOrWaiting(h, p, w)
if (isTaskHoldingOrWaiting(h, p, w)):
total += 1
return total
def measure(num):
print("Start timing...")
start = time.time()
for i in range(num): # 50000
result = dostuff()
print(result)
duration = "%.3f\n" % (time.time() - start)
print("boolean-logic: " + duration)
# warm up
for i in range(500):
dostuff()
measure(1000) | bsd-3-clause | -691,052,674,666,857,300 | 21.527778 | 83 | 0.586675 | false |
pelson/conda-build | tests/test_misc.py | 4 | 2003 | import json
from os.path import join
import pytest
from conda_build.utils import on_win
import conda_build._link as _link
from conda_build.conda_interface import PathType, EntityEncoder, CrossPlatformStLink
def test_pyc_f_2():
assert _link.pyc_f('sp/utils.py', (2, 7, 9)) == 'sp/utils.pyc'
def test_pyc_f_3():
for f, r in [
('sp/utils.py',
'sp/__pycache__/utils.cpython-34.pyc'),
('sp/foo/utils.py',
'sp/foo/__pycache__/utils.cpython-34.pyc'),
]:
assert _link.pyc_f(f, (3, 4, 2)) == r
def test_pathtype():
hardlink = PathType("hardlink")
assert str(hardlink) == "hardlink"
assert hardlink.__json__() == 'hardlink'
softlink = PathType("softlink")
assert str(softlink) == "softlink"
assert softlink.__json__() == "softlink"
def test_entity_encoder(tmpdir):
test_file = join(str(tmpdir), "test-file")
test_json = {"a": PathType("hardlink"), "b": 1}
with open(test_file, "w") as f:
json.dump(test_json, f, cls=EntityEncoder)
with open(test_file, "r") as f:
json_file = json.load(f)
assert json_file == {"a": "hardlink", "b": 1}
@pytest.mark.skipif(on_win, reason="link not available on win/py2.7")
def test_crossplatform_st_link(tmpdir):
from os import link
test_file = join(str(tmpdir), "test-file")
test_file_linked = join(str(tmpdir), "test-file-linked")
test_file_link = join(str(tmpdir), "test-file-link")
open(test_file, "a").close()
open(test_file_link, "a").close()
link(test_file_link, test_file_linked)
assert 1 == CrossPlatformStLink.st_nlink(test_file)
assert 2 == CrossPlatformStLink.st_nlink(test_file_link)
assert 2 == CrossPlatformStLink.st_nlink(test_file_linked)
@pytest.mark.skipif(not on_win, reason="already tested")
def test_crossplatform_st_link_on_win(tmpdir):
test_file = join(str(tmpdir), "test-file")
open(test_file, "a").close()
assert 1 == CrossPlatformStLink.st_nlink(test_file)
| bsd-3-clause | -808,085,511,508,831,500 | 30.296875 | 84 | 0.636046 | false |
BiznetGIO/horizon | openstack_dashboard/api/base.py | 1 | 12067 | # Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from collections import Sequence
import functools
from django.conf import settings
import semantic_version
import six
from horizon import exceptions
__all__ = ('APIResourceWrapper', 'APIDictWrapper',
'get_service_from_catalog', 'url_for',)
@functools.total_ordering
class Version(object):
def __init__(self, version):
self.version = semantic_version.Version(str(version), partial=True)
def __eq__(self, other):
return self.version == Version(other).version
def __lt__(self, other):
return self.version < Version(other).version
def __repr__(self):
return "Version('%s')" % self.version
def __str__(self):
return str(self.version)
def __hash__(self):
return hash(str(self.version))
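# Illustrative sketch (not part of the original module): Version wraps
# semantic_version and accepts plain numbers or strings, so comparisons work
# against either form.
#
#     Version(2) == '2'         # True: both parse to the same partial version
#     Version('2.0') < '3'      # True
#     Version('3.1') in {Version('3.1')}   # usable as a set/dict key via __hash__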
class APIVersionManager(object):
"""Object to store and manage API versioning data and utility methods."""
SETTINGS_KEY = "OPENSTACK_API_VERSIONS"
def __init__(self, service_type, preferred_version=None):
self.service_type = service_type
self.preferred = preferred_version
self._active = None
self.supported = {}
# As a convenience, we can drop in a placeholder for APIs that we
# have not yet needed to version. This is useful, for example, when
# panels such as the admin metadata_defs wants to check the active
# version even though it's not explicitly defined. Previously
# this caused a KeyError.
if self.preferred:
self.supported[self.preferred] = {"version": self.preferred}
@property
def active(self):
if self._active is None:
self.get_active_version()
return self._active
def load_supported_version(self, version, data):
version = Version(version)
self.supported[version] = data
def get_active_version(self):
if self._active is not None:
return self.supported[self._active]
key = getattr(settings, self.SETTINGS_KEY, {}).get(self.service_type)
if key is None:
# TODO(gabriel): support API version discovery here; we'll leave
# the setting in as a way of overriding the latest available
# version.
key = self.preferred
version = Version(key)
# Provide a helpful error message if the specified version isn't in the
# supported list.
if version not in self.supported:
choices = ", ".join(str(k) for k in six.iterkeys(self.supported))
msg = ('%s is not a supported API version for the %s service, '
' choices are: %s' % (version, self.service_type, choices))
raise exceptions.ConfigurationError(msg)
self._active = version
return self.supported[self._active]
def clear_active_cache(self):
self._active = None
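# Illustrative sketch (not part of the original module): the active version is
# normally resolved from the OPENSTACK_API_VERSIONS setting, e.g.
#
#     OPENSTACK_API_VERSIONS = {"identity": 3}
#
#     VERSIONS = APIVersionManager("identity", preferred_version=2.0)
#     VERSIONS.load_supported_version(2.0, {"version": 2.0})
#     VERSIONS.load_supported_version(3, {"version": 3})
#     VERSIONS.get_active_version()   # -> {"version": 3} with the setting above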
class APIResourceWrapper(object):
"""Simple wrapper for api objects.
Define _attrs on the child class and pass in the
api object as the only argument to the constructor
"""
_attrs = []
_apiresource = None # Make sure _apiresource is there even in __init__.
def __init__(self, apiresource):
self._apiresource = apiresource
def __getattribute__(self, attr):
try:
return object.__getattribute__(self, attr)
except AttributeError:
if attr not in self._attrs:
raise
# __getattr__ won't find properties
return getattr(self._apiresource, attr)
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__,
dict((attr, getattr(self, attr))
for attr in self._attrs
if hasattr(self, attr)))
def to_dict(self):
obj = {}
for key in self._attrs:
obj[key] = getattr(self._apiresource, key, None)
return obj
class APIDictWrapper(object):
"""Simple wrapper for api dictionaries
Some api calls return dictionaries. This class provides identical
behavior as APIResourceWrapper, except that it will also behave as a
dictionary, in addition to attribute accesses.
Attribute access is the preferred method of access, to be
consistent with api resource objects from novaclient.
"""
_apidict = {} # Make sure _apidict is there even in __init__.
def __init__(self, apidict):
self._apidict = apidict
def __getattribute__(self, attr):
try:
return object.__getattribute__(self, attr)
except AttributeError:
if attr not in self._apidict:
raise
return self._apidict[attr]
def __getitem__(self, item):
try:
return getattr(self, item)
except (AttributeError, TypeError) as e:
# caller is expecting a KeyError
raise KeyError(e)
def __contains__(self, item):
try:
return hasattr(self, item)
except TypeError:
return False
def get(self, item, default=None):
try:
return getattr(self, item)
except (AttributeError, TypeError):
return default
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self._apidict)
def to_dict(self):
return self._apidict
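# Illustrative usage (not part of the original module):
#
#     wrapped = APIDictWrapper({'id': '42', 'name': 'demo'})
#     wrapped.name                        # 'demo'  (attribute access, preferred)
#     wrapped['name']                     # 'demo'  (dict-style access also works)
#     wrapped.get('missing', 'default')   # 'default'
#     'id' in wrapped                     # True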
class Quota(object):
"""Wrapper for individual limits in a quota."""
def __init__(self, name, limit):
self.name = name
self.limit = limit
def __repr__(self):
return "<Quota: (%s, %s)>" % (self.name, self.limit)
class QuotaSet(Sequence):
"""Wrapper for client QuotaSet objects.
This turns the individual quotas into Quota objects
for easier handling/iteration.
`QuotaSet` objects support a mix of `list` and `dict` methods; you can use
the bracket notation (`qs["my_quota"] = 0`) to add new quota values, and
use the `get` method to retrieve a specific quota, but otherwise it
behaves much like a list or tuple, particularly in supporting iteration.
"""
def __init__(self, apiresource=None):
self.items = []
if apiresource:
if hasattr(apiresource, '_info'):
items = apiresource._info.items()
else:
items = apiresource.items()
for k, v in items:
if k == 'id':
continue
self[k] = v
def __setitem__(self, k, v):
v = int(v) if v is not None else v
q = Quota(k, v)
self.items.append(q)
def __getitem__(self, index):
return self.items[index]
def __add__(self, other):
"""Merge another QuotaSet into this one.
Existing quotas are not overridden.
"""
if not isinstance(other, QuotaSet):
msg = "Can only add QuotaSet to QuotaSet, " \
"but received %s instead" % type(other)
raise ValueError(msg)
for item in other:
if self.get(item.name).limit is None:
self.items.append(item)
return self
def __len__(self):
return len(self.items)
def __repr__(self):
return repr(self.items)
def get(self, key, default=None):
match = [quota for quota in self.items if quota.name == key]
return match.pop() if len(match) else Quota(key, default)
def add(self, other):
return self.__add__(other)
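# Illustrative usage (not part of the original module):
#
#     quotas = QuotaSet()
#     quotas['instances'] = '10'          # stored as Quota('instances', 10)
#     quotas.get('instances').limit       # 10
#     quotas.get('volumes', 5).limit      # 5 (fallback Quota when key is absent)
#     [q.name for q in quotas]            # ['instances']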
def get_service_from_catalog(catalog, service_type):
if catalog:
for service in catalog:
if 'type' not in service:
continue
if service['type'] == service_type:
return service
return None
def get_version_from_service(service):
if service and service.get('endpoints'):
endpoint = service['endpoints'][0]
if 'interface' in endpoint:
return 3
else:
return 2.0
return 2.0
# Mapping of V2 Catalog Endpoint_type to V3 Catalog Interfaces
ENDPOINT_TYPE_TO_INTERFACE = {
'publicURL': 'public',
'internalURL': 'internal',
'adminURL': 'admin',
}
def get_url_for_service(service, region, endpoint_type):
if 'type' not in service:
return None
identity_version = get_version_from_service(service)
service_endpoints = service.get('endpoints', [])
available_endpoints = [endpoint for endpoint in service_endpoints
if region == _get_endpoint_region(endpoint)]
"""if we are dealing with the identity service and there is no endpoint
in the current region, it is okay to use the first endpoint for any
identity service endpoints and we can assume that it is global
"""
if service['type'] == 'identity' and not available_endpoints:
available_endpoints = [endpoint for endpoint in service_endpoints]
for endpoint in available_endpoints:
try:
if identity_version < 3:
return endpoint.get(endpoint_type)
else:
interface = \
ENDPOINT_TYPE_TO_INTERFACE.get(endpoint_type, '')
if endpoint.get('interface') == interface:
return endpoint.get('url')
except (IndexError, KeyError):
"""it could be that the current endpoint just doesn't match the
type, continue trying the next one
"""
pass
return None
def url_for(request, service_type, endpoint_type=None, region=None):
endpoint_type = endpoint_type or getattr(settings,
'OPENSTACK_ENDPOINT_TYPE',
'publicURL')
fallback_endpoint_type = getattr(settings, 'SECONDARY_ENDPOINT_TYPE', None)
catalog = request.user.service_catalog
service = get_service_from_catalog(catalog, service_type)
if service:
if not region:
region = request.user.services_region
url = get_url_for_service(service,
region,
endpoint_type)
if not url and fallback_endpoint_type:
url = get_url_for_service(service,
region,
fallback_endpoint_type)
if url:
return url
raise exceptions.ServiceCatalogException(service_type)
def is_service_enabled(request, service_type):
service = get_service_from_catalog(request.user.service_catalog,
service_type)
if service:
region = request.user.services_region
for endpoint in service.get('endpoints', []):
if 'type' not in service:
continue
# ignore region for identity
if service['type'] == 'identity' or \
_get_endpoint_region(endpoint) == region:
return True
return False
def _get_endpoint_region(endpoint):
"""Common function for getting the region from endpoint.
In Keystone V3, region has been deprecated in favor of
region_id.
This method provides a way to get region that works for
both Keystone V2 and V3.
"""
return endpoint.get('region_id') or endpoint.get('region')
| apache-2.0 | 6,872,752,707,314,681,000 | 31.790761 | 79 | 0.59642 | false |
CLVsol/oehealth_gs | oehealth_gs_medicament/__openerp__.py | 1 | 2003 | # -*- encoding: utf-8 -*-
################################################################################
# #
# Copyright (C) 2013-Today Carlos Eduardo Vercelino - CLVsol #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU Affero General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU Affero General Public License for more details. #
# #
# You should have received a copy of the GNU Affero General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
################################################################################
{
'name': 'OpenERP Health: Medicament - GaranteSul',
'version': '1.0.0',
'author': 'Carlos Eduardo Vercelino - CLVsol',
'category': 'Generic Modules/Others',
'license': 'AGPL-3',
'website': 'http://oehealth.org',
'description': '''
''',
'images': [],
'depends': ['oehealth_medicament',
'oehealth_gs_base',
],
'data': [],
'demo': [],
'test': [],
'init_xml': ['oehealth_medicament_view.xml',
],
'update_xml': [],
'installable': True,
'active': False,
'css': [],
}
| agpl-3.0 | -2,859,585,841,854,151,000 | 45.581395 | 80 | 0.423365 | false |
ozmartian/tvlinker | tvlinker/threads.py | 1 | 11150 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import sys
import time
from datetime import datetime, timedelta
from tzlocal import get_localzone
import pytz
import requests
from PyQt5.QtCore import QObject, QSettings, QThread, pyqtSignal, pyqtSlot
from PyQt5.QtWidgets import QMessageBox, qApp
from bs4 import BeautifulSoup
from requests.exceptions import HTTPError
import cloudscraper
from tvlinker.filesize import alternative, size
try:
# noinspection PyPackageRequirements
import simplejson as json
except ImportError:
import json
class ShadowSocks:
config = {
'ssocks': {
'procs': ['ss-qt5', 'sslocal'],
'proxies': {
'http': 'socks5://127.0.0.1:1080',
'https': 'socks5://127.0.0.1:1080'
},
},
'v2ray': {
'procs': ['v2ray'],
'proxies': {
'http': 'socks5://127.0.0.1:10808',
'https': 'socks5://127.0.0.1:10808'
}
}
}
@staticmethod
def detect() -> str:
if sys.platform.startswith('linux'):
ptypes = ShadowSocks.config.keys()
ps = os.popen('ps -Af').read()
for ptype in ptypes:
procs = ShadowSocks.config[ptype]['procs']
for p in procs:
if ps.count(p):
return ptype
return None
@staticmethod
def proxies() -> dict:
proxy_type = ShadowSocks.detect()
return ShadowSocks.config[proxy_type]['proxies'] if proxy_type is not None else {}
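# Illustrative usage (not part of the original module): the detected proxy map
# can be passed straight to a requests/cloudscraper session.
#
#     proxies = ShadowSocks.proxies()
#     # {} when no ss-qt5/sslocal/v2ray process is running, otherwise e.g.
#     # {'http': 'socks5://127.0.0.1:1080', 'https': 'socks5://127.0.0.1:1080'}
#     session = requests.Session()
#     session.proxies = proxies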
class ScrapeWorker(QObject):
addRow = pyqtSignal(list)
workFinished = pyqtSignal()
def __init__(self, source_url: str, useragent: str, maxpages: int):
super(ScrapeWorker, self).__init__()
self.maxpages = maxpages
self.source_url = source_url
self.user_agent = useragent
self.scraper = cloudscraper.create_scraper()
self.scraper.proxies = ShadowSocks.proxies()
self.tz_format = '%b %d %Y %H:%M'
self.tz_local = get_localzone()
self.complete = False
def scrape(self, pagenum: int) -> None:
try:
url = self.source_url.format(pagenum + 1)
req = self.scraper.get(url)
bs = BeautifulSoup(req.text, 'lxml')
posts = bs('div', class_='post')
for post in posts:
dt_utc = datetime.strptime(post.find('div', class_='p-c p-c-time').get_text().strip(), self.tz_format)
# TODO: fix hardcoded DST adjustment
dt_local = dt_utc.replace(tzinfo=pytz.utc).astimezone(self.tz_local) - timedelta(hours=2)
dlsize = post.find('h2').get_text().strip()
table_row = [
dt_local.strftime(self.tz_format),
post.find('a', class_='p-title').get('href').strip(),
post.find('a', class_='p-title').get_text().strip(),
dlsize[dlsize.rfind('(') + 1:len(dlsize) - 1]
]
self.addRow.emit(table_row)
except HTTPError:
            sys.stderr.write(str(sys.exc_info()[0]))
            # noinspection PyTypeChecker
            QMessageBox.critical(None, 'ERROR NOTIFICATION', str(sys.exc_info()[0]))
# self.exit()
@pyqtSlot()
def begin(self):
for page in range(self.maxpages):
if QThread.currentThread().isInterruptionRequested():
return
self.scrape(page)
self.complete = True
self.workFinished.emit()
class HostersThread(QThread):
setHosters = pyqtSignal(list)
noLinks = pyqtSignal()
def __init__(self, link_url: str, useragent: str):
QThread.__init__(self)
self.link_url = link_url
self.user_agent = useragent
self.scraper = cloudscraper.create_scraper()
self.scraper.proxies = ShadowSocks.proxies()
def __del__(self) -> None:
self.wait()
def get_hoster_links(self) -> None:
try:
req = self.scraper.get(self.link_url)
bs = BeautifulSoup(req.text, 'lxml')
links = bs.select('div.post h2[style="text-align: center;"]')
self.setHosters.emit(links)
except HTTPError:
print(sys.exc_info()[0])
# noinspection PyTypeChecker
            QMessageBox.critical(None, 'ERROR NOTIFICATION', str(sys.exc_info()[0]))
QThread.currentThread().quit()
except IndexError:
self.noLinks.emit()
QThread.currentThread().quit()
def run(self) -> None:
self.get_hoster_links()
class RealDebridThread(QThread):
unrestrictedLink = pyqtSignal(str)
supportedHosts = pyqtSignal(dict)
hostStatus = pyqtSignal(dict)
errorMsg = pyqtSignal(list)
class RealDebridAction:
UNRESTRICT_LINK = 0,
SUPPORTED_HOSTS = 1,
HOST_STATUS = 2
def __init__(self,
settings: QSettings,
api_url: str,
link_url: str,
action: RealDebridAction = RealDebridAction.UNRESTRICT_LINK,
check_host: str = None):
QThread.__init__(self)
self.api_url = api_url
self.api_token = settings.value('realdebrid_apitoken')
self.api_proxy = settings.value('realdebrid_apiproxy', False, bool)
self.link_url = link_url
self.action = action
self.check_host = check_host
self.proxies = ShadowSocks.proxies() if self.api_proxy else {}
def __del__(self):
self.wait()
def post(self, endpoint: str, payload: object = None) -> dict:
try:
res = requests.post('{0}{1}?auth_token={2}'.format(self.api_url, endpoint, self.api_token),
data=payload, proxies=self.proxies)
return res.json()
except HTTPError:
print(sys.exc_info())
self.errorMsg.emit([
'ERROR NOTIFICATION',
'<h3>Real-Debrid API Error</h3>'
'A problem occurred whilst communicating with Real-Debrid. Please check your '
'Internet connection.<br/><br/>'
'<b>ERROR LOG:</b><br/>(Error Code %s) %s<br/>%s' %
(qApp.applicationName(), HTTPError.code, HTTPError.reason)
])
# self.exit()
def unrestrict_link(self) -> None:
jsonres = self.post(endpoint='/unrestrict/link', payload={'link': self.link_url})
if 'download' in jsonres.keys():
self.unrestrictedLink.emit(jsonres['download'])
else:
self.errorMsg.emit([
'REALDEBRID ERROR',
'<h3>Could not unrestrict link</h3>The hoster is most likely '
'down, please try again later.<br/><br/>{}'.format(jsonres)
])
def supported_hosts(self) -> None:
jsonres = self.post(endpoint='/hosts')
self.supportedHosts.emit(jsonres)
# def host_status(self, host: str) -> None:
# jsonres = self.post(endpoint='/hosts/status')
# self.hostStatus.emit(jsonres)
def run(self) -> None:
if self.action == RealDebridThread.RealDebridAction.UNRESTRICT_LINK:
self.unrestrict_link()
elif self.action == RealDebridThread.RealDebridAction.SUPPORTED_HOSTS:
self.supported_hosts()
# elif self.action == RealDebridThread.HOST_STATUS:
# self.host_status(self.check_host)
class Aria2Thread(QThread):
aria2Confirmation = pyqtSignal(bool)
def __init__(self, settings: QSettings, link_url: str):
QThread.__init__(self)
self.rpc_host = settings.value('aria2_rpc_host')
self.rpc_port = settings.value('aria2_rpc_port')
self.rpc_secret = settings.value('aria2_rpc_secret')
self.rpc_username = settings.value('aria2_rpc_username')
self.rpc_password = settings.value('aria2_rpc_password')
self.link_url = link_url
def __del__(self) -> None:
self.wait()
def add_uri(self) -> None:
user, passwd = '', ''
if len(self.rpc_username) > 0 and len(self.rpc_password) > 0:
user = self.rpc_username
passwd = self.rpc_password
elif len(self.rpc_secret) > 0:
user = 'token'
passwd = self.rpc_secret
aria2_endpoint = '%s:%s/jsonrpc' % (self.rpc_host, self.rpc_port)
headers = {'Content-Type': 'application/json'}
payload = json.dumps(
{
'jsonrpc': '2.0',
'id': 1,
'method': 'aria2.addUri',
'params': ['%s:%s' % (user, passwd), [self.link_url]]
},
sort_keys=False).encode('utf-8')
try:
from urllib.parse import urlencode
from urllib.request import Request, urlopen
req = Request(aria2_endpoint, headers=headers, data=payload)
res = urlopen(req).read().decode('utf-8')
jsonres = json.loads(res)
# res = requests.post(aria2_endpoint, headers=headers, data=payload)
# jsonres = res.json()
self.aria2Confirmation.emit('result' in jsonres.keys())
except HTTPError:
print(sys.exc_info())
# noinspection PyTypeChecker
            QMessageBox.critical(None, 'ERROR NOTIFICATION', str(sys.exc_info()), QMessageBox.Ok)
self.aria2Confirmation.emit(False)
# self.exit()
def run(self) -> None:
self.add_uri()
class DownloadThread(QThread):
dlComplete = pyqtSignal()
dlProgress = pyqtSignal(int)
dlProgressTxt = pyqtSignal(str)
def __init__(self, link_url: str, dl_path: str):
QThread.__init__(self)
self.download_link = link_url
self.download_path = dl_path
self.cancel_download = False
self.proxies = ShadowSocks.proxies()
def __del__(self) -> None:
self.wait()
def download_file(self) -> None:
req = requests.get(self.download_link, stream=True, proxies=self.proxies)
filesize = int(req.headers['Content-Length'])
filename = os.path.basename(self.download_path)
downloadedChunk = 0
blockSize = 8192
        start = time.perf_counter()  # time.clock() was removed in Python 3.8
with open(self.download_path, 'wb') as f:
for chunk in req.iter_content(chunk_size=blockSize):
if self.cancel_download or not chunk:
req.close()
break
f.write(chunk)
downloadedChunk += len(chunk)
progress = float(downloadedChunk) / filesize
self.dlProgress.emit(progress * 100)
                dlspeed = downloadedChunk // (time.perf_counter() - start) / 1000
progressTxt = '<b>Downloading {0}</b>:<br/>{1} of <b>{3}</b> [{2:.2%}] [{4} kbps]' \
.format(filename, downloadedChunk, progress, size(filesize, system=alternative), dlspeed)
self.dlProgressTxt.emit(progressTxt)
self.dlComplete.emit()
def run(self) -> None:
self.download_file()
| gpl-3.0 | 5,962,978,612,205,332,000 | 34.623003 | 118 | 0.562332 | false |
xunilrj/sandbox | courses/course-edx-dat2031x/Simulation.py | 1 | 2680 | # -*- coding: utf-8 -*-
def sim_normal(nums, mean = 600, sd = 30):
import numpy as np
import numpy.random as nr
for n in nums:
dist = nr.normal(loc = mean, scale = sd, size = n)
titl = 'Normal distribution with ' + str(n) + ' values'
print('Summary for ' + str(n) + ' samples')
print(dist_summary(dist, titl))
        print('Empirical 95% CIs')
print(np.percentile(dist, [2.5, 97.5]))
print(' ')
return('Done!')
def sim_poisson(nums, mean = 600):
import numpy as np
import numpy.random as nr
for n in nums:
dist = nr.poisson(lam = mean, size = n)
titl = 'Poisson distribution with ' + str(n) + ' values'
print(dist_summary(dist, titl))
        print('Empirical 95% CIs')
print(np.percentile(dist, [2.5, 97.5]))
print(' ')
return('Done!')
def dist_summary(dist, names = 'dist_name'):
import pandas as pd
import matplotlib.pyplot as plt
ser = pd.Series(dist)
fig = plt.figure(1, figsize=(9, 6))
ax = fig.gca()
ser.hist(ax = ax, bins = 120)
ax.set_title('Frequency distribution of ' + names)
ax.set_ylabel('Frequency')
plt.show()
return(ser.describe())
def gen_profits(num):
import numpy.random as nr
unif = nr.uniform(size = num)
out = [5 if x < 0.3 else (3.5 if x < 0.6 else 4) for x in unif]
return(out)
def gen_tips(num):
import numpy.random as nr
unif = nr.uniform(size = num)
out = [0 if x < 0.5 else (0.25 if x < 0.7
else (1.0 if x < 0.9 else 2.0)) for x in unif]
return(out)
def sim_lemonade(num, mean = 600, sd = 30, pois = False):
## Simulate the profits and tips for
## a lemonade stand.
import numpy.random as nr
## number of customer arrivals
if pois:
arrivals = nr.poisson(lam = mean, size = num)
else:
arrivals = nr.normal(loc = mean, scale = sd, size = num)
print(dist_summary(arrivals, 'customer arrivals per day'))
## Compute distibution of average profit per arrival
proft = gen_profits(num)
print(dist_summary(proft, 'profit per arrival'))
## Total profits are profit per arrival
## times number of arrivals.
total_profit = arrivals * proft
print(dist_summary(total_profit, 'total profit per day'))
## Compute distribution of average tips per arrival
tps = gen_tips(num)
print(dist_summary(tps, 'tips per arrival'))
## Compute average tips per day
total_tips = arrivals * tps
print(dist_summary(total_tips, 'total tips per day'))
## Compute total profits plus total tips.
total_take = total_profit + total_tips
return(dist_summary(total_take, 'total net per day'))
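# Example calls (illustrative, not part of the original script):
#
#   sim_normal([100, 1000, 10000])       # arrival counts drawn from a normal
#   sim_poisson([100, 1000, 10000])      # arrival counts drawn from a Poisson
#   sim_lemonade(10000)                  # full profit + tips simulation
#   sim_lemonade(10000, pois=True)       # same, with Poisson arrivals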
| apache-2.0 | 8,129,955,515,365,953,000 | 29.804598 | 67 | 0.614179 | false |
samdroid-apps/sugar-toolkit-gtk3 | src/sugar3/bundle/activitybundle.py | 1 | 14091 | # Copyright (C) 2007, Red Hat, Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
"""Sugar activity bundles
UNSTABLE.
"""
from ConfigParser import ConfigParser
from locale import normalize
import os
import shutil
import tempfile
import logging
from sugar3 import env
from sugar3.bundle.bundle import Bundle, \
MalformedBundleException, NotInstalledException
from sugar3.bundle.bundleversion import NormalizedVersion
from sugar3.bundle.bundleversion import InvalidVersionError
def _expand_lang(locale):
# Private method from gettext.py
locale = normalize(locale)
COMPONENT_CODESET = 1 << 0
COMPONENT_TERRITORY = 1 << 1
COMPONENT_MODIFIER = 1 << 2
# split up the locale into its base components
mask = 0
pos = locale.find('@')
if pos >= 0:
modifier = locale[pos:]
locale = locale[:pos]
mask |= COMPONENT_MODIFIER
else:
modifier = ''
pos = locale.find('.')
if pos >= 0:
codeset = locale[pos:]
locale = locale[:pos]
mask |= COMPONENT_CODESET
else:
codeset = ''
pos = locale.find('_')
if pos >= 0:
territory = locale[pos:]
locale = locale[:pos]
mask |= COMPONENT_TERRITORY
else:
territory = ''
language = locale
ret = []
for i in range(mask + 1):
if not (i & ~mask): # if all components for this combo exist ...
val = language
if i & COMPONENT_TERRITORY:
val += territory
if i & COMPONENT_CODESET:
val += codeset
if i & COMPONENT_MODIFIER:
val += modifier
ret.append(val)
ret.reverse()
return ret
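# For reference (illustrative, not part of the original module): the expansion
# yields candidates from most to least specific, e.g.
#
#     _expand_lang('en_US.UTF-8')
#     # -> ['en_US.UTF-8', 'en_US', 'en.UTF-8', 'en']
#
# which is the order used below when looking for a matching activity.linfo.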
class ActivityBundle(Bundle):
"""A Sugar activity bundle
See http://wiki.laptop.org/go/Activity_bundles for details
"""
MIME_TYPE = 'application/vnd.olpc-sugar'
_zipped_extension = '.xo'
_unzipped_extension = '.activity'
_infodir = 'activity'
def __init__(self, path, translated=True):
Bundle.__init__(self, path)
self.activity_class = None
self.bundle_exec = None
self._name = None
self._icon = None
self._bundle_id = None
self._mime_types = None
self._show_launcher = True
self._tags = None
self._activity_version = '0'
self._summary = None
self._single_instance = False
info_file = self.get_file('activity/activity.info')
if info_file is None:
raise MalformedBundleException('No activity.info file')
self._parse_info(info_file)
if translated:
linfo_file = self._get_linfo_file()
if linfo_file:
self._parse_linfo(linfo_file)
def _parse_info(self, info_file):
cp = ConfigParser()
cp.readfp(info_file)
section = 'Activity'
if cp.has_option(section, 'bundle_id'):
self._bundle_id = cp.get(section, 'bundle_id')
else:
if cp.has_option(section, 'service_name'):
self._bundle_id = cp.get(section, 'service_name')
logging.error('ATTENTION: service_name property in the '
'activity.info file is deprecated, should be '
' changed to bundle_id')
else:
raise MalformedBundleException(
'Activity bundle %s does not specify a bundle id' %
self._path)
if cp.has_option(section, 'name'):
self._name = cp.get(section, 'name')
else:
raise MalformedBundleException(
'Activity bundle %s does not specify a name' % self._path)
if cp.has_option(section, 'exec'):
self.bundle_exec = cp.get(section, 'exec')
else:
raise MalformedBundleException(
'Activity bundle %s must specify either class or exec' %
self._path)
if cp.has_option(section, 'mime_types'):
mime_list = cp.get(section, 'mime_types').strip(';')
self._mime_types = [mime.strip() for mime in mime_list.split(';')]
if cp.has_option(section, 'show_launcher'):
if cp.get(section, 'show_launcher') == 'no':
self._show_launcher = False
if cp.has_option(section, 'tags'):
tag_list = cp.get(section, 'tags').strip(';')
self._tags = [tag.strip() for tag in tag_list.split(';')]
if cp.has_option(section, 'icon'):
self._icon = cp.get(section, 'icon')
if cp.has_option(section, 'activity_version'):
version = cp.get(section, 'activity_version')
try:
NormalizedVersion(version)
except InvalidVersionError:
raise MalformedBundleException(
'Activity bundle %s has invalid version number %s' %
(self._path, version))
self._activity_version = version
if cp.has_option(section, 'summary'):
self._summary = cp.get(section, 'summary')
if cp.has_option(section, 'single_instance'):
if cp.get(section, 'single_instance') == 'yes':
self._single_instance = True
def _get_linfo_file(self):
# Using method from gettext.py, first find languages from environ
languages = []
for envar in ('LANGUAGE', 'LC_ALL', 'LC_MESSAGES', 'LANG'):
val = os.environ.get(envar)
if val:
languages = val.split(':')
break
# Next, normalize and expand the languages
nelangs = []
for lang in languages:
for nelang in _expand_lang(lang):
if nelang not in nelangs:
nelangs.append(nelang)
# Finally, select a language
for lang in nelangs:
linfo_path = os.path.join('locale', lang, 'activity.linfo')
linfo_file = self.get_file(linfo_path)
if linfo_file is not None:
return linfo_file
return None
def _parse_linfo(self, linfo_file):
cp = ConfigParser()
cp.readfp(linfo_file)
section = 'Activity'
if cp.has_option(section, 'name'):
self._name = cp.get(section, 'name')
if cp.has_option(section, 'summary'):
self._summary = cp.get(section, 'summary')
if cp.has_option(section, 'tags'):
tag_list = cp.get(section, 'tags').strip(';')
self._tags = [tag.strip() for tag in tag_list.split(';')]
def get_locale_path(self):
"""Get the locale path inside the (installed) activity bundle."""
if self._zip_file is not None:
raise NotInstalledException
return os.path.join(self._path, 'locale')
def get_icons_path(self):
"""Get the icons path inside the (installed) activity bundle."""
if self._zip_file is not None:
raise NotInstalledException
return os.path.join(self._path, 'icons')
def get_name(self):
"""Get the activity user-visible name."""
return self._name
def get_bundle_id(self):
"""Get the activity bundle id"""
return self._bundle_id
def get_icon(self):
"""Get the activity icon name"""
# FIXME: this should return the icon data, not a filename, so that
# we don't need to create a temp file in the zip case
icon_path = os.path.join('activity', self._icon + '.svg')
if self._zip_file is None:
return os.path.join(self._path, icon_path)
else:
icon_data = self.get_file(icon_path).read()
temp_file, temp_file_path = tempfile.mkstemp(prefix=self._icon,
suffix='.svg')
os.write(temp_file, icon_data)
os.close(temp_file)
return temp_file_path
def get_activity_version(self):
"""Get the activity version"""
return self._activity_version
def get_command(self):
"""Get the command to execute to launch the activity factory"""
if self.bundle_exec:
command = os.path.expandvars(self.bundle_exec)
else:
command = 'sugar-activity ' + self.activity_class
return command
def get_mime_types(self):
"""Get the MIME types supported by the activity"""
return self._mime_types
def get_tags(self):
"""Get the tags that describe the activity"""
return self._tags
def get_summary(self):
"""Get the summary that describe the activity"""
return self._summary
def get_single_instance(self):
"""Get whether there should be a single instance for the activity"""
return self._single_instance
def get_show_launcher(self):
"""Get whether there should be a visible launcher for the activity"""
return self._show_launcher
def install(self):
install_dir = env.get_user_activities_path()
self._unzip(install_dir)
install_path = os.path.join(install_dir, self._zip_root_dir)
self.install_mime_type(install_path)
return install_path
def install_mime_type(self, install_path):
""" Update the mime type database and install the mime type icon
"""
xdg_data_home = os.getenv('XDG_DATA_HOME',
os.path.expanduser('~/.local/share'))
mime_path = os.path.join(install_path, 'activity', 'mimetypes.xml')
if os.path.isfile(mime_path):
mime_dir = os.path.join(xdg_data_home, 'mime')
mime_pkg_dir = os.path.join(mime_dir, 'packages')
if not os.path.isdir(mime_pkg_dir):
os.makedirs(mime_pkg_dir)
installed_mime_path = os.path.join(mime_pkg_dir,
'%s.xml' % self._bundle_id)
self._symlink(mime_path, installed_mime_path)
os.spawnlp(os.P_WAIT, 'update-mime-database',
'update-mime-database', mime_dir)
mime_types = self.get_mime_types()
if mime_types is not None:
installed_icons_dir = \
os.path.join(xdg_data_home,
'icons/sugar/scalable/mimetypes')
if not os.path.isdir(installed_icons_dir):
os.makedirs(installed_icons_dir)
for mime_type in mime_types:
mime_icon_base = os.path.join(install_path, 'activity',
mime_type.replace('/', '-'))
svg_file = mime_icon_base + '.svg'
info_file = mime_icon_base + '.icon'
self._symlink(svg_file,
os.path.join(installed_icons_dir,
os.path.basename(svg_file)))
self._symlink(info_file,
os.path.join(installed_icons_dir,
os.path.basename(info_file)))
def _symlink(self, src, dst):
if not os.path.isfile(src):
return
if not os.path.islink(dst) and os.path.exists(dst):
raise RuntimeError('Do not remove %s if it was not '
'installed by sugar', dst)
logging.debug('Link resource %s to %s', src, dst)
if os.path.lexists(dst):
logging.debug('Relink %s', dst)
os.unlink(dst)
os.symlink(src, dst)
def uninstall(self, force=False, delete_profile=False):
install_path = self.get_path()
if os.path.islink(install_path):
# Don't remove the actual activity dir if it's a symbolic link
# because we may be removing user data.
os.unlink(install_path)
return
xdg_data_home = os.getenv('XDG_DATA_HOME',
os.path.expanduser('~/.local/share'))
mime_dir = os.path.join(xdg_data_home, 'mime')
installed_mime_path = os.path.join(mime_dir, 'packages',
'%s.xml' % self._bundle_id)
if os.path.exists(installed_mime_path):
os.remove(installed_mime_path)
os.spawnlp(os.P_WAIT, 'update-mime-database',
'update-mime-database', mime_dir)
mime_types = self.get_mime_types()
if mime_types is not None:
installed_icons_dir = \
os.path.join(xdg_data_home,
'icons/sugar/scalable/mimetypes')
if os.path.isdir(installed_icons_dir):
for f in os.listdir(installed_icons_dir):
path = os.path.join(installed_icons_dir, f)
if os.path.islink(path) and \
os.readlink(path).startswith(install_path):
os.remove(path)
if delete_profile:
bundle_profile_path = env.get_profile_path(self._bundle_id)
if os.path.exists(bundle_profile_path):
os.chmod(bundle_profile_path, 0775)
shutil.rmtree(bundle_profile_path, ignore_errors=True)
self._uninstall(install_path)
def is_user_activity(self):
return self.get_path().startswith(env.get_user_activities_path())
| lgpl-2.1 | 4,325,474,794,642,606,600 | 34.583333 | 78 | 0.560145 | false |
specify/specify7 | specifyweb/stored_queries/tests.py | 1 | 11203 | from sqlalchemy import orm
from unittest import skip
from specifyweb.specify.api_tests import ApiTests
from .queryfieldspec import QueryFieldSpec
from . import models
@skip("These tests are out of date.")
class StoredQueriesTests(ApiTests):
# def setUp(self):
# super(StoredQueriesTests, self).setUp()
# self.q = models.Spquery.objects.create(
# contextname='CollectionObject',
# contexttableid=1,
# name='test query',
# specifyuser=self.specifyuser)
def test_id_field(self):
self.assertEqual(models.Taxon._id, 'taxonId')
def test_basic(self):
fs = FieldSpec(field_name='lastName',
date_part=None,
root_table=models.CollectionObject,
join_path=[('cataloger', models.Agent)],
is_relation=False,
op_num=1,
value='Bentley',
negate=False,
display=True,
sort_type=0,
spqueryfieldid=None)
q, f = fs.add_to_query(orm.Query(models.CollectionObject.collectionObjectId))
sql = str(q)
self.assertTrue('WHERE agent_1."LastName" = :LastName_1' in sql)
def test_year_equal_predicate(self):
fs = FieldSpec(field_name='startDate',
date_part='year',
root_table=models.CollectionObject,
join_path=[('collectingEvent', models.CollectingEvent)],
is_relation=False,
op_num=1,
value='2000',
negate=False,
display=True,
sort_type=0,
spqueryfieldid=None)
q, f = fs.add_to_query(orm.Query(models.CollectionObject.collectionObjectId))
sql = str(q)
self.assertTrue('WHERE EXTRACT(year FROM collectingevent_1."StartDate") = :param_1' in sql)
def test_tree_predicate(self):
fs = FieldSpec(field_name='Family',
date_part=None,
root_table=models.Taxon,
join_path=[],
is_relation=False,
op_num=1,
value='Percidae',
negate=False,
display=True,
sort_type=0,
spqueryfieldid=None)
q, f = fs.add_to_query(orm.Query(models.Taxon.taxonId))
sql = str(q)
self.assertEqual(sql,
'SELECT taxon."TaxonID" AS "taxon_TaxonID" \n'
'FROM taxon '
'JOIN taxon AS taxon_1 '
'ON taxon."TaxonTreeDefID" = taxon_1."TaxonTreeDefID" '
'AND taxon."NodeNumber" BETWEEN taxon_1."NodeNumber" AND taxon_1."HighestChildNodeNumber" '
'JOIN taxontreedefitem AS taxontreedefitem_1 '
'ON taxontreedefitem_1."TaxonTreeDefItemID" = taxon_1."TaxonTreeDefItemID" \n'
'WHERE taxontreedefitem_1."Name" = :Name_1 AND taxon_1."Name" = :Name_2')
# def test_month_between_predicate(self):
# self.q.fields.create(
# fieldname='startDate',
# isdisplay=True,
# isnot=False,
# operstart=9,
# position=0,
# sorttype=0,
# startvalue='3,9',
# stringid='1,10.collectingevent.startDateNumericMonth',
# tablelist='1,10')
# qs = execute(self.q)
# sql, params = qs.query.sql_with_params()
# self.assertTrue('WHERE (MONTH("collectingevent"."StartDate") BETWEEN %s and %s AND '
# '"collectingevent"."StartDatePrecision" IN (%s, %s))' in sql)
# self.assertEqual(params, (3, 9, 1, 2))
def test_date_part_filter_combined(self):
fs1 = FieldSpec(field_name='startDate',
date_part='year',
root_table=models.CollectionObject,
join_path=[('collectingEvent', models.CollectingEvent)],
is_relation=False,
op_num=1,
value='2000',
negate=False,
display=True,
sort_type=0,
spqueryfieldid=None)
fs2 = FieldSpec(field_name='lastName',
date_part=None,
root_table=models.CollectionObject,
join_path=[('cataloger', models.Agent)],
is_relation=False,
op_num=1,
value='Bentley',
negate=False,
display=True,
sort_type=0,
spqueryfieldid=None)
q = orm.Query(models.CollectionObject.collectionObjectId)
q, f1 = fs1.add_to_query(q)
q, f2 = fs2.add_to_query(q)
sql = str(q)
self.assertTrue('agent_1."LastName" = :LastName_1' in sql)
self.assertTrue('EXTRACT(year FROM collectingevent_1."StartDate") = :param_1' in sql)
# def test_year_between_predicate(self):
# self.q.fields.create(
# fieldname='startDate',
# isdisplay=True,
# isnot=False,
# operstart=9,
# position=0,
# sorttype=0,
# startvalue='2000,1990',
# stringid='1,10.collectingevent.startDateNumericYear',
# tablelist='1,10')
# qs = execute(self.q)
# sql, params = qs.query.sql_with_params()
# self.assertTrue('WHERE YEAR("collectingevent"."StartDate") BETWEEN %s and %s' in sql)
# self.assertEqual(params, (2000, 1990))
# def test_year_in_predicate(self):
# self.q.fields.create(
# fieldname='startDate',
# isdisplay=True,
# isnot=False,
# operstart=10,
# position=0,
# sorttype=0,
# startvalue='2000,1990,1980',
# stringid='1,10.collectingevent.startDateNumericYear',
# tablelist='1,10')
# qs = execute(self.q)
# sql, params = qs.query.sql_with_params()
# self.assertTrue('WHERE YEAR("collectingevent"."StartDate") IN (%s, %s, %s)' in sql)
# self.assertEqual(params, (2000, 1990, 1980))
# def test_year_empty_predicate(self):
# self.q.fields.create(
# fieldname='startDate',
# isdisplay=True,
# isnot=False,
# operstart=12,
# position=0,
# sorttype=0,
# startvalue='ignored',
# stringid='1,10.collectingevent.startDateNumericYear',
# tablelist='1,10')
# qs = execute(self.q)
# sql, params = qs.query.sql_with_params()
# self.assertTrue('WHERE ("collectingevent"."StartDate" IS NULL)' in sql)
# def test_month_empty_predicate(self):
# self.q.fields.create(
# fieldname='startDate',
# isdisplay=True,
# isnot=False,
# operstart=12,
# position=0,
# sorttype=0,
# startvalue='ignored',
# stringid='1,10.collectingevent.startDateNumericMonth',
# tablelist='1,10')
# qs = execute(self.q)
# sql, params = qs.query.sql_with_params()
# self.assertTrue('WHERE ("collectingevent"."StartDate" IS NULL OR '
# 'NOT (("collectingevent"."StartDatePrecision" IN (%s, %s) AND '
# 'NOT ("collectingevent"."CollectingEventID" IS NULL) AND '
# '"collectingevent"."StartDatePrecision" IS NOT NULL)))' in sql)
# self.assertEqual(params, (1,2))
# def test_day_empty_predicate(self):
# self.q.fields.create(
# fieldname='startDate',
# isdisplay=True,
# isnot=False,
# operstart=12,
# position=0,
# sorttype=0,
# startvalue='ignored',
# stringid='1,10.collectingevent.startDateNumericDay',
# tablelist='1,10')
# qs = execute(self.q)
# sql, params = qs.query.sql_with_params()
# self.assertTrue('WHERE ("collectingevent"."StartDate" IS NULL OR '
# 'NOT (("collectingevent"."StartDatePrecision" = %s AND '
# 'NOT ("collectingevent"."CollectingEventID" IS NULL) AND '
# '"collectingevent"."StartDatePrecision" IS NOT NULL)))' in sql)
# self.assertEqual(params, (1,))
# def test_aliased_columns_year(self):
# self.q.fields.create(
# fieldname='startDate',
# isdisplay=True,
# isnot=False,
# operstart=1,
# position=0,
# sorttype=0,
# startvalue='2000',
# stringid='1,9-determinations.determination.determinedDateNumericYear',
# tablelist='1,9-determinations')
# self.q.fields.create(
# fieldname='startDate',
# isdisplay=True,
# isnot=False,
# operstart=1,
# position=0,
# sorttype=0,
# startvalue='1990',
# stringid='1,10,1-collectionObjects,9-determinations.determination.determinedDateNumericYear',
# tablelist='1,10,1-collectionObjects,9-determinations')
# qs = execute(self.q)
# sql, params = qs.query.sql_with_params()
# self.assertTrue('WHERE (YEAR("determination"."DeterminedDate") = %s AND '
# 'YEAR(T5."DeterminedDate") = %s )' in sql)
# self.assertEqual(params, (2000, 1990))
# def test_aliased_columns_month(self):
# self.q.fields.create(
# fieldname='startDate',
# isdisplay=True,
# isnot=False,
# operstart=1,
# position=0,
# sorttype=0,
# startvalue='7',
# stringid='1,9-determinations.determination.determinedDateNumericMonth',
# tablelist='1,9-determinations')
# self.q.fields.create(
# fieldname='startDate',
# isdisplay=True,
# isnot=False,
# operstart=1,
# position=0,
# sorttype=0,
# startvalue='8',
# stringid='1,10,1-collectionObjects,9-determinations.determination.determinedDateNumericMonth',
# tablelist='1,10,1-collectionObjects,9-determinations')
# qs = execute(self.q)
# sql, params = qs.query.sql_with_params()
# self.assertTrue('WHERE ((MONTH("determination"."DeterminedDate") = %s AND '
# '"determination"."DeterminedDatePrecision" IN (%s, %s)) AND '
# '(MONTH(T5."DeterminedDate") = %s AND '
# 'T5."DeterminedDatePrecision" IN (%s, %s)))' in sql)
# self.assertEqual(params, (7, 1, 2, 8, 1, 2))
| gpl-2.0 | 3,723,801,496,089,554,400 | 39.010714 | 116 | 0.508703 | false |
uclouvain/osis | learning_unit/ddd/domain/description_fiche.py | 1 | 2627 | ##############################################################################
#
# OSIS stands for Open Student Information System. It's an application
# designed to manage the core business of higher education institutions,
# such as universities, faculties, institutes and professional schools.
# The core business involves the administration of students, teachers,
# courses, programs and so on.
#
# Copyright (C) 2015-2020 Université catholique de Louvain (http://www.uclouvain.be)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of this license - GNU General Public License - is available
# at the root of the source code of this program. If not,
# see http://www.gnu.org/licenses/.
#
##############################################################################
import datetime
import attr
@attr.s(slots=True)
class DescriptionFiche:
resume = attr.ib(type=str, default=None)
resume_en = attr.ib(type=str, default=None)
teaching_methods = attr.ib(type=str, default=None)
teaching_methods_en = attr.ib(type=str, default=None)
evaluation_methods = attr.ib(type=str, default=None)
evaluation_methods_en = attr.ib(type=str, default=None)
other_informations = attr.ib(type=str, default=None)
other_informations_en = attr.ib(type=str, default=None)
online_resources = attr.ib(type=str, default=None)
online_resources_en = attr.ib(type=str, default=None)
bibliography = attr.ib(type=str, default=None)
mobility = attr.ib(type=str, default=None)
last_update = attr.ib(type=datetime.datetime, default=None)
author = attr.ib(type=str, default=None)
@attr.s(slots=True)
class DescriptionFicheForceMajeure:
teaching_methods = attr.ib(type=str, default=None)
teaching_methods_en = attr.ib(type=str, default=None)
evaluation_methods = attr.ib(type=str, default=None)
evaluation_methods_en = attr.ib(type=str, default=None)
other_informations = attr.ib(type=str, default=None)
other_informations_en = attr.ib(type=str, default=None)
last_update = attr.ib(type=datetime.datetime, default=None)
author = attr.ib(type=str, default=None)
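# Illustrative construction (not part of the original module); every field is
# optional and defaults to None:
#
#     fiche = DescriptionFiche(
#         resume="Course summary",
#         teaching_methods="Lectures and labs",
#         last_update=datetime.datetime(2020, 9, 1),
#         author="jdoe",
#     )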
| agpl-3.0 | -6,738,978,772,855,739,000 | 45.070175 | 87 | 0.680883 | false |
jaor/bigmler | bigmler/anomaly/dispatcher.py | 1 | 10221 | # -*- coding: utf-8 -*-
#
# Copyright 2014-2020 BigML
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""BigMLer - anomaly subcommand processing dispatching
"""
import sys
import os
import bigml.api
from bigml.anomaly import Anomaly
import bigmler.utils as u
import bigmler.resourcesapi.common as r
import bigmler.resourcesapi.anomalies as ra
import bigmler.pre_model_steps as pms
import bigmler.processing.args as a
import bigmler.processing.anomalies as pa
import bigmler.processing.sources as ps
import bigmler.processing.datasets as pd
from bigmler.resourcesapi.datasets import set_basic_dataset_args
from bigmler.resourcesapi.batch_anomaly_scores import \
set_batch_anomaly_score_args
from bigmler.defaults import DEFAULTS_FILE
from bigmler.anomaly_score import anomaly_score, remote_anomaly_score
from bigmler.reports import clear_reports, upload_reports
from bigmler.command import get_context
from bigmler.dispatcher import SESSIONS_LOG, clear_log_files, get_test_dataset
from bigmler.options.anomaly import ANOMALIES_IN
COMMAND_LOG = ".bigmler_anomaly"
DIRS_LOG = ".bigmler_anomaly_dir_stack"
LOG_FILES = [COMMAND_LOG, DIRS_LOG, u.NEW_DIRS_LOG]
MINIMUM_MODEL = "full=false"
EXCLUDE_TREES = "exclude=trees"
DEFAULT_OUTPUT = "anomaly_scores.csv"
SETTINGS = {
"command_log": COMMAND_LOG,
"sessions_log": SESSIONS_LOG,
"dirs_log": DIRS_LOG,
"default_output": DEFAULT_OUTPUT,
"defaults_file": DEFAULTS_FILE}
def anomaly_dispatcher(args=sys.argv[1:]):
"""Parses command line and calls the different processing functions
"""
# If --clear-logs the log files are cleared
if "--clear-logs" in args:
clear_log_files(LOG_FILES)
command_args, _, api, session_file, resume = get_context(args,
SETTINGS)
# Selects the action to perform
if (a.has_train(command_args) or a.has_test(command_args) or
command_args.score or
a.has_anomaly(command_args)):
output_args = a.get_output_args(api, command_args, resume)
compute_output(**output_args)
u.log_message("_" * 80 + "\n", log_file=session_file)
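# Illustrative call sketch (not part of the original module). The flag names
# below are assumptions inferred from the options this file reads (training
# and test inputs, an output path); they are not verified against bigmler's
# argument parser.
#
#   anomaly_dispatcher(['--train', 'data/iris.csv',
#                       '--test', 'data/test.csv',
#                       '--output', 'anomaly_scores.csv'])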
def compute_output(api, args):
""" Creates one or more anomaly detectors using the `training_set`
        or uses the ids of previously created BigML anomaly detectors to
        compute anomaly scores for the `test_set`.
"""
anomaly = None
anomalies = None
# no multi-label support at present
# variables from command-line options
resume = args.resume_
anomaly_ids = args.anomaly_ids_
output = args.predictions
# there's only one anomaly detector to be generated at present
args.max_parallel_anomalies = 1
# anomalies cannot be published yet.
args.public_anomaly = False
# It is compulsory to have a description to publish either datasets or
# anomalies
if (not args.description_ and (args.public_anomaly or
args.public_dataset)):
sys.exit("You should provide a description to publish.")
# When using --new-fields, it is compulsory to specify also a dataset
# id
if args.new_fields and not args.dataset:
sys.exit("To use --new-fields you must also provide a dataset id"
" to generate the new dataset from it.")
path = u.check_dir(output)
session_file = "%s%s%s" % (path, os.sep, SESSIONS_LOG)
csv_properties = {}
# If logging is required set the file for logging
log = None
if args.log_file:
u.check_dir(args.log_file)
log = args.log_file
        # If --clear-logs the log files are cleared
clear_log_files([log])
# basic pre-model step: creating or retrieving the source related info
source, resume, csv_properties, fields = pms.get_source_info(
api, args, resume, csv_properties, session_file, path, log)
# basic pre-model step: creating or retrieving the dataset related info
dataset_properties = pms.get_dataset_info(
api, args, resume, source,
csv_properties, fields, session_file, path, log)
(_, datasets, test_dataset, resume,
csv_properties, fields) = dataset_properties
if args.anomaly_file:
# anomaly is retrieved from the contents of the given local JSON file
anomaly, csv_properties, fields = u.read_local_resource(
args.anomaly_file,
csv_properties=csv_properties)
anomalies = [anomaly]
anomaly_ids = [anomaly['resource']]
else:
# anomaly is retrieved from the remote object
anomalies, anomaly_ids, resume = pa.anomalies_processing(
datasets, anomalies, anomaly_ids, api, args, resume, fields=fields,
session_file=session_file, path=path, log=log)
if anomalies:
anomaly = anomalies[0]
# We update the anomaly's public state if needed
if anomaly:
if not a.has_test(args) and not args.anomalies_dataset:
query_string = MINIMUM_MODEL
elif not a.has_test(args):
query_string = ";".join([EXCLUDE_TREES, r.ALL_FIELDS_QS])
else:
query_string = r.ALL_FIELDS_QS
try:
anomaly_id = anomaly.get('resource', anomaly)
except AttributeError:
anomaly_id = anomaly
anomaly = u.check_resource(anomaly_id,
query_string=query_string,
api=api)
anomalies[0] = anomaly
if (args.public_anomaly or
(args.shared_flag and r.shared_changed(args.shared, anomaly))):
anomaly_args = {}
if args.shared_flag and r.shared_changed(args.shared, anomaly):
anomaly_args.update(shared=args.shared)
if args.public_anomaly:
anomaly_args.update(ra.set_publish_anomaly_args(args))
if anomaly_args:
anomaly = ra.update_anomaly(anomaly, anomaly_args, args,
api=api, path=path,
session_file=session_file)
anomalies[0] = anomaly
# We get the fields of the anomaly detector if we haven't got
# them yet and need them
if anomaly and (args.test_set or args.export_fields):
fields = pa.get_anomaly_fields(anomaly, csv_properties, args)
# If creating a top anomalies excluded/included dataset
if args.anomalies_dataset and anomaly:
origin_dataset = anomaly['object'].get('dataset')
if origin_dataset is None:
sys.exit("The dataset used to generate the anomaly detector "
"cannot be found. Failed to generate the anomalies "
" dataset.")
local_anomaly = Anomaly(anomaly)
include = args.anomalies_dataset == ANOMALIES_IN
args.anomaly_filter_ = local_anomaly.anomalies_filter(include=include)
_, resume = pd.create_new_dataset(
origin_dataset, api, args, resume, fields=fields,
session_file=session_file, path=path, log=log)
# If predicting
if anomaly and args.score:
args.test_dataset = anomaly['object']['dataset']
if anomalies and (a.has_test(args) or (test_dataset and args.remote)):
# test dataset can be defined by --test-split or --test-dataset or
# --test-datasets
if test_dataset is None:
test_dataset = get_test_dataset(args)
# Remote anomaly scores: scores are computed as batch anomaly scores
# in bigml.com except when --no-batch flag is set on
if args.remote and not args.no_batch:
# create test source from file
test_name = "%s - test" % args.name
if args.test_source is None:
test_properties = ps.test_source_processing(
api, args, resume, name=test_name,
session_file=session_file, path=path, log=log)
(test_source, resume,
csv_properties, test_fields) = test_properties
else:
test_source_id = bigml.api.get_source_id(args.test_source)
test_source = api.check_resource(test_source_id)
if test_dataset is None:
# create test dataset from test source
dataset_args = set_basic_dataset_args(args, name=test_name)
test_dataset, resume = pd.alternative_dataset_processing(
test_source, "test", dataset_args, api, args,
resume, session_file=session_file, path=path, log=log)
else:
test_dataset_id = bigml.api.get_dataset_id(test_dataset)
test_dataset = api.check_resource(test_dataset_id)
test_fields = pd.get_fields_structure(test_dataset,
csv_properties)
batch_anomaly_score_args = set_batch_anomaly_score_args(
args, fields=fields,
dataset_fields=test_fields)
remote_anomaly_score(anomaly, test_dataset,
batch_anomaly_score_args, args,
api, resume, prediction_file=output,
session_file=session_file, path=path, log=log)
else:
anomaly_score(anomalies, fields, args, session_file=session_file)
if fields and args.export_fields:
fields.summary_csv(os.path.join(path, args.export_fields))
u.print_generated_files(path, log_file=session_file,
verbosity=args.verbosity)
if args.reports:
clear_reports(path)
if args.upload:
upload_reports(args.reports, path)
| apache-2.0 | 6,740,342,672,061,636,000 | 40.048193 | 79 | 0.627532 | false |
Aplopio/document-converter | converters/utilities.py | 1 | 4138 | import sys
import re
import os
import shutil
import logging as log
sys.path.append('..')
from config import OUTPUT_FOLDER, UPLOAD_FOLDER
PARENT_DIR = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
TMP_DIR = os.path.join(PARENT_DIR, UPLOAD_FOLDER)
from html_pdf import HtmlPdf
from html_txt import HtmlTxt
from pdf_html import PdfHtml
from txt_html import TxtHtml
from doc_pdf import DocPdf
from ppt_pdf import PptPdf
from rtf_pdf import RtfPdf
from utils import get_file_extension
from file_manager import FileManager
AVAILABLE_CONVERTERS = [(HtmlPdf, 'htmlpdf'), (HtmlTxt, 'htmltxt'),
(PdfHtml, 'pdfhtml'), (TxtHtml, 'txthtml'),
(DocPdf, 'docpdf'), (PptPdf, 'pptpdf'),
(RtfPdf, 'rtfpdf'), ]
def class_selector(input_format, output_format, result=None):
result = result or []
if input_format == output_format:
return result
direct_converter = get_direct_converter(input_format, output_format)
if direct_converter:
result.append(direct_converter)
return result
input_regex = make_regex(input_format)
input_matches = get_input_matches(input_regex)
for input_match in input_matches:
converter, converter_expression = input_match
intermediate_format = get_intermediate_format(converter_expression,
input_format)
result.append(input_match)
converter_list = class_selector(intermediate_format, output_format,
result)
if converter_list:
return converter_list
else:
result.pop()
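# Illustrative sketch (not part of the original module): when no direct
# converter exists, class_selector resolves a chain through an intermediate
# format by recursing over AVAILABLE_CONVERTERS. With the converters
# registered above:
#
#   chain = class_selector('txt', 'pdf')
#   # -> [(TxtHtml, 'txthtml'), (HtmlPdf, 'htmlpdf')]: txt is first converted
#   # to html, for which a direct html->pdf converter is available.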
def get_intermediate_format(converter_expression, input_format):
return re.sub(input_format, '', converter_expression)
def get_input_matches(input_regex):
return [(converter, expression)
for converter, expression in AVAILABLE_CONVERTERS
if input_regex.match(expression)]
def make_regex(format_string):
return re.compile('^%s'%format_string)
def get_direct_converter(input_format, output_format):
converter_expression = '%s%s'%(input_format, output_format)
for converter, expression in AVAILABLE_CONVERTERS:
if re.match(converter_expression, expression):
return (converter, expression)
def get_input_format(input_files_objects):
sample_input_file = input_files_objects[0].get_input_file_path()
input_format = get_file_extension(sample_input_file)
return input_format
def set_flags_of_file_objects(input_files_objects, output_files_objects):
for input_file_object, output_file_object in zip(input_files_objects,
output_files_objects):
if (not output_file_object) or output_file_object == input_file_object:
input_file_object.converted = False
else:
output_file_name = os.path.basename(
output_file_object.get_input_file_path())
os.system('mv %s %s' % (
output_file_object.get_input_file_path(), OUTPUT_FOLDER))
input_file_object.set_output_file_path(
os.path.join(OUTPUT_FOLDER, output_file_name))
input_file_object.converted = True
return input_files_objects
def get_files_objects(files_paths):
files_objects = []
for file_path in files_paths:
if file_path:
file_object = FileManager(None, input_file_path=file_path)
files_objects.append(file_object)
else:
files_objects.append(None)
return files_objects
def handle_failed_conversion(input_file):
if not input_file or not os.path.isfile(input_file):
return
failed_conversion_dir = os.path.join(TMP_DIR, 'failed_conversions')
if not os.path.isdir(failed_conversion_dir):
os.makedirs(failed_conversion_dir)
filename = os.path.basename(input_file)
try:
shutil.copyfile(input_file, os.path.join(failed_conversion_dir,
filename))
    except IOError as ie:
log.error(ie)
| mit | 4,393,052,712,470,979,600 | 33.483333 | 79 | 0.639198 | false |
codilime/veles | python/veles/scli/client.py | 1 | 13837 | # Copyright 2017 CodiLime
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals
import socket
import ssl
import msgpack
from veles.proto import messages, msgpackwrap
from veles.proto.messages import PROTO_VERSION
from veles.schema import nodeid
from veles.util import helpers
class Client(object):
def __init__(self, sock, key, name='scli', version='1.0',
description='', type='scli', quit_on_close=False):
self.sock = sock
wrapper = msgpackwrap.MsgpackWrapper()
self.unpacker = wrapper.unpacker
self.packer = wrapper.packer
self.client_name = name
self.client_version = version
self.client_description = description
self.client_type = type
self.quit_on_close = quit_on_close
self._authorize(helpers.prepare_auth_key(key))
def _authorize(self, key):
self.sock.sendall(key)
self.send_msg(messages.MsgConnect(
proto_version=PROTO_VERSION,
client_name=self.client_name,
client_version=self.client_version,
client_description=self.client_description,
client_type=self.client_type,
quit_on_close=self.quit_on_close,
))
pkt = self.getpkt()
if isinstance(pkt, messages.MsgConnected):
print('Connected to server: {}'.format(pkt.server_name))
elif isinstance(pkt, messages.MsgConnectionError):
raise pkt.err
else:
print(pkt)
raise Exception('weird reply when attempting to connect')
def getpkt(self):
while True:
try:
return messages.MsgpackMsg.load(self.unpacker.unpack())
except msgpack.OutOfData:
pass
data = self.sock.recv(1024)
if not data:
raise Exception("end of file")
self.unpacker.feed(data)
def send_msg(self, msg):
self.sock.sendall(self.packer.pack(msg.dump()))
def request(self, msg):
self.send_msg(msg)
pkt = self.getpkt()
if isinstance(pkt, messages.MsgRequestAck) and pkt.rid == 0:
return msg.id
elif isinstance(pkt, messages.MsgRequestError) and pkt.rid == 0:
raise pkt.err
else:
print(pkt)
raise Exception('weird reply to request')
def create(self, parent, tags=set(), attr={}, data={}, bindata={},
pos=(None, None)):
msg = messages.MsgCreate(
id=nodeid.NodeID(),
parent=parent,
pos_start=pos[0],
pos_end=pos[1],
tags=tags,
attr=attr,
data=data,
bindata=bindata,
rid=0,
)
self.request(msg)
return msg.id
def delete(self, obj):
msg = messages.MsgDelete(
id=obj,
rid=0
)
self.request(msg)
def set_parent(self, obj, parent):
msg = messages.MsgSetParent(
id=obj,
parent=parent,
rid=0
)
self.request(msg)
def set_pos(self, obj, start, end):
msg = messages.MsgSetPos(
id=obj,
pos_start=start,
pos_end=end,
rid=0
)
self.request(msg)
def add_tag(self, obj, tag):
msg = messages.MsgAddTag(
id=obj,
tag=tag,
rid=0
)
self.request(msg)
def del_tag(self, obj, tag):
msg = messages.MsgDelTag(
id=obj,
tag=tag,
rid=0
)
self.request(msg)
def set_attr(self, obj, key, data):
msg = messages.MsgSetAttr(
id=obj,
key=key,
data=data,
rid=0
)
self.request(msg)
def set_data(self, obj, key, data):
msg = messages.MsgSetData(
id=obj,
rid=0,
key=key,
data=data,
)
self.request(msg)
def set_bindata(self, obj, key, start, data, truncate=False):
msg = messages.MsgSetBinData(
id=obj,
rid=0,
key=key,
start=start,
data=data,
truncate=truncate,
)
self.request(msg)
def get(self, obj):
msg = messages.MsgGet(
id=obj,
qid=0,
)
self.send_msg(msg)
pkt = self.getpkt()
if isinstance(pkt, messages.MsgGetReply) and pkt.qid == 0:
return pkt.obj
elif isinstance(pkt, messages.MsgQueryError) and pkt.qid == 0:
raise pkt.err
else:
raise Exception('weird reply to get')
def get_sub(self, obj):
msg = messages.MsgGet(
id=obj,
qid=0,
sub=True,
)
self.send_msg(msg)
while True:
pkt = self.getpkt()
if isinstance(pkt, messages.MsgGetReply) and pkt.qid == 0:
yield pkt.obj
elif isinstance(pkt, messages.MsgQueryError) and pkt.qid == 0:
raise pkt.err
else:
raise Exception('weird reply to get')
def get_data(self, obj, key):
msg = messages.MsgGetData(
id=obj,
qid=0,
key=key,
)
self.send_msg(msg)
pkt = self.getpkt()
if isinstance(pkt, messages.MsgGetDataReply) and pkt.qid == 0:
return pkt.data
elif isinstance(pkt, messages.MsgQueryError) and pkt.qid == 0:
raise pkt.err
else:
raise Exception('weird reply to get_data')
def get_data_sub(self, obj, key):
msg = messages.MsgGetData(
id=obj,
qid=0,
key=key,
sub=True
)
self.send_msg(msg)
while True:
pkt = self.getpkt()
if isinstance(pkt, messages.MsgGetDataReply) and pkt.qid == 0:
yield pkt.data
elif isinstance(pkt, messages.MsgQueryError) and pkt.qid == 0:
raise pkt.err
else:
raise Exception('weird reply to get_data')
def get_bindata(self, obj, key, start=0, end=None):
msg = messages.MsgGetBinData(
id=obj,
qid=0,
key=key,
start=start,
end=end,
)
self.send_msg(msg)
pkt = self.getpkt()
if isinstance(pkt, messages.MsgGetBinDataReply) and pkt.qid == 0:
return pkt.data
elif isinstance(pkt, messages.MsgQueryError) and pkt.qid == 0:
raise pkt.err
else:
raise Exception('weird reply to get_bindata')
def get_bindata_sub(self, obj, key, start=0, end=None):
msg = messages.MsgGetBinData(
id=obj,
qid=0,
key=key,
start=start,
end=end,
sub=True,
)
self.send_msg(msg)
while True:
pkt = self.getpkt()
if isinstance(pkt, messages.MsgGetBinDataReply) and pkt.qid == 0:
yield pkt.data
elif isinstance(pkt, messages.MsgQueryError) and pkt.qid == 0:
raise pkt.err
else:
raise Exception('weird reply to get_bindata')
def list(self, obj):
msg = messages.MsgGetList(
qid=0,
parent=obj,
)
self.send_msg(msg)
pkt = self.getpkt()
if isinstance(pkt, messages.MsgGetListReply) and pkt.qid == 0:
return pkt.objs
elif isinstance(pkt, messages.MsgQueryError) and pkt.qid == 0:
raise pkt.err
else:
print(pkt)
raise Exception('weird reply to list')
def list_sub(self, obj):
msg = messages.MsgGetList(
qid=0,
parent=obj,
sub=True
)
self.send_msg(msg)
while True:
pkt = self.getpkt()
if isinstance(pkt, messages.MsgGetListReply) and pkt.qid == 0:
yield pkt
elif isinstance(pkt, messages.MsgQueryError) and pkt.qid == 0:
raise pkt.err
else:
print(pkt)
raise Exception('weird reply to list')
def query(self, obj, sig, params, checks=None):
params = sig.params.dump(params)
msg = messages.MsgGetQuery(
qid=0,
node=obj,
query=sig.name,
params=params,
trace=checks is not None
)
self.send_msg(msg)
pkt = self.getpkt()
if isinstance(pkt, messages.MsgGetQueryReply) and pkt.qid == 0:
if checks is not None:
checks += pkt.checks
return sig.result.load(pkt.result)
elif isinstance(pkt, messages.MsgQueryError) and pkt.qid == 0:
if checks is not None:
checks += pkt.checks
raise pkt.err
else:
print(pkt)
raise Exception('weird reply to get_query')
def query_sub(self, obj, sig, params, checks=None):
params = sig.params.dump(params)
msg = messages.MsgGetQuery(
qid=0,
node=obj,
query=sig.name,
params=params,
trace=checks is not None,
sub=True
)
self.send_msg(msg)
while True:
pkt = self.getpkt()
if isinstance(pkt, messages.MsgGetQueryReply) and pkt.qid == 0:
if checks is not None:
checks += pkt.checks
yield sig.result.load(pkt.result)
elif isinstance(pkt, messages.MsgQueryError) and pkt.qid == 0:
if checks is not None:
checks += pkt.checks
raise pkt.err
else:
print(pkt)
raise Exception('weird reply to get_query')
def run_method(self, obj, sig, params):
params = sig.params.dump(params)
msg = messages.MsgMethodRun(
mid=0,
node=obj,
method=sig.name,
params=params
)
self.send_msg(msg)
pkt = self.getpkt()
if isinstance(pkt, messages.MsgMethodResult) and pkt.mid == 0:
return sig.result.load(pkt.result)
elif isinstance(pkt, messages.MsgMethodError) and pkt.mid == 0:
raise pkt.err
else:
print(pkt)
raise Exception('weird reply to run_method')
def run_broadcast(self, sig, params):
params = sig.params.dump(params)
msg = messages.MsgBroadcastRun(
bid=0,
broadcast=sig.name,
params=params
)
self.send_msg(msg)
pkt = self.getpkt()
if isinstance(pkt, messages.MsgBroadcastResult) and pkt.bid == 0:
return [sig.result.load(result) for result in pkt.results]
else:
print(pkt)
raise Exception('weird reply to run_broadcast')
def list_connections(self):
msg = messages.MsgListConnections(
qid=0,
)
self.send_msg(msg)
pkt = self.getpkt()
if isinstance(pkt, messages.MsgConnectionsReply) and pkt.qid == 0:
return pkt.connections
elif isinstance(pkt, messages.MsgQueryError) and pkt.qid == 0:
raise pkt.err
else:
print(pkt)
raise Exception('weird reply to list_connections')
def list_connections_sub(self):
msg = messages.MsgListConnections(
qid=0,
sub=True
)
self.send_msg(msg)
while True:
pkt = self.getpkt()
if isinstance(pkt, messages.MsgConnectionsReply) and pkt.qid == 0:
yield pkt
elif isinstance(pkt, messages.MsgQueryError) and pkt.qid == 0:
raise pkt.err
else:
print(pkt)
raise Exception('weird reply to list_connections')
class UnixClient(Client):
def __init__(self, path, key, **kwargs):
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sock.connect(path)
super(UnixClient, self).__init__(sock, key, **kwargs)
class TcpClient(Client):
def __init__(self, ip, port, key, **kwargs):
sock = socket.create_connection((ip, port))
super(TcpClient, self).__init__(sock, key, **kwargs)
class SslClient(Client):
def __init__(self, ip, port, key, fingerprint, **kwargs):
sock = socket.create_connection((ip, port))
sc = ssl.SSLContext()
sock = sc.wrap_socket(sock)
cert = sock.getpeercert(True)
helpers.validate_cert(cert, fingerprint)
super(SslClient, self).__init__(sock, key, **kwargs)
def create_client(url):
url = helpers.parse_url(url)
if url.scheme == helpers.UrlScheme.UNIX_SCHEME:
return UnixClient(url.path, url.auth_key)
elif url.scheme == helpers.UrlScheme.TCP_SCHEME:
return TcpClient(url.host, url.port, url.auth_key)
elif url.scheme == helpers.UrlScheme.SSL_SCHEME:
return SslClient(url.host, url.port, url.auth_key, url.fingerprint)
else:
raise ValueError('Wrong scheme provided!')
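# Illustrative usage sketch (not part of the original module). The URL syntax
# is whatever helpers.parse_url accepts (unix/tcp/ssl schemes with an auth
# key); parent_id and the values below are placeholders.
#
#   c = create_client(url)
#   new_id = c.create(parent_id, tags={'example'})
#   c.set_attr(new_id, 'name', 'demo')
#   print(c.get(new_id))
#   c.delete(new_id)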
| apache-2.0 | -1,839,075,806,865,339,100 | 30.094382 | 78 | 0.539206 | false |
askin/GNazar | GNazar/gnazar.py | 1 | 6314 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Licensed under GPL v2
# Copyright 2010, Aşkın Yollu <[email protected]>
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# Please read the COPYING file.
#
import pygtk
import gtk
import gettext
import pynotify
import time
import os
import sys
import locale
import random
import platform
gtk.gdk.threads_init()
#Translation stuff
localedir = "/usr/share/gnazar/locale"
gettext.bindtextdomain('gnazar', localedir)
gettext.textdomain('gnazar')
sharedirs = '/usr/share'
_ = gettext.gettext
class GNazar():
def __init__(self):
# create a new Status Icon
self.gnazar = gtk.StatusIcon()
self.gnazar.set_from_file(
'%s/icons/hicolor/22x22/apps/gnazar-deactive.png' % sharedirs)
self.gnazar.set_tooltip(
_("GNazar - You are completely demilitarized..."))
self.gnazar.set_visible(True)
self.status = False
# create menu
self.menu = gtk.Menu()
self.gnazar.connect("popup_menu", self.show_menu)
# connect
_quit = gtk.ImageMenuItem(gtk.STOCK_QUIT)
_quit.connect("activate", self.destroy)
_about = gtk.ImageMenuItem(gtk.STOCK_ABOUT)
_about.connect("activate", self.show_about)
_protect = gtk.ImageMenuItem(gtk.STOCK_OK)
_protect.connect("activate", self.protect)
_protect.set_label(_("Protect"))
_release = gtk.ImageMenuItem(gtk.STOCK_CANCEL)
_release.set_label(_("Release"))
_release.connect("activate", self.release)
# add to menu
self.menu.add(_protect)
self.menu.add(_release)
self.menu.add(_about)
self.menu.add(_quit)
self.menu.show_all()
# notification
pynotify.init(_("GNazar Application"))
# init attack
self.total_attack = 0
self.defated_attack = 0
self.running = True
import thread
thread.start_new_thread(self._notification, ())
def main(self):
# gtk main
gtk.main()
'''
show popup menu
'''
def show_menu(self, status_icon, button, activate_time):
self.menu.popup(None, None, gtk.status_icon_position_menu,
button, activate_time, status_icon)
# random notification
def _notification(self):
while(self.running):
time.sleep(random.randrange(3600, 18000))
#time.sleep(4) # testing
self.notification()
'''
show about
'''
def show_about(self, widget):
about = gtk.AboutDialog()
about.set_program_name("GNazar")
about.set_icon_from_file("%s/icons/hicolor/22x22/apps/gnazar.png"
% sharedirs)
about.set_version("0.1")
about.set_copyright("(c) Aşkın Yollu")
# FIXME: make it generic (mac, bsd, win etc..)
dist_name = platform.dist()[0]
about.set_comments(_("GNazar is a useful part of the %s" % dist_name))
about.set_website("http://www.askin.ws")
about.set_logo(gtk.gdk.pixbuf_new_from_file(
"%s/icons/hicolor/32x32/apps/gnazar.png" % sharedirs))
about.set_translator_credits(_("TRANSLATORS"))
about.set_artists([_("THANKSFORICONS")])
about.run()
about.destroy()
# destroy callback
def destroy(self, widget):
self.gnazar.set_visible(False)
self.running = False
gtk.main_quit()
# popup callback
def protect(self, widget):
if self.status == False:
dialog = gtk.MessageDialog(
parent=None,
flags=gtk.DIALOG_DESTROY_WITH_PARENT,
type=gtk.MESSAGE_INFO,
buttons=gtk.BUTTONS_OK,
message_format=_("GNazar is starting to protect your "
"computer from harmful looks...")
)
dialog.set_title(_("GNazar Application"))
dialog.connect('response', self.dialog_destroyer)
dialog.show()
self.status = True
self.gnazar.set_tooltip(_("GNazar - No harmful look allowed!"))
self.gnazar.set_from_file("%s/icons/hicolor/22x22/apps/gnazar.png"
% sharedirs)
def release(self, widget):
if self.status == True:
dialog = gtk.MessageDialog(
parent=None,
flags=gtk.DIALOG_DESTROY_WITH_PARENT,
type=gtk.MESSAGE_WARNING,
buttons=gtk.BUTTONS_OK,
message_format=_("GNazar is stopping to protect your computer"
" from harmful looks...")
)
dialog.set_title(_("GNazar Application"))
dialog.connect('response', self.dialog_destroyer)
dialog.show()
self.status = False
self.gnazar.set_tooltip(
_("GNazar - You are completely demilitarized..."))
self.gnazar.set_from_file(
"%s/icons/hicolor/22x22/apps/gnazar-deactive.png" % sharedirs)
def notification(self):
self.total_attack += 1
if self.status == True:
self.defated_attack += 1
title = _("Nazar eliminated")
body = _("Nazar Received and eliminated successfuly")
icon = "gtk-apply"
else:
title = _("Nazar harmed")
body = _("Nazar Received and it HARMED!")
icon = "dialog-warning"
self.gnazar.set_tooltip(
_("GNazar - %s attacks received so far, %s"
" are defated and %s are received...") %
(self.total_attack,
self.defated_attack,
self.total_attack - self.defated_attack))
notify = pynotify.Notification(title, body, icon)
notify.set_urgency(pynotify.URGENCY_NORMAL)
notify.set_timeout(pynotify.EXPIRES_NEVER)
notify.show()
def dialog_destroyer(self, dialog, widget):
dialog.destroy()
def main():
si = GNazar()
si.main()
| gpl-2.0 | 1,321,648,834,766,344,000 | 31.525773 | 79 | 0.572266 | false |
laurmurclar/mitmproxy | mitmproxy/tools/console/flowview.py | 1 | 23737 | import math
import os
import sys
from functools import lru_cache
from typing import Optional, Union # noqa
import urwid
from mitmproxy import contentviews
from mitmproxy import exceptions
from mitmproxy import export
from mitmproxy import http
from mitmproxy.net.http import Headers
from mitmproxy.net.http import status_codes
from mitmproxy.tools.console import common
from mitmproxy.tools.console import flowdetailview
from mitmproxy.tools.console import grideditor
from mitmproxy.tools.console import searchable
from mitmproxy.tools.console import signals
from mitmproxy.tools.console import tabs
class SearchError(Exception):
pass
def _mkhelp():
text = []
keys = [
("A", "accept all intercepted flows"),
("a", "accept this intercepted flow"),
("b", "save request/response body"),
("C", "export flow to clipboard"),
("D", "duplicate flow"),
("d", "delete flow"),
("e", "edit request/response"),
("f", "load full body data"),
("m", "change body display mode for this entity\n(default mode can be changed in the options)"),
(None,
common.highlight_key("automatic", "a") +
[("text", ": automatic detection")]
),
(None,
common.highlight_key("hex", "e") +
[("text", ": Hex")]
),
(None,
common.highlight_key("html", "h") +
[("text", ": HTML")]
),
(None,
common.highlight_key("image", "i") +
[("text", ": Image")]
),
(None,
common.highlight_key("javascript", "j") +
[("text", ": JavaScript")]
),
(None,
common.highlight_key("json", "s") +
[("text", ": JSON")]
),
(None,
common.highlight_key("urlencoded", "u") +
[("text", ": URL-encoded data")]
),
(None,
common.highlight_key("raw", "r") +
[("text", ": raw data")]
),
(None,
common.highlight_key("xml", "x") +
[("text", ": XML")]
),
("E", "export flow to file"),
("r", "replay request"),
("V", "revert changes to request"),
("v", "view body in external viewer"),
("w", "save all flows matching current view filter"),
("W", "save this flow"),
("x", "delete body"),
("z", "encode/decode a request/response"),
("tab", "next tab"),
("h, l", "previous tab, next tab"),
("space", "next flow"),
("|", "run script on this flow"),
("/", "search (case sensitive)"),
("n", "repeat search forward"),
("N", "repeat search backwards"),
]
text.extend(common.format_keyvals(keys, key="key", val="text", indent=4))
return text
help_context = _mkhelp()
footer = [
('heading_key', "?"), ":help ",
('heading_key', "q"), ":back ",
]
class FlowViewHeader(urwid.WidgetWrap):
def __init__(self, master: "mitmproxy.console.master.ConsoleMaster", f: http.HTTPFlow):
self.master = master
self.flow = f
self._w = common.format_flow(
f,
False,
extended=True,
hostheader=self.master.options.showhost
)
signals.flow_change.connect(self.sig_flow_change)
def sig_flow_change(self, sender, flow):
if flow == self.flow:
self._w = common.format_flow(
flow,
False,
extended=True,
hostheader=self.master.options.showhost
)
TAB_REQ = 0
TAB_RESP = 1
class FlowView(tabs.Tabs):
highlight_color = "focusfield"
def __init__(self, master, view, flow, tab_offset):
self.master, self.view, self.flow = master, view, flow
super().__init__(
[
(self.tab_request, self.view_request),
(self.tab_response, self.view_response),
(self.tab_details, self.view_details),
],
tab_offset
)
self.show()
self.last_displayed_body = None
signals.flow_change.connect(self.sig_flow_change)
def tab_request(self):
if self.flow.intercepted and not self.flow.response:
return "Request intercepted"
else:
return "Request"
def tab_response(self):
if self.flow.intercepted and self.flow.response:
return "Response intercepted"
else:
return "Response"
def tab_details(self):
return "Detail"
def view_request(self):
return self.conn_text(self.flow.request)
def view_response(self):
return self.conn_text(self.flow.response)
def view_details(self):
return flowdetailview.flowdetails(self.view, self.flow)
def sig_flow_change(self, sender, flow):
if flow == self.flow:
self.show()
def content_view(self, viewmode, message):
if message.raw_content is None:
msg, body = "", [urwid.Text([("error", "[content missing]")])]
return msg, body
else:
s = self.view.settings[self.flow]
full = s.get((self.tab_offset, "fullcontents"), False)
if full:
limit = sys.maxsize
else:
limit = contentviews.VIEW_CUTOFF
flow_modify_cache_invalidation = hash((
message.raw_content,
message.headers.fields,
getattr(message, "path", None),
))
# we need to pass the message off-band because it's not hashable
self._get_content_view_message = message
return self._get_content_view(viewmode, limit, flow_modify_cache_invalidation)
@lru_cache(maxsize=200)
def _get_content_view(self, viewmode, max_lines, _):
message = self._get_content_view_message
self._get_content_view_message = None
description, lines, error = contentviews.get_message_content_view(
viewmode, message
)
if error:
signals.add_log(error, "error")
# Give hint that you have to tab for the response.
if description == "No content" and isinstance(message, http.HTTPRequest):
description = "No request content (press tab to view response)"
        # If the user has a wide terminal, they get fewer lines; this should not be an issue.
chars_per_line = 80
max_chars = max_lines * chars_per_line
total_chars = 0
text_objects = []
for line in lines:
txt = []
for (style, text) in line:
if total_chars + len(text) > max_chars:
text = text[:max_chars - total_chars]
txt.append((style, text))
total_chars += len(text)
if total_chars == max_chars:
break
# round up to the next line.
total_chars = int(math.ceil(total_chars / chars_per_line) * chars_per_line)
text_objects.append(urwid.Text(txt))
if total_chars == max_chars:
text_objects.append(urwid.Text([
("highlight", "Stopped displaying data after %d lines. Press " % max_lines),
("key", "f"),
("highlight", " to load all data.")
]))
break
return description, text_objects
def viewmode_get(self):
override = self.view.settings[self.flow].get(
(self.tab_offset, "prettyview"),
None
)
return self.master.options.default_contentview if override is None else override
def conn_text(self, conn):
if conn:
txt = common.format_keyvals(
[(h + ":", v) for (h, v) in conn.headers.items(multi=True)],
key = "header",
val = "text"
)
viewmode = self.viewmode_get()
msg, body = self.content_view(viewmode, conn)
cols = [
urwid.Text(
[
("heading", msg),
]
),
urwid.Text(
[
" ",
('heading', "["),
('heading_key', "m"),
('heading', (":%s]" % viewmode)),
],
align="right"
)
]
title = urwid.AttrWrap(urwid.Columns(cols), "heading")
txt.append(title)
txt.extend(body)
else:
txt = [
urwid.Text(""),
urwid.Text(
[
("highlight", "No response. Press "),
("key", "e"),
("highlight", " and edit any aspect to add one."),
]
)
]
return searchable.Searchable(self.view, txt)
def set_method_raw(self, m):
if m:
self.flow.request.method = m
signals.flow_change.send(self, flow = self.flow)
def edit_method(self, m):
if m == "e":
signals.status_prompt.send(
prompt = "Method",
text = self.flow.request.method,
callback = self.set_method_raw
)
else:
for i in common.METHOD_OPTIONS:
if i[1] == m:
self.flow.request.method = i[0].upper()
signals.flow_change.send(self, flow = self.flow)
def set_url(self, url):
request = self.flow.request
try:
request.url = str(url)
except ValueError:
return "Invalid URL."
signals.flow_change.send(self, flow = self.flow)
def set_resp_status_code(self, status_code):
try:
status_code = int(status_code)
except ValueError:
return None
self.flow.response.status_code = status_code
if status_code in status_codes.RESPONSES:
self.flow.response.reason = status_codes.RESPONSES[status_code]
signals.flow_change.send(self, flow = self.flow)
def set_resp_reason(self, reason):
self.flow.response.reason = reason
signals.flow_change.send(self, flow = self.flow)
def set_headers(self, fields, conn):
conn.headers = Headers(fields)
signals.flow_change.send(self, flow = self.flow)
def set_query(self, lst, conn):
conn.query = lst
signals.flow_change.send(self, flow = self.flow)
def set_path_components(self, lst, conn):
conn.path_components = lst
signals.flow_change.send(self, flow = self.flow)
def set_form(self, lst, conn):
conn.urlencoded_form = lst
signals.flow_change.send(self, flow = self.flow)
def edit_form(self, conn):
self.master.view_grideditor(
grideditor.URLEncodedFormEditor(
self.master,
conn.urlencoded_form.items(multi=True),
self.set_form,
conn
)
)
def edit_form_confirm(self, key, conn):
if key == "y":
self.edit_form(conn)
def set_cookies(self, lst, conn):
conn.cookies = lst
signals.flow_change.send(self, flow = self.flow)
def set_setcookies(self, data, conn):
conn.cookies = data
signals.flow_change.send(self, flow = self.flow)
def edit(self, part):
if self.tab_offset == TAB_REQ:
message = self.flow.request
else:
if not self.flow.response:
self.flow.response = http.HTTPResponse.make(200, b"")
message = self.flow.response
self.flow.backup()
if message == self.flow.request and part == "c":
self.master.view_grideditor(
grideditor.CookieEditor(
self.master,
message.cookies.items(multi=True),
self.set_cookies,
message
)
)
if message == self.flow.response and part == "c":
self.master.view_grideditor(
grideditor.SetCookieEditor(
self.master,
message.cookies.items(multi=True),
self.set_setcookies,
message
)
)
if part == "r":
# Fix an issue caused by some editors when editing a
# request/response body. Many editors make it hard to save a
# file without a terminating newline on the last line. When
# editing message bodies, this can cause problems. For now, I
# just strip the newlines off the end of the body when we return
# from an editor.
c = self.master.spawn_editor(message.get_content(strict=False) or b"")
message.content = c.rstrip(b"\n")
elif part == "f":
if not message.urlencoded_form and message.raw_content:
signals.status_prompt_onekey.send(
prompt = "Existing body is not a URL-encoded form. Clear and edit?",
keys = [
("yes", "y"),
("no", "n"),
],
callback = self.edit_form_confirm,
args = (message,)
)
else:
self.edit_form(message)
elif part == "h":
self.master.view_grideditor(
grideditor.HeaderEditor(
self.master,
message.headers.fields,
self.set_headers,
message
)
)
elif part == "p":
p = message.path_components
self.master.view_grideditor(
grideditor.PathEditor(
self.master,
p,
self.set_path_components,
message
)
)
elif part == "q":
self.master.view_grideditor(
grideditor.QueryEditor(
self.master,
message.query.items(multi=True),
self.set_query, message
)
)
elif part == "u":
signals.status_prompt.send(
prompt = "URL",
text = message.url,
callback = self.set_url
)
elif part == "m" and message == self.flow.request:
signals.status_prompt_onekey.send(
prompt = "Method",
keys = common.METHOD_OPTIONS,
callback = self.edit_method
)
elif part == "o":
signals.status_prompt.send(
prompt = "Code",
text = str(message.status_code),
callback = self.set_resp_status_code
)
elif part == "m" and message == self.flow.response:
signals.status_prompt.send(
prompt = "Message",
text = message.reason,
callback = self.set_resp_reason
)
signals.flow_change.send(self, flow = self.flow)
def view_flow(self, flow):
signals.pop_view_state.send(self)
self.master.view_flow(flow, self.tab_offset)
def _view_nextprev_flow(self, idx, flow):
if not self.view.inbounds(idx):
signals.status_message.send(message="No more flows")
return
self.view_flow(self.view[idx])
def view_next_flow(self, flow):
return self._view_nextprev_flow(self.view.index(flow) + 1, flow)
def view_prev_flow(self, flow):
return self._view_nextprev_flow(self.view.index(flow) - 1, flow)
def change_this_display_mode(self, t):
view = contentviews.get_by_shortcut(t)
if view:
self.view.settings[self.flow][(self.tab_offset, "prettyview")] = view.name
else:
self.view.settings[self.flow][(self.tab_offset, "prettyview")] = None
signals.flow_change.send(self, flow=self.flow)
def keypress(self, size, key):
conn = None # type: Optional[Union[http.HTTPRequest, http.HTTPResponse]]
if self.tab_offset == TAB_REQ:
conn = self.flow.request
elif self.tab_offset == TAB_RESP:
conn = self.flow.response
key = super().keypress(size, key)
# Special case: Space moves over to the next flow.
# We need to catch that before applying common.shortcuts()
if key == " ":
self.view_next_flow(self.flow)
return
key = common.shortcuts(key)
if key in ("up", "down", "page up", "page down"):
# Pass scroll events to the wrapped widget
self._w.keypress(size, key)
elif key == "a":
self.flow.resume()
self.master.view.update(self.flow)
elif key == "A":
for f in self.view:
if f.intercepted:
f.resume()
self.master.view.update(self.flow)
elif key == "d":
if self.flow.killable:
self.flow.kill()
self.view.remove(self.flow)
if not self.view.focus.flow:
self.master.view_flowlist()
else:
self.view_flow(self.view.focus.flow)
elif key == "D":
cp = self.flow.copy()
self.master.view.add(cp)
self.master.view.focus.flow = cp
self.view_flow(cp)
signals.status_message.send(message="Duplicated.")
elif key == "p":
self.view_prev_flow(self.flow)
elif key == "r":
try:
self.master.replay_request(self.flow)
except exceptions.ReplayException as e:
signals.add_log("Replay error: %s" % e, "warn")
signals.flow_change.send(self, flow = self.flow)
elif key == "V":
if self.flow.modified():
self.flow.revert()
signals.flow_change.send(self, flow = self.flow)
signals.status_message.send(message="Reverted.")
else:
signals.status_message.send(message="Flow not modified.")
elif key == "W":
signals.status_prompt_path.send(
prompt = "Save this flow",
callback = self.master.save_one_flow,
args = (self.flow,)
)
elif key == "|":
signals.status_prompt_path.send(
prompt = "Send flow to script",
callback = self.master.run_script_once,
args = (self.flow,)
)
elif key == "e":
if self.tab_offset == TAB_REQ:
signals.status_prompt_onekey.send(
prompt="Edit request",
keys=(
("cookies", "c"),
("query", "q"),
("path", "p"),
("url", "u"),
("header", "h"),
("form", "f"),
("raw body", "r"),
("method", "m"),
),
callback=self.edit
)
elif self.tab_offset == TAB_RESP:
signals.status_prompt_onekey.send(
prompt="Edit response",
keys=(
("cookies", "c"),
("code", "o"),
("message", "m"),
("header", "h"),
("raw body", "r"),
),
callback=self.edit
)
else:
signals.status_message.send(
message="Tab to the request or response",
expire=1
)
elif key in set("bfgmxvzEC") and not conn:
signals.status_message.send(
message = "Tab to the request or response",
expire = 1
)
return
elif key == "b":
if self.tab_offset == TAB_REQ:
common.ask_save_body("q", self.flow)
else:
common.ask_save_body("s", self.flow)
elif key == "f":
self.view.settings[self.flow][(self.tab_offset, "fullcontents")] = True
signals.flow_change.send(self, flow = self.flow)
signals.status_message.send(message="Loading all body data...")
elif key == "m":
p = list(contentviews.view_prompts)
p.insert(0, ("Clear", "C"))
signals.status_prompt_onekey.send(
self,
prompt = "Display mode",
keys = p,
callback = self.change_this_display_mode
)
elif key == "E":
if self.tab_offset == TAB_REQ:
scope = "q"
else:
scope = "s"
signals.status_prompt_onekey.send(
self,
prompt = "Export to file",
keys = [(e[0], e[1]) for e in export.EXPORTERS],
callback = common.export_to_clip_or_file,
args = (scope, self.flow, common.ask_save_path)
)
elif key == "C":
if self.tab_offset == TAB_REQ:
scope = "q"
else:
scope = "s"
signals.status_prompt_onekey.send(
self,
prompt = "Export to clipboard",
keys = [(e[0], e[1]) for e in export.EXPORTERS],
callback = common.export_to_clip_or_file,
args = (scope, self.flow, common.copy_to_clipboard_or_prompt)
)
elif key == "x":
conn.content = None
signals.flow_change.send(self, flow=self.flow)
elif key == "v":
if conn.raw_content:
t = conn.headers.get("content-type")
if "EDITOR" in os.environ or "PAGER" in os.environ:
self.master.spawn_external_viewer(conn.get_content(strict=False), t)
else:
signals.status_message.send(
message = "Error! Set $EDITOR or $PAGER."
)
elif key == "z":
self.flow.backup()
e = conn.headers.get("content-encoding", "identity")
if e != "identity":
try:
conn.decode()
except ValueError:
signals.status_message.send(
message = "Could not decode - invalid data?"
)
else:
signals.status_prompt_onekey.send(
prompt = "Select encoding: ",
keys = (
("gzip", "z"),
("deflate", "d"),
("brotli", "b"),
),
callback = self.encode_callback,
args = (conn,)
)
signals.flow_change.send(self, flow = self.flow)
else:
# Key is not handled here.
return key
def encode_callback(self, key, conn):
encoding_map = {
"z": "gzip",
"d": "deflate",
"b": "br",
}
conn.encode(encoding_map[key])
signals.flow_change.send(self, flow = self.flow)
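# The off-band caching trick used by FlowView.content_view/_get_content_view
# above, shown in isolation (illustrative only; Renderer, expensive_render and
# self._pending are made-up names):
#
#   class Renderer:
#       @lru_cache(maxsize=16)
#       def _render(self, cache_key):
#           msg = self._pending       # unhashable payload stashed by the caller
#           self._pending = None
#           return expensive_render(msg)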
| mit | 642,012,547,737,181,300 | 33.551674 | 104 | 0.487256 | false |
kuzmoyev/Google-Calendar-Simple-API | tests/test_attachment.py | 1 | 3788 | from unittest import TestCase
from gcsa.attachment import Attachment
from gcsa.serializers.attachment_serializer import AttachmentSerializer
DOC_URL = 'https://docs.google.com/document/d/1uDvwcxOsXkzl2Bod0YIfrIQ5MqfBhnc1jusYdH1xCZo/edit?usp=sharing'
class TestAttachment(TestCase):
def test_create(self):
attachment = Attachment('My doc',
file_url=DOC_URL,
mime_type="application/vnd.google-apps.document")
self.assertEqual(attachment.title, 'My doc')
with self.assertRaises(ValueError):
Attachment('My doc',
file_url=DOC_URL,
mime_type="application/vnd.google-apps.something")
class TestAttachmentSerializer(TestCase):
def test_to_json(self):
attachment = Attachment('My doc',
file_url=DOC_URL,
mime_type="application/vnd.google-apps.document")
attachment_json = {
'title': 'My doc',
'fileUrl': DOC_URL,
'mimeType': "application/vnd.google-apps.document"
}
self.assertDictEqual(AttachmentSerializer.to_json(attachment), attachment_json)
attachment = Attachment('My doc2',
file_url=DOC_URL,
mime_type="application/vnd.google-apps.drawing",
icon_link="https://some_link.com",
file_id='abc123')
attachment_json = {
'title': 'My doc2',
'fileUrl': DOC_URL,
'mimeType': "application/vnd.google-apps.drawing",
'iconLink': "https://some_link.com",
'fileId': 'abc123'
}
serializer = AttachmentSerializer(attachment)
self.assertDictEqual(serializer.get_json(), attachment_json)
def test_to_object(self):
attachment_json = {
'title': 'My doc',
'fileUrl': DOC_URL,
'mimeType': "application/vnd.google-apps.document"
}
attachment = AttachmentSerializer.to_object(attachment_json)
self.assertEqual(attachment.title, 'My doc')
self.assertEqual(attachment.file_url, DOC_URL)
self.assertEqual(attachment.mime_type, "application/vnd.google-apps.document")
self.assertIsNone(attachment.icon_link)
self.assertIsNone(attachment.file_id)
attachment_json = {
'title': 'My doc2',
'fileUrl': DOC_URL,
'mimeType': "application/vnd.google-apps.drawing",
'iconLink': "https://some_link.com",
'fileId': 'abc123'
}
serializer = AttachmentSerializer(attachment_json)
attachment = serializer.get_object()
self.assertEqual(attachment.title, 'My doc2')
self.assertEqual(attachment.file_url, DOC_URL)
self.assertEqual(attachment.mime_type, "application/vnd.google-apps.drawing")
self.assertEqual(attachment.icon_link, "https://some_link.com")
self.assertEqual(attachment.file_id, 'abc123')
attachment_json_str = """{
"title": "My doc3",
"fileUrl": "%s",
"mimeType": "application/vnd.google-apps.drawing",
"iconLink": "https://some_link.com",
"fileId": "abc123"
}
""" % DOC_URL
attachment = AttachmentSerializer.to_object(attachment_json_str)
self.assertEqual(attachment.title, 'My doc3')
self.assertEqual(attachment.file_url, DOC_URL)
self.assertEqual(attachment.mime_type, "application/vnd.google-apps.drawing")
self.assertEqual(attachment.icon_link, "https://some_link.com")
self.assertEqual(attachment.file_id, 'abc123')
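# These round-trip tests can be run with the standard unittest runner
# (illustrative; assumes the tests/ layout of this repository and an
# importable gcsa package):
#
#   python -m unittest tests.test_attachment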
| mit | 5,226,027,302,797,445,000 | 38.873684 | 108 | 0.587381 | false |
Azure/azure-sdk-for-python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_04_01/operations/_web_application_firewall_policies_operations.py | 1 | 20908 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class WebApplicationFirewallPoliciesOperations(object):
"""WebApplicationFirewallPoliciesOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_04_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
resource_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.WebApplicationFirewallPolicyListResult"]
"""Lists all of the protection policies within a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either WebApplicationFirewallPolicyListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2020_04_01.models.WebApplicationFirewallPolicyListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.WebApplicationFirewallPolicyListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-04-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('WebApplicationFirewallPolicyListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ApplicationGatewayWebApplicationFirewallPolicies'} # type: ignore
def list_all(
self,
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.WebApplicationFirewallPolicyListResult"]
"""Gets all the WAF policies in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either WebApplicationFirewallPolicyListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2020_04_01.models.WebApplicationFirewallPolicyListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.WebApplicationFirewallPolicyListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-04-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_all.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('WebApplicationFirewallPolicyListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/ApplicationGatewayWebApplicationFirewallPolicies'} # type: ignore
def get(
self,
resource_group_name, # type: str
policy_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.WebApplicationFirewallPolicy"
"""Retrieve protection policy with specified name within a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param policy_name: The name of the policy.
:type policy_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: WebApplicationFirewallPolicy, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_04_01.models.WebApplicationFirewallPolicy
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.WebApplicationFirewallPolicy"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-04-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'policyName': self._serialize.url("policy_name", policy_name, 'str', max_length=128, min_length=0),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('WebApplicationFirewallPolicy', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ApplicationGatewayWebApplicationFirewallPolicies/{policyName}'} # type: ignore
def create_or_update(
self,
resource_group_name, # type: str
policy_name, # type: str
parameters, # type: "_models.WebApplicationFirewallPolicy"
**kwargs # type: Any
):
# type: (...) -> "_models.WebApplicationFirewallPolicy"
"""Creates or update policy with specified rule set name within a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param policy_name: The name of the policy.
:type policy_name: str
:param parameters: Policy to be created.
:type parameters: ~azure.mgmt.network.v2020_04_01.models.WebApplicationFirewallPolicy
:keyword callable cls: A custom type or function that will be passed the direct response
:return: WebApplicationFirewallPolicy, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_04_01.models.WebApplicationFirewallPolicy
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.WebApplicationFirewallPolicy"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-04-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.create_or_update.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'policyName': self._serialize.url("policy_name", policy_name, 'str', max_length=128, min_length=0),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'WebApplicationFirewallPolicy')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('WebApplicationFirewallPolicy', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('WebApplicationFirewallPolicy', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ApplicationGatewayWebApplicationFirewallPolicies/{policyName}'} # type: ignore
def _delete_initial(
self,
resource_group_name, # type: str
policy_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-04-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'policyName': self._serialize.url("policy_name", policy_name, 'str', max_length=128, min_length=0),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ApplicationGatewayWebApplicationFirewallPolicies/{policyName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
policy_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes Policy.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param policy_name: The name of the policy.
:type policy_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the ARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
policy_name=policy_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'policyName': self._serialize.url("policy_name", policy_name, 'str', max_length=128, min_length=0),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ApplicationGatewayWebApplicationFirewallPolicies/{policyName}'} # type: ignore
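# --- Usage sketch (not part of the generated file) ---
# Hypothetical call sites for the operations defined above, going through the
# 2020-04-01 NetworkManagementClient; the resource group and policy names are
# placeholders and the credential setup shown is only one of several options.
#
#   from azure.identity import DefaultAzureCredential
#   from azure.mgmt.network import NetworkManagementClient
#
#   client = NetworkManagementClient(DefaultAzureCredential(), "<subscription-id>")
#   policy = client.web_application_firewall_policies.get("my-rg", "my-waf-policy")
#   poller = client.web_application_firewall_policies.begin_delete("my-rg", "my-waf-policy")
#   poller.result()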
| mit | -6,169,865,617,561,209,000 | 48.079812 | 215 | 0.643486 | false |
abhinavsingh/proxy.py | examples/websocket_client.py | 1 | 1465 | # -*- coding: utf-8 -*-
"""
proxy.py
~~~~~~~~
⚡⚡⚡ Fast, Lightweight, Pluggable, TLS interception capable proxy server focused on
Network monitoring, controls & Application development, testing, debugging.
:copyright: (c) 2013-present by Abhinav Singh and contributors.
:license: BSD, see LICENSE for more details.
"""
import time
from proxy.http.websocket import WebsocketClient, WebsocketFrame, websocketOpcodes
# globals
client: WebsocketClient
last_dispatch_time: float
static_frame = memoryview(WebsocketFrame.text(b'hello'))
num_echos = 10
def on_message(frame: WebsocketFrame) -> None:
"""WebsocketClient on_message callback."""
global client, num_echos, last_dispatch_time
print('Received %r after %d millisec' %
(frame.data, (time.time() - last_dispatch_time) * 1000))
assert(frame.data == b'hello' and frame.opcode ==
websocketOpcodes.TEXT_FRAME)
if num_echos > 0:
client.queue(static_frame)
last_dispatch_time = time.time()
num_echos -= 1
else:
client.close()
if __name__ == '__main__':
# Constructor establishes socket connection
client = WebsocketClient(
b'echo.websocket.org',
80,
b'/',
on_message=on_message)
# Perform handshake
client.handshake()
# Queue some data for client
client.queue(static_frame)
last_dispatch_time = time.time()
# Start event loop
client.run()
| bsd-3-clause | -4,967,191,948,487,152,000 | 28.18 | 86 | 0.654558 | false |
sebalas/fake-useragent | fake_useragent/utils.py | 1 | 2970 | import os
import re
from . import settings
try: # Python 2
from urllib import urlopen, quote_plus
except ImportError: # Python 3
from urllib.request import urlopen
from urllib.parse import quote_plus
try:
import json
except ImportError:
import simplejson as json
def get(url, annex=None):
if annex is not None:
url = url % (quote_plus(annex), )
return urlopen(url).read()
def get_browsers():
"""
very very hardcoded/dirty re/split stuff, but no dependencies
"""
html = get(settings.BROWSERS_STATS_PAGE)
html = html.decode('windows-1252')
html = html.split('<table class="reference notranslate">')[1]
html = html.split('</table>')[0]
browsers = re.findall(r'\.asp">(.+?)<', html, re.UNICODE)
for value, override in settings.OVERRIDES:
browsers = [
value if browser == override else browser
for browser in browsers
]
browsers_statistics = re.findall(
r'td\sclass="right">(.+?)\s', html, re.UNICODE
)
# TODO: ensure encoding
return list(zip(browsers, browsers_statistics))
def get_browser_versions(browser):
"""
very very hardcoded/dirty re/split stuff, but no dependencies
"""
html = get(settings.BROWSER_BASE_PAGE, browser)
html = html.decode('iso-8859-1')
html = html.split('<div id=\'liste\'>')[1]
html = html.split('</div>')[0]
browsers_iter = re.finditer(r'\.php\'>(.+?)</a', html, re.UNICODE)
count = 0
browsers = []
for browser in browsers_iter:
if 'more' in browser.group(1).lower():
continue
# TODO: ensure encoding
browsers.append(browser.group(1))
count += 1
if count == settings.BROWSERS_COUNT_LIMIT:
break
return browsers
def load():
browsers_dict = {}
randomize_dict = {}
for item in get_browsers():
browser, percent = item
browser_key = browser
for replacement in settings.REPLACEMENTS:
browser_key = browser_key.replace(replacement, '')
browser_key = browser_key.lower()
browsers_dict[browser_key] = get_browser_versions(browser)
for counter in range(int(float(percent))):
randomize_dict[str(len(randomize_dict))] = browser_key
db = {}
db['browsers'] = browsers_dict
db['randomize'] = randomize_dict
return db
def write(data):
data = json.dumps(data, ensure_ascii=False)
# no codecs\with for python 2.5
f = open(settings.DB, 'w+')
f.write(data)
f.close()
def read():
# no codecs\with for python 2.5
f = open(settings.DB, 'r')
data = f.read()
f.close()
return json.loads(data)
def exist():
return os.path.isfile(settings.DB)
def rm():
if exist():
os.remove(settings.DB)
def update():
if exist():
rm()
write(load())
def load_cached():
if not exist():
update()
return read()
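# --- Usage sketch (not part of the original module) ---
# A minimal illustration of how the helpers above fit together. It assumes the
# settings module points DB at a writable path and that the scraped pages named
# in settings are reachable, since load_cached() falls back to update() on
# first use.
if __name__ == '__main__':
    db = load_cached()                     # reads the JSON cache, building it if missing
    print(sorted(db['browsers'].keys()))   # browser keys, e.g. 'chrome', 'firefox'
    print(db['randomize']['0'])            # a browser key weighted by usage share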
| apache-2.0 | 6,406,799,067,328,781,000 | 19.915493 | 70 | 0.6 | false |
Daeinar/norx-py | norx.py | 1 | 7942 | """
Python2 implementation of NORX.
------
:author: Philipp Jovanovic <[email protected]>, 2014-2015.
:license: CC0, see LICENSE for more details.
"""
from struct import pack, unpack
class NORX(object):
def __init__(self, w=64, r=4, d=1, t=256):
assert w in [32, 64]
assert r >= 1
assert d >= 0
assert 10 * w >= t >= 0
self.NORX_W = w
self.NORX_R = r
self.NORX_D = d
self.NORX_T = t
self.NORX_N = w * 2
self.NORX_K = w * 4
self.NORX_B = w * 16
self.NORX_C = w * 6
self.RATE = self.NORX_B - self.NORX_C
self.HEADER_TAG = 1 << 0
self.PAYLOAD_TAG = 1 << 1
self.TRAILER_TAG = 1 << 2
self.FINAL_TAG = 1 << 3
self.BRANCH_TAG = 1 << 4
self.MERGE_TAG = 1 << 5
self.BYTES_WORD = w / 8
self.BYTES_TAG = t / 8
self.WORDS_RATE = self.RATE / w
self.BYTES_RATE = self.WORDS_RATE * self.BYTES_WORD
if w == 32:
self.R = (8, 11, 16, 31)
self.U = (0x243F6A88, 0x85A308D3, 0x13198A2E, 0x03707344, 0x254F537A,
0x38531D48, 0x839C6E83, 0xF97A3AE5, 0x8C91D88C, 0x11EAFB59)
self.M = 0xffffffff
self.fmt = '<L'
elif w == 64:
self.R = (8, 19, 40, 63)
self.U = (0x243F6A8885A308D3, 0x13198A2E03707344, 0xA4093822299F31D0, 0x082EFA98EC4E6C89, 0xAE8858DC339325A1,
0x670A134EE52D7FA6, 0xC4316D80CD967541, 0xD21DFBF8B630B762, 0x375A18D261E7F892, 0x343D1F187D92285B)
self.M = 0xffffffffffffffff
self.fmt = '<Q'
def load(self, x):
return unpack(self.fmt, x)[0]
def store(self, x):
return pack(self.fmt, x)
def ROTR(self, a, r):
return ((a >> r) | (a << (self.NORX_W - r))) & self.M
def H(self, a, b):
return ((a ^ b) ^ ((a & b) << 1)) & self.M
def G(self, a, b, c, d):
a = self.H(a, b)
d = self.ROTR(a ^ d, self.R[0])
c = self.H(c, d)
b = self.ROTR(b ^ c, self.R[1])
a = self.H(a, b)
d = self.ROTR(a ^ d, self.R[2])
c = self.H(c, d)
b = self.ROTR(b ^ c, self.R[3])
return a, b, c, d
def F(self, S):
# Column step
S[0], S[4], S[8], S[12] = self.G(S[0], S[4], S[8], S[12])
S[1], S[5], S[9], S[13] = self.G(S[1], S[5], S[9], S[13])
S[2], S[6], S[10], S[14] = self.G(S[2], S[6], S[10], S[14])
S[3], S[7], S[11], S[15] = self.G(S[3], S[7], S[11], S[15])
# Diagonal step
S[0], S[5], S[10], S[15] = self.G(S[0], S[5], S[10], S[15])
S[1], S[6], S[11], S[12] = self.G(S[1], S[6], S[11], S[12])
S[2], S[7], S[8], S[13] = self.G(S[2], S[7], S[8], S[13])
S[3], S[4], S[9], S[14] = self.G(S[3], S[4], S[9], S[14])
def permute(self, S):
for i in xrange(self.NORX_R):
self.F(S)
def pad(self, x):
y = bytearray(self.BYTES_RATE)
y[:len(x)] = x
y[len(x)] = 0x01
y[self.BYTES_RATE-1] |= 0x80
return y
def init(self, S, n, k):
b = self.BYTES_WORD
K = [self.load(k[b*i:b*(i+1)]) for i in xrange(self.NORX_K / self.NORX_W)]
N = [self.load(n[b*i:b*(i+1)]) for i in xrange(self.NORX_N / self.NORX_W)]
U = self.U
S[0], S[1], S[2], S[3] = U[0], N[0], N[1], U[1]
S[4], S[5], S[6], S[7] = K[0], K[1], K[2], K[3]
S[8], S[9], S[10], S[11] = U[2], U[3], U[4], U[5]
S[12], S[13], S[14], S[15] = U[6], U[7], U[8], U[9]
S[12] ^= self.NORX_W
S[13] ^= self.NORX_R
S[14] ^= self.NORX_D
S[15] ^= self.NORX_T
self.permute(S)
def inject_tag(self, S, tag):
S[15] ^= tag
def process_header(self, S, x):
return self.absorb_data(S, x, self.HEADER_TAG)
def process_trailer(self, S, x):
return self.absorb_data(S, x, self.TRAILER_TAG)
def absorb_data(self, S, x, tag):
inlen = len(x)
if inlen > 0:
i, n = 0, self.BYTES_RATE
while inlen >= n:
self.absorb_block(S, x[n*i:n*(i+1)], tag)
inlen -= n
i += 1
self.absorb_lastblock(S, x[n*i:n*i+inlen], tag)
def absorb_block(self, S, x, tag):
b = self.BYTES_WORD
self.inject_tag(S, tag)
self.permute(S)
for i in xrange(self.WORDS_RATE):
S[i] ^= self.load(x[b*i:b*(i+1)])
def absorb_lastblock(self, S, x, tag):
y = self.pad(x)
self.absorb_block(S, y, tag)
def encrypt_data(self, S, x):
c = bytearray()
inlen = len(x)
if inlen > 0:
i, n = 0, self.BYTES_RATE
while inlen >= n:
c += self.encrypt_block(S, x[n*i:n*(i+1)])
inlen -= n
i += 1
c += self.encrypt_lastblock(S, x[n*i:n*i+inlen])
return c
def encrypt_block(self, S, x):
c = bytearray()
b = self.BYTES_WORD
self.inject_tag(S, self.PAYLOAD_TAG)
self.permute(S)
for i in xrange(self.WORDS_RATE):
S[i] ^= self.load(x[b*i:b*(i+1)])
c += self.store(S[i])
return c[:self.BYTES_RATE]
def encrypt_lastblock(self, S, x):
y = self.pad(x)
c = self.encrypt_block(S, y)
return c[:len(x)]
def decrypt_data(self, S, x):
m = bytearray()
inlen = len(x)
if inlen > 0:
i, n = 0, self.BYTES_RATE
while inlen >= n:
m += self.decrypt_block(S, x[n*i:n*(i+1)])
inlen -= n
i += 1
m += self.decrypt_lastblock(S, x[n*i:n*i+inlen])
return m
def decrypt_block(self, S, x):
m = bytearray()
b = self.BYTES_WORD
self.inject_tag(S, self.PAYLOAD_TAG)
self.permute(S)
for i in xrange(self.WORDS_RATE):
c = self.load(x[b*i:b*(i+1)])
m += self.store(S[i] ^ c)
S[i] = c
return m[:self.BYTES_RATE]
def decrypt_lastblock(self, S, x):
m = bytearray()
y = bytearray()
b = self.BYTES_WORD
self.inject_tag(S, self.PAYLOAD_TAG)
self.permute(S)
for i in xrange(self.WORDS_RATE):
y += self.store(S[i])
y[:len(x)] = bytearray(x)
y[len(x)] ^= 0x01
y[self.BYTES_RATE-1] ^= 0x80
for i in xrange(self.WORDS_RATE):
c = self.load(y[b*i:b*(i+1)])
m += self.store(S[i] ^ c)
S[i] = c
return m[:len(x)]
def generate_tag(self, S):
t = bytearray()
self.inject_tag(S, self.FINAL_TAG)
self.permute(S)
self.permute(S)
for i in xrange(self.WORDS_RATE):
t += self.store(S[i])
return t[:self.BYTES_TAG]
def verify_tag(self, t0, t1):
acc = 0
for i in xrange(self.BYTES_TAG):
acc |= t0[i] ^ t1[i]
return (((acc - 1) >> 8) & 1) - 1
def aead_encrypt(self, h, m, t, n, k):
assert len(k) == self.NORX_K / 8
assert len(n) == self.NORX_N / 8
c = bytearray()
S = [0] * 16
self.init(S, n, k)
self.process_header(S, h)
c += self.encrypt_data(S, m)
self.process_trailer(S, t)
c += self.generate_tag(S)
return str(c)
def aead_decrypt(self, h, c, t, n, k):
assert len(k) == self.NORX_K / 8
assert len(n) == self.NORX_N / 8
assert len(c) >= self.BYTES_TAG
m = bytearray()
c = bytearray(c)
S = [0] * 16
d = len(c)-self.BYTES_TAG
c, t0 = c[:d], c[d:]
self.init(S, n, k)
self.process_header(S, h)
m += self.decrypt_data(S, c)
self.process_trailer(S, t)
t1 = self.generate_tag(S)
if self.verify_tag(t0, t1) != 0:
m = ''
return str(m)
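# --- Usage sketch (not part of the original module) ---
# A self-contained AEAD round-trip with the default w=64 parameters, assuming a
# Python 2 interpreter (the class relies on xrange/basestring above). Key and
# nonce lengths follow the asserts in aead_encrypt: NORX_K/8 = 32 bytes and
# NORX_N/8 = 16 bytes for w=64.
if __name__ == "__main__":
    norx = NORX()
    key = b"\x00" * 32                # demo key, not for real use
    nonce = b"\x01" * 16              # demo nonce, not for real use
    header, trailer = b"header", b""
    cipher = norx.aead_encrypt(header, b"attack at dawn", trailer, nonce, key)
    plain = norx.aead_decrypt(header, cipher, trailer, nonce, key)
    assert plain == b"attack at dawn"  # an empty string would signal a failed tag check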
| cc0-1.0 | -3,776,232,993,985,714,000 | 30.515873 | 121 | 0.467137 | false |
timfreund/pycontrol-shed | pycontrolshed/model.py | 1 | 13233 | # Copyright (C) 2011 Tim Freund and contributors.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from functools import wraps
from pycontrol import pycontrol
import logging
import pycontrolshed
import socket
# In [1]: route_domains = bigip.Networking.RouteDomain.get_list()
# In [2]: route_domains
# Out[2]: [2220L]
log = logging.getLogger('pycontrolshed.model')
def partitioned(f):
@wraps(f)
def wrapper(self, *args, **kwargs):
partition = kwargs.get('partition', None)
if partition:
orig_partition = self.bigip.Management.Partition.get_active_partition()
self.bigip.active_partition = partition
rc = f(self, *args, **kwargs)
self.bigip.active_partition = orig_partition
return rc
else:
return f(self, *args, **kwargs)
return wrapper
class NodeAssistant(object):
def __init__(self, bigip):
self.bigip = bigip
def disable(self, nodes, partition=None):
self.enable_disable_nodes(nodes, 'STATE_DISABLED', partition=partition)
def enable(self, nodes, partition=None):
self.enable_disable_nodes(nodes, 'STATE_ENABLED', partition=partition)
@partitioned
def enable_disable_nodes(self, nodes, target_state, partition=None):
if isinstance(nodes, basestring):
nodes = [nodes]
targets = []
states = []
for node in nodes:
targets.append(self.bigip.host_to_node(node))
states.append(target_state)
self.bigip.LocalLB.NodeAddress.set_session_enabled_state(node_addresses=targets,
states=states)
return self.status(nodes)
@partitioned
def status(self, nodes, partition=None):
if isinstance(nodes, basestring):
nodes = [nodes]
targets = [self.bigip.host_to_node(node) for node in nodes]
statuses = self.bigip.LocalLB.NodeAddress.get_session_enabled_state(node_addresses=targets)
rc = []
for node, status in zip(targets, statuses):
rc.append({'node': node,
'fqdn': self.bigip.node_to_host(node),
'status': status})
return rc
class VirtualAssistant(object):
def __init__(self, bigip):
self.bigip = bigip
@partitioned
def servers(self, partition=None):
return self.bigip.LocalLB.VirtualServer.get_list()
@partitioned
def all_server_statistics(self, partition=None):
return self.bigip.LocalLB.VirtualServer.get_all_statistics()
@partitioned
def addresses(self, partition=None):
return self.bigip.LocalLB.VirtualAddress.get_list()
@partitioned
def all_address_statistics(self, partition=None):
return self.bigip.LocalLB.VirtualAddress.get_all_statistics()
class PoolAssistant(object):
def __init__(self, bigip):
self.bigip = bigip
def create_type(self, type_name):
return self.bigip.LocalLB.PoolMember.typefactory.create(type_name)
@partitioned
def pools(self, partition=None):
return self.bigip.LocalLB.Pool.get_list()
@partitioned
def members(self, pools, partition=None):
if isinstance(pools, basestring):
pools = [pools]
session_status_list = self.bigip.LocalLB.PoolMember.get_session_enabled_state(pools)
monitor_status_list = self.bigip.LocalLB.PoolMember.get_monitor_status(pools)
rc = {}
for pool, sessions, monitors in zip(pools, session_status_list, monitor_status_list):
members = []
for session, monitor in zip(sessions, monitors):
members.append({'address': session.member.address,
'port': session.member.port,
'monitor': monitor,
'session': session})
rc[pool] = {'members': members}
return rc
@partitioned
def multi_member_statistics(self, pools, members, partition=None):
seq_members = []
ippd_seq_seq = self.create_type('Common.IPPortDefinitionSequenceSequence')
ippd_seq_seq.item = seq_members
empty_pools = []
if isinstance(members, list):
pass
elif isinstance(members, dict):
mlist = []
for k in pools:
if len(members[k]['members']) == 0:
empty_pools.append(k)
else:
mlist.append(members[k]['members'])
for ep in empty_pools:
pools.remove(ep)
members = mlist
for member_list in members:
seq_members.append(self.pool_members_to_ippd_seq(member_list))
stats = self.bigip.LocalLB.PoolMember.get_statistics(pool_names=pools, members=ippd_seq_seq)
rc = {}
for p, s in zip(pools, stats):
s = self.collapse_member_statistics(s)
rc[p] = s
return rc
@partitioned
def member_statistics(self, pool, member, partition=None):
# TODO refactor this to be a special case of multi_member_statistics
pools = [pool]
if isinstance(member, basestring):
ipp_member = self.bigip.host_port_to_ipportdef(*member.split(':'))
member = ipp_member
ippd_seq_seq = self.create_type('Common.IPPortDefinitionSequenceSequence')
ippd_seq = self.create_type('Common.IPPortDefinitionSequence')
ippd_seq_seq.item = ippd_seq
ippd_seq.item = member
# this is kind of garbage too... see TODO above
stats = self.bigip.LocalLB.PoolMember.get_statistics(pool_names=pools, members=ippd_seq_seq)[0].statistics[0]
return stats
def disable_member(self, pool_name, members, partition=None):
return self.enable_disable_members(pool_name, members, 'STATE_DISABLED', partition=partition)
def enable_member(self, pool_name, members, partition=None):
return self.enable_disable_members(pool_name, members, 'STATE_ENABLED', partition=partition)
@partitioned
def enable_disable_members(self, pool_name, members, target_state, partition=None):
pools = [pool_name]
if isinstance(members, basestring) or members.__class__.__name__.count('IPPortDefinition'):
members = [members]
session_states = self.create_type('LocalLB.PoolMember.MemberSessionStateSequence')
session_states.item = []
for member in members:
if isinstance(member, basestring):
ipp_member = self.bigip.host_port_to_ipportdef(*member.split(':'))
member = ipp_member
state = self.create_type('LocalLB.PoolMember.MemberSessionState')
state.member = member
state.session_state = target_state
session_states.item.append(state)
self.bigip.LocalLB.PoolMember.set_session_enabled_state(pool_names=pools,
session_states=[session_states])
return self.members(pools, partition=partition)
def pool_members_to_ippd_seq(self, members):
ippd_seq = self.create_type('Common.IPPortDefinitionSequence')
ippd_members = []
ippd_seq.item = ippd_members
for member in members:
address = None
port = None
if isinstance(member, dict):
address = member['address']
port = member['port']
elif isinstance(member, basestring):
address, port = member.split(':')
else:
raise Exception("Unknown member type")
ippd_members.append(self.bigip.host_port_to_ipportdef(address, port))
return ippd_seq
def collapse_member_statistics(self, pool_stats):
stats = {}
# LocalLB.PoolMember.MemberStatisticEntry
for mse in pool_stats.statistics:
member_id = "%s:%d" % (mse.member.address,
mse.member.port)
stats[member_id] = {}
for stat in mse.statistics:
stats[member_id][stat.type] = {'high': stat.value.high,
'low': stat.value.low}
return stats
class PyCtrlShedBIGIP(pycontrol.BIGIP):
def __init__(self, *args, **kwargs):
pycontrol.BIGIP.__init__(self, *args, **kwargs)
self.nodes = NodeAssistant(self)
self.pools = PoolAssistant(self)
self.virtual = VirtualAssistant(self)
self._active_partition = None
@property
def active_partition(self):
if self._active_partition:
return self._active_partition
self._active_partition = str(self.Management.Partition.get_active_partition())
return self._active_partition
@active_partition.setter
def active_partition(self, partition):
self.Management.Partition.set_active_partition(partition)
self._active_partition = partition
self._route_domains = self.Networking.RouteDomain.get_list()
def host_port_to_ipportdef(self, host, port):
ipp = self.LocalLB.PoolMember.typefactory.create('Common.IPPortDefinition')
ipp.address = self.host_to_node(host)
ipp.port = int(port)
return ipp
def host_to_node(self, host):
# If someone provides us with a route domain, we're going to trust
# that they know what route domain to use.
if host.count('%'):
host, route_domain = host.split('%', 1)
return "%s%%%s" % (socket.gethostbyname(host), route_domain)
node = socket.gethostbyname(host)
if (len(self.route_domains) == 1) and self.route_domains[0] != 0:
node += "%%%d" % self.route_domains[0]
return node
def node_to_ip(self, node):
if node.count('%'):
return node.split('%')[0]
return node
def node_to_host(self, node):
return socket.getfqdn(self.node_to_ip(node))
@property
def route_domains(self):
if hasattr(self, '_route_domains'):
return self._route_domains
self._route_domains = self.Networking.RouteDomain.get_list()
return self._route_domains
@property
def partitions(self):
partitions = []
for partition in self.Management.Partition.get_partition_list():
partitions.append({
'name': partition['partition_name'],
'description': partition["description"]
})
return partitions
class Environment(object):
def __init__(self, name, hosts=[], wsdls=None, username=None):
self.name = name
self.hosts = hosts
self.bigips = {}
self.username = username
self.wsdls = wsdls
if self.wsdls is None:
self.wsdls = [
'LocalLB.NodeAddress', 'LocalLB.Pool', 'LocalLB.PoolMember',
'LocalLB.Rule', 'LocalLB.VirtualAddress', 'LocalLB.VirtualServer',
'Management.Partition', 'Networking.RouteDomain',
'System.Failover',
]
for host in self.hosts:
self.connect_to_bigip(host)
def __setattr__(self, name, value):
if name in ['hosts', 'wsdls']:
if isinstance(value, str) or isinstance(value, unicode):
object.__setattr__(self, name, [host.strip() for host in value.split(',')])
else:
object.__setattr__(self, name, value)
else:
object.__setattr__(self, name, value)
def configure(self, config):
for k, v in config.items(self.name):
setattr(self, k, v)
@property
def all_bigip_connections(self):
return [self.bigips[bigip] for bigip in self.bigips]
@property
def active_bigip_connection(self):
for host in self.hosts:
bigip = self.connect_to_bigip(host)
if bigip.System.Failover.get_failover_state() == 'FAILOVER_STATE_ACTIVE':
return bigip
raise Exception('No active BIGIP devices were found in this environment (%s)' % self.name)
def connect_to_bigip(self, host, wsdls=None, force_reconnect=False):
if not(wsdls):
wsdls = self.wsdls
if not hasattr(self, 'password'):
            log.debug('No password has been set, attempting to retrieve via keychain capabilities')
password = pycontrolshed.get_password(self.name, self.username)
if password:
                log.debug('Password retrieved from the keychain')
self.password = password
else:
log.error('No password is available')
if host not in self.bigips or force_reconnect:
self.bigips[host] = PyCtrlShedBIGIP(host,
self.username,
self.password,
fromurl=True,
wsdls=wsdls)
return self.bigips[host]
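# --- Usage sketch (not part of the original module) ---
# Hypothetical wiring of the classes above; the host names, partition and node
# below are placeholders, and a reachable BIG-IP pair plus credentials would be
# required for this to run.
#
#   env = Environment('prod', hosts='lb01.example.com,lb02.example.com',
#                     username='admin')
#   env.password = 'secret'                  # skip the keychain lookup
#   bigip = env.active_bigip_connection      # picks the FAILOVER_STATE_ACTIVE unit
#   print(bigip.pools.pools(partition='Common'))
#   bigip.nodes.disable('app01.example.com', partition='Common')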
| gpl-2.0 | 8,453,095,348,816,026,000 | 35.555249 | 117 | 0.590796 | false |
skob/alerta | setup.py | 1 | 1622 | #!/usr/bin/env python
import setuptools
with open('VERSION') as f:
version = f.read().strip()
with open('README.md') as f:
readme = f.read()
setuptools.setup(
name='alerta-server',
version=version,
description='Alerta server WSGI application',
long_description=readme,
url='https://github.com/guardian/alerta',
license='Apache License 2.0',
author='Nick Satterly',
author_email='[email protected]',
packages=setuptools.find_packages(exclude=['bin', 'tests']),
install_requires=[
'Flask',
'Flask-Cors>=3.0.2',
'pymongo>=3.0',
'argparse',
'requests',
'python-dateutil',
'pytz',
'PyJWT',
'bcrypt'
],
include_package_data=True,
zip_safe=False,
entry_points={
'console_scripts': [
'alertad = alerta.app.shell:main'
],
'alerta.plugins': [
'reject = alerta.plugins.reject:RejectPolicy'
]
},
keywords='alert monitoring system wsgi application api',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Framework :: Flask',
'Intended Audience :: Information Technology',
'Intended Audience :: System Administrators',
'Intended Audience :: Telecommunications Industry',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 2.7',
'Topic :: System :: Monitoring',
],
)
| apache-2.0 | 1,114,736,307,577,252,900 | 27.964286 | 64 | 0.587546 | false |
Scratchcat1/AATC | flask_app/Flask_Test_App.py | 1 | 5816 | from flask import Flask, flash, redirect, render_template, request, session, abort
import random,os,ast,prettytable
from flask_app import forms
import AATC_Server_002 as AATC_Server
import HedaBot
COMMANDS = HedaBot.CreateCommandDictionary()
COMMANDS["AddFlight"][2]["Type"] = lambda x: HedaBot.SplitWaypoints(x,":")
COMMANDS["AddFlight"][2]["Query"] = COMMANDS["AddFlight"][2]["Query"].replace("returns","colons")
app = Flask(__name__)
app.config.from_object('flask_app.config')
@app.route("/")
def home():
## session["UserID"] = random.randint(0,1000)
return render_template("base.html",user = {"Username":session.get("UserID"), "UserID":session.get("UserID")},Commands = COMMANDS)
@app.route("/help")
def help_page():
return render_template("help.html",name = session.get("UserID"),user = {"Username":session.get("UserID"), "UserID":session.get("UserID")})
@app.route("/base")
def base():
return render_template("base.html",user = {"Username":session.get("UserID"), "UserID":session.get("UserID")})
@app.route("/quote")
def quote():
quotes = ObtainQuote(3)
return render_template("quote.html", quotes = quotes,user = {"Username":session.get("UserID"), "UserID":session.get("UserID")})
@app.route("/login", methods=['GET', 'POST'])
def login():
form = forms.LoginForm()
if form.validate_on_submit():
print("Loggin in ...")
if form.Username.data == form.Password.data:
session["UserID"] = form.Username.data
else:
session["UserID"] = -1
return render_template("LoginForm.html",title = "Login",form = form,user = {"Username":session.get("UserID"), "UserID":session.get("UserID")})
@app.route("/dyno", methods=['GET', 'POST'])
def dyno():
items = [{"name":"Username"},{"name":"Password"}]
fields = [{"name":"Username","form":forms.wtforms.StringField('Username', validators=[forms.DataRequired()])},
{"name":"Password","form":forms.wtforms.StringField('Password', validators=[forms.DataRequired()])}]
#form = forms.DynoForm(fields = items)
form = forms.update_form(fields)
print(form.__dict__)
if form.validate_on_submit():
print("Loggin in ...")
print(form.fields.data)
if form.Username.data == form.Password.data:
session["UserID"] = form.Username.data
else:
session["UserID"] = -1
#print(form.fields.__dict__)
return render_template("DynamicForm.html",title = "Login",form = form,user = {"Username":session.get("UserID"), "UserID":session.get("UserID")},fields = fields)
@app.route("/command/<string:command>",methods=['GET', 'POST'])
def Dynamic_Form(command):
if command not in COMMANDS:
return "FAILURE COMMAND DOES NOT EXIST"
Fields = Generate_Fields(command)
form = forms.update_form(Fields)
if form.validate_on_submit():
packet = Evaluate_Form(command,form)
WebConnection = AATC_Server.WebConnection(session.get("UserID",-1))
Sucess,Message,Data = WebConnection.Main(packet)
if command == "Login":
session["UserID"] = Data
Data = []
rendered = RenderResults(Sucess,Message,Data)
print(rendered)
return render_template("DynamicForm2.html",title = "Output",form = form,user = {"Username":session.get("UserID"), "UserID":session.get("UserID")},fields = Fields ,Commands = COMMANDS, OUTPUT = True, rendered_result = rendered)
return render_template("DynamicForm2.html",title = "command",form = form,user = {"Username":session.get("UserID"), "UserID":session.get("UserID")},fields = Fields,Commands = COMMANDS)
def Generate_Fields(command):
Queries = COMMANDS[command]
Fields = []
for x in range(1,len(Queries)+1):
query_name = Queries[x]["Query"]
field = {"name":query_name ,"form":forms.wtforms.StringField(query_name, validators=[forms.DataRequired()])}
Fields.append(field)
return Fields
def Evaluate_Form(command,form):
Queries = COMMANDS[command]
Arguments = []
for x in range(1,len(Queries)+1):
Arguments.append( Queries[x]["Type"](form.__dict__[Queries[x]["Query"]].data))
packet = (command,Arguments)
return packet
def RenderResults(Sucess,Message,Data = None):
render = ""
render += "Sucess >>"+str(Sucess)+"\n"
render += "Message >>"+str(Message) +"\n"
if Data not in [None,[]]:
try:
Columns = ast.literal_eval(Message)
Table = prettytable.PrettyTable(Columns)
for row in Data:
Table.add_row(row)
render += str(Table)
except Exception as e:
render += "Error creating asthetic table"+str(e) +"\n"
for row in Data:
render += str(row)+"\n"
render += ""
rendered = render.split("\n")
return rendered
##def ObtainQuote(number = 1):
## with open(os.path.join(os.path.abspath(os.path.join(os.getcwd(), os.pardir)),"SkyrimDialogue.txt"),"r") as f:
## for i,line in enumerate(f):
## pass
##
## responses = []
## for f in range(number):
## lineNum = random.randint(0,i+1)
## with open(os.path.join(os.path.abspath(os.path.join(os.getcwd(), os.pardir)),"SkyrimDialogue.txt"),"r") as f:
## for x in range(lineNum):
## line = f.readline()
## responses.append( line.rstrip().split("\t")[-1:][0])
## return responses
def main_app(app):
app.secret_key = "abcewhfuhiwuhef"
app.run(host = "0.0.0.0")
if __name__ == "__main__":
main_app(app)
| gpl-3.0 | 1,400,593,260,159,722,000 | 31.813953 | 234 | 0.597146 | false |
ragupta-git/ImcSdk | imcsdk/mometa/comm/CommSnmp.py | 1 | 8759 | """This module contains the general information for CommSnmp ManagedObject."""
from ...imcmo import ManagedObject
from ...imccoremeta import MoPropertyMeta, MoMeta
from ...imcmeta import VersionMeta
class CommSnmpConsts:
ADMIN_STATE_DISABLED = "disabled"
ADMIN_STATE_ENABLED = "enabled"
COM2_SEC_NONE = "None"
COM2_SEC_DISABLED = "disabled"
COM2_SEC_FULL = "full"
COM2_SEC_LIMITED = "limited"
PROTO_ALL = "all"
PROTO_NONE = "none"
PROTO_TCP = "tcp"
PROTO_UDP = "udp"
class CommSnmp(ManagedObject):
"""This is CommSnmp class."""
consts = CommSnmpConsts()
naming_props = set([])
mo_meta = {
"classic": MoMeta("CommSnmp", "commSnmp", "snmp-svc", VersionMeta.Version151f, "InputOutput", 0xfff, [], ["admin", "read-only", "user"], [u'commSvcEp'], [u'commSnmpTrap', u'commSnmpUser'], ["Get", "Set"]),
"modular": MoMeta("CommSnmp", "commSnmp", "snmp-svc", VersionMeta.Version2013e, "InputOutput", 0xfff, [], ["admin", "read-only", "user"], [u'commSvcEp'], [u'commSnmpTrap', u'commSnmpUser'], ["Get", "Set"])
}
prop_meta = {
"classic": {
"admin_state": MoPropertyMeta("admin_state", "adminState", "string", VersionMeta.Version151f, MoPropertyMeta.READ_WRITE, 0x2, None, None, None, ["Disabled", "Enabled", "disabled", "enabled"], []),
"com2_sec": MoPropertyMeta("com2_sec", "com2Sec", "string", VersionMeta.Version151f, MoPropertyMeta.READ_WRITE, 0x4, None, None, None, ["None", "disabled", "full", "limited"], []),
"community": MoPropertyMeta("community", "community", "string", VersionMeta.Version151f, MoPropertyMeta.READ_WRITE, 0x8, None, None, r"""[!#$%\(\)\*\+,\-\./:<=\[\]\^_\{\}~a-zA-Z0-9]{0,18}""", [], []),
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version151f, MoPropertyMeta.READ_WRITE, 0x10, 0, 255, None, [], []),
"engine_id_key": MoPropertyMeta("engine_id_key", "engineIdKey", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x20, 0, 27, r"""[^#!&]{0,27}""", [], []),
"port": MoPropertyMeta("port", "port", "uint", VersionMeta.Version151f, MoPropertyMeta.READ_WRITE, 0x40, None, None, None, [], ["1-65535"]),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version151f, MoPropertyMeta.READ_WRITE, 0x80, 0, 255, None, [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version151f, MoPropertyMeta.READ_WRITE, 0x100, None, None, None, ["", "created", "deleted", "modified", "removed"], []),
"sys_contact": MoPropertyMeta("sys_contact", "sysContact", "string", VersionMeta.Version151f, MoPropertyMeta.READ_WRITE, 0x200, 0, 64, None, [], []),
"sys_location": MoPropertyMeta("sys_location", "sysLocation", "string", VersionMeta.Version151f, MoPropertyMeta.READ_WRITE, 0x400, 0, 64, None, [], []),
"trap_community": MoPropertyMeta("trap_community", "trapCommunity", "string", VersionMeta.Version151f, MoPropertyMeta.READ_WRITE, 0x800, None, None, r"""[!#$%\(\)\*\+,\-\./:<=\[\]\^_\{\}~a-zA-Z0-9]{0,18}""", [], []),
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version151f, MoPropertyMeta.INTERNAL, None, None, None, None, [], []),
"descr": MoPropertyMeta("descr", "descr", "string", VersionMeta.Version151f, MoPropertyMeta.READ_ONLY, None, None, None, r"""[ !#$%&\(\)\*\+,\-\./:;\?@\[\]_\{\|\}~a-zA-Z0-9]{0,256}""", [], []),
"engine_id": MoPropertyMeta("engine_id", "engineId", "string", VersionMeta.Version209c, MoPropertyMeta.READ_ONLY, None, 0, 255, None, [], []),
"name": MoPropertyMeta("name", "name", "string", VersionMeta.Version151f, MoPropertyMeta.READ_ONLY, None, None, None, r"""[\-\.:_a-zA-Z0-9]{0,16}""", [], []),
"proto": MoPropertyMeta("proto", "proto", "string", VersionMeta.Version151f, MoPropertyMeta.READ_ONLY, None, None, None, None, ["all", "none", "tcp", "udp"], []),
},
"modular": {
"admin_state": MoPropertyMeta("admin_state", "adminState", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x2, None, None, None, ["Disabled", "Enabled", "disabled", "enabled"], []),
"com2_sec": MoPropertyMeta("com2_sec", "com2Sec", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x4, None, None, None, ["None", "disabled", "full", "limited"], []),
"community": MoPropertyMeta("community", "community", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x8, None, None, r"""[!#$%\(\)\*\+,\-\./:<=\[\]\^_\{\}~a-zA-Z0-9]{0,18}""", [], []),
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x10, 0, 255, None, [], []),
"engine_id_key": MoPropertyMeta("engine_id_key", "engineIdKey", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x20, 0, 27, r"""[^#!&]{0,27}""", [], []),
"port": MoPropertyMeta("port", "port", "uint", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x40, None, None, None, [], ["1-65535"]),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x80, 0, 255, None, [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x100, None, None, None, ["", "created", "deleted", "modified", "removed"], []),
"sys_contact": MoPropertyMeta("sys_contact", "sysContact", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x200, 0, 64, None, [], []),
"sys_location": MoPropertyMeta("sys_location", "sysLocation", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x400, 0, 64, None, [], []),
"trap_community": MoPropertyMeta("trap_community", "trapCommunity", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x800, None, None, r"""[!#$%\(\)\*\+,\-\./:<=\[\]\^_\{\}~a-zA-Z0-9]{0,18}""", [], []),
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version2013e, MoPropertyMeta.INTERNAL, None, None, None, None, [], []),
"descr": MoPropertyMeta("descr", "descr", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_ONLY, None, None, None, r"""[ !#$%&\(\)\*\+,\-\./:;\?@\[\]_\{\|\}~a-zA-Z0-9]{0,256}""", [], []),
"engine_id": MoPropertyMeta("engine_id", "engineId", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_ONLY, None, 0, 255, None, [], []),
"name": MoPropertyMeta("name", "name", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_ONLY, None, None, None, r"""[\-\.:_a-zA-Z0-9]{0,16}""", [], []),
"proto": MoPropertyMeta("proto", "proto", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_ONLY, None, None, None, None, ["all", "none", "tcp", "udp"], []),
},
}
prop_map = {
"classic": {
"adminState": "admin_state",
"com2Sec": "com2_sec",
"community": "community",
"dn": "dn",
"engineIdKey": "engine_id_key",
"port": "port",
"rn": "rn",
"status": "status",
"sysContact": "sys_contact",
"sysLocation": "sys_location",
"trapCommunity": "trap_community",
"childAction": "child_action",
"descr": "descr",
"engineId": "engine_id",
"name": "name",
"proto": "proto",
},
"modular": {
"adminState": "admin_state",
"com2Sec": "com2_sec",
"community": "community",
"dn": "dn",
"engineIdKey": "engine_id_key",
"port": "port",
"rn": "rn",
"status": "status",
"sysContact": "sys_contact",
"sysLocation": "sys_location",
"trapCommunity": "trap_community",
"childAction": "child_action",
"descr": "descr",
"engineId": "engine_id",
"name": "name",
"proto": "proto",
},
}
def __init__(self, parent_mo_or_dn, **kwargs):
self._dirty_mask = 0
self.admin_state = None
self.com2_sec = None
self.community = None
self.engine_id_key = None
self.port = None
self.status = None
self.sys_contact = None
self.sys_location = None
self.trap_community = None
self.child_action = None
self.descr = None
self.engine_id = None
self.name = None
self.proto = None
ManagedObject.__init__(self, "CommSnmp", parent_mo_or_dn, **kwargs)
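# --- Usage sketch (not part of the generated file) ---
# Hypothetical configuration flow through an ImcHandle session; the address,
# credentials and community string are placeholders. The parent dn "sys/svc-ext"
# corresponds to the commSvcEp container listed in mo_meta above.
#
#   from imcsdk.imchandle import ImcHandle
#
#   handle = ImcHandle("192.0.2.10", "admin", "password")
#   handle.login()
#   snmp = CommSnmp(parent_mo_or_dn="sys/svc-ext",
#                   admin_state="enabled", community="public")
#   handle.set_mo(snmp)
#   handle.logout()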
| apache-2.0 | 1,721,278,884,973,908,700 | 63.881481 | 230 | 0.574837 | false |
HyperloopTeam/FullOpenMDAO | lib/python2.7/site-packages/traits-4.3.0-py2.7-macosx-10.10-x86_64.egg/traits/tests/dict_test_case.py | 1 | 3621 | #------------------------------------------------------------------------------
#
# Copyright (c) 2007, Enthought, Inc.
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in /LICENSE.txt and may be redistributed only
# under the conditions described in the aforementioned license. The license
# is also available online at http://www.enthought.com/licenses/BSD.txt
#
# Thanks for using Enthought open source!
#
#------------------------------------------------------------------------------
""" Test cases for dictionary (Dict) traits. """
from __future__ import absolute_import
from traits.testing.unittest_tools import unittest
from ..api import on_trait_change, Dict, Event, HasTraits, Str
# fixme: We'd like to use a callable instance for the listener so that we
# can maintain state, but traits barfs trying to determine the signature 8^()
def create_listener():
""" Create a listener for testing trait notifications. """
def listener(obj, trait_name, old, new):
listener.obj = obj
listener.trait_name = trait_name
listener.new = new
listener.old = old
listener.called += 1
return
listener.initialize = lambda : initialize_listener(listener)
return initialize_listener(listener)
def initialize_listener(listener):
""" Initialize a listener so it looks like it hasn't been called.
This allows us to re-use the listener without having to create and
wire-up a new one.
"""
listener.obj = None
listener.trait_name = None
listener.old = None
listener.new = None
listener.called = 0
return listener # For convenience
class DictTestCase(unittest.TestCase):
""" Test cases for dictionary (Dict) traits. """
def test_modified_event(self):
class Foo(HasTraits):
name = Str
modified = Event
@on_trait_change('name')
def _fire_modified_event(self):
self.modified = True
return
class Bar(HasTraits):
foos = Dict(Str, Foo)
modified = Event
@on_trait_change('foos_items,foos.modified')
def _fire_modified_event(self, obj, trait_name, old, new):
self.modified = True
return
bar = Bar()
listener = create_listener()
bar.on_trait_change(listener, 'modified')
# Assign a completely new dictionary.
bar.foos = {'dino' : Foo(name='dino')}
self.assertEqual(1, listener.called)
self.assertEqual('modified', listener.trait_name)
# Add an item to an existing dictionary.
listener.initialize()
fred = Foo(name='fred')
bar.foos['fred'] = fred
self.assertEqual(1, listener.called)
self.assertEqual('modified', listener.trait_name)
# Modify an item already in the dictionary.
listener.initialize()
fred.name = 'barney'
self.assertEqual(1, listener.called)
self.assertEqual('modified', listener.trait_name)
# Overwrite an item in the dictionary. This is the one that fails!
listener.initialize()
bar.foos['fred'] = Foo(name='wilma')
self.assertEqual(1, listener.called)
self.assertEqual('modified', listener.trait_name)
return
if __name__ == '__main__':
unittest.main()
#### EOF ######################################################################
| gpl-2.0 | -1,642,906,959,306,978,300 | 29.686441 | 79 | 0.574151 | false |
linuxipho/mycroft-core | mycroft/configuration/config.py | 1 | 8186 |
# Copyright 2017 Mycroft AI Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import re
import json
import inflection
from os.path import exists, isfile
from requests import RequestException
from mycroft.util.json_helper import load_commented_json, merge_dict
from mycroft.util.log import LOG
from .locations import DEFAULT_CONFIG, SYSTEM_CONFIG, USER_CONFIG
def is_remote_list(values):
''' check if this list corresponds to a backend formatted collection of
dictionaries '''
for v in values:
if not isinstance(v, dict):
return False
if "@type" not in v.keys():
return False
return True
def translate_remote(config, setting):
"""
Translate config names from server to equivalents usable
in mycroft-core.
Args:
config: base config to populate
        setting: remote settings to be translated
"""
IGNORED_SETTINGS = ["uuid", "@type", "active", "user", "device"]
for k, v in setting.items():
if k not in IGNORED_SETTINGS:
# Translate the CamelCase values stored remotely into the
# Python-style names used within mycroft-core.
key = inflection.underscore(re.sub(r"Setting(s)?", "", k))
if isinstance(v, dict):
config[key] = config.get(key, {})
translate_remote(config[key], v)
elif isinstance(v, list):
if is_remote_list(v):
if key not in config:
config[key] = {}
translate_list(config[key], v)
else:
config[key] = v
else:
config[key] = v
def translate_list(config, values):
"""
    Translate list formatted by mycroft server.
Args:
config (dict): target config
values (list): list from mycroft server config
"""
for v in values:
module = v["@type"]
if v.get("active"):
config["module"] = module
config[module] = config.get(module, {})
translate_remote(config[module], v)
class LocalConf(dict):
"""
Config dict from file.
"""
def __init__(self, path):
super(LocalConf, self).__init__()
if path:
self.path = path
self.load_local(path)
def load_local(self, path):
"""
Load local json file into self.
Args:
path (str): file to load
"""
if exists(path) and isfile(path):
try:
config = load_commented_json(path)
for key in config:
self.__setitem__(key, config[key])
LOG.debug("Configuration {} loaded".format(path))
except Exception as e:
LOG.error("Error loading configuration '{}'".format(path))
LOG.error(repr(e))
else:
LOG.debug("Configuration '{}' not defined, skipping".format(path))
def store(self, path=None):
"""
Cache the received settings locally. The cache will be used if
the remote is unreachable to load settings that are as close
to the user's as possible
"""
path = path or self.path
with open(path, 'w') as f:
json.dump(self, f, indent=2)
def merge(self, conf):
merge_dict(self, conf)
class RemoteConf(LocalConf):
"""
Config dict fetched from mycroft.ai
"""
def __init__(self, cache=None):
super(RemoteConf, self).__init__(None)
cache = cache or '/var/tmp/mycroft_web_cache.json'
from mycroft.api import is_paired
if not is_paired():
self.load_local(cache)
return
try:
# Here to avoid cyclic import
from mycroft.api import DeviceApi
api = DeviceApi()
setting = api.get_settings()
try:
location = api.get_location()
except RequestException as e:
LOG.error("RequestException fetching remote location: {}"
.format(str(e)))
if exists(cache) and isfile(cache):
location = load_commented_json(cache).get('location')
if location:
setting["location"] = location
# Remove server specific entries
config = {}
translate_remote(config, setting)
for key in config:
self.__setitem__(key, config[key])
self.store(cache)
except RequestException as e:
LOG.error("RequestException fetching remote configuration: {}"
.format(str(e)))
self.load_local(cache)
except Exception as e:
LOG.error("Failed to fetch remote configuration: %s" % repr(e),
exc_info=True)
self.load_local(cache)
class Configuration:
__config = {} # Cached config
__patch = {} # Patch config that skills can update to override config
@staticmethod
def get(configs=None, cache=True):
"""
Get configuration, returns cached instance if available otherwise
builds a new configuration dict.
Args:
configs (list): List of configuration dicts
cache (boolean): True if the result should be cached
"""
if Configuration.__config:
return Configuration.__config
else:
return Configuration.load_config_stack(configs, cache)
@staticmethod
def load_config_stack(configs=None, cache=False):
"""
load a stack of config dicts into a single dict
Args:
configs (list): list of dicts to load
cache (boolean): True if result should be cached
Returns: merged dict of all configuration files
"""
if not configs:
configs = [LocalConf(DEFAULT_CONFIG), RemoteConf(),
LocalConf(SYSTEM_CONFIG), LocalConf(USER_CONFIG),
Configuration.__patch]
else:
# Handle strings in stack
for index, item in enumerate(configs):
if isinstance(item, str):
configs[index] = LocalConf(item)
# Merge all configs into one
base = {}
for c in configs:
merge_dict(base, c)
# copy into cache
if cache:
Configuration.__config.clear()
for key in base:
Configuration.__config[key] = base[key]
return Configuration.__config
else:
return base
@staticmethod
def init(ws):
"""
Setup websocket handlers to update config.
Args:
ws: Websocket instance
"""
ws.on("configuration.updated", Configuration.updated)
ws.on("configuration.patch", Configuration.patch)
@staticmethod
def updated(message):
"""
handler for configuration.updated, triggers an update
of cached config.
"""
Configuration.load_config_stack(cache=True)
@staticmethod
def patch(message):
"""
patch the volatile dict usable by skills
Args:
message: Messagebus message should contain a config
in the data payload.
"""
config = message.data.get("config", {})
merge_dict(Configuration.__patch, config)
Configuration.load_config_stack(cache=True)
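# --- Usage sketch (not part of the original module) ---
# Typical read path for the stack above when running inside mycroft-core, where
# the default/system/user config files referenced in .locations exist. The key
# names below ('lang', 'tts') are illustrative examples of entries found in the
# default configuration.
if __name__ == '__main__':
    config = Configuration.get()      # DEFAULT < remote < SYSTEM < USER < patch
    print(config.get('lang'))
    print(config.get('tts', {}).get('module'))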
| apache-2.0 | 6,509,178,606,018,905,000 | 30.363985 | 78 | 0.557049 | false |
wevote/WebAppPublic | apis_v1/documentation_source/positions_count_for_one_ballot_item_doc.py | 1 | 2560 | # apis_v1/documentation_source/positions_count_for_one_ballot_item_doc.py
# Brought to you by We Vote. Be good.
# -*- coding: UTF-8 -*-
def positions_count_for_one_ballot_item_doc_template_values(url_root):
"""
Show documentation about positionsCountForOneBallotItem
"""
required_query_parameter_list = [
{
'name': 'voter_device_id',
'value': 'string', # boolean, integer, long, string
'description': 'An 88 character unique identifier linked to a voter record on the server',
},
{
'name': 'api_key',
'value': 'string (from post, cookie, or get (in that order))', # boolean, integer, long, string
'description': 'The unique key provided to any organization using the WeVoteServer APIs',
},
{
'name': 'ballot_item_we_vote_id',
'value': 'string', # boolean, integer, long, string
'description': 'The unique identifier for one ballot item.',
},
]
optional_query_parameter_list = [
]
potential_status_codes_list = [
]
try_now_link_variables_dict = {
}
api_response = '{\n' \
' "success": boolean,\n' \
' "status": string,\n' \
' "ballot_item_we_vote_id: string,\n' \
' "ballot_item_list": list ' \
'(we return a list so this API can be consumed like positionsCountForAllBallotItems)\n' \
' [\n' \
' "ballot_item_we_vote_id": string,\n' \
' "support_count": integer,\n' \
' "oppose_count": integer,\n' \
' ],\n' \
'}'
template_values = {
'api_name': 'positionsCountForOneBallotItem',
'api_slug': 'positionsCountForOneBallotItem',
'api_introduction':
"Retrieve all positions held by this voter in one list.",
'try_now_link': 'apis_v1:positionsCountForOneBallotItemView',
'try_now_link_variables_dict': try_now_link_variables_dict,
'url_root': url_root,
'get_or_post': 'GET',
'required_query_parameter_list': required_query_parameter_list,
'optional_query_parameter_list': optional_query_parameter_list,
'api_response': api_response,
'api_response_notes':
"",
'potential_status_codes_list': potential_status_codes_list,
}
return template_values
| bsd-3-clause | 5,725,262,911,566,859,000 | 38.384615 | 115 | 0.5375 | false |
longde123/MultiversePlatform | server/config/common/character_factory.py | 1 | 4399 | #
# The Multiverse Platform is made available under the MIT License.
#
# Copyright (c) 2012 The Multiverse Foundation
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify,
# merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software
# is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.
#
#
from multiverse.mars import *
from multiverse.mars.objects import *
from multiverse.mars.core import *
from multiverse.mars.events import *
from multiverse.mars.util import *
from multiverse.mars.plugins import *
from multiverse.server.plugins import *
from multiverse.server.math import *
from multiverse.server.events import *
from multiverse.server.objects import *
from multiverse.server.engine import *
from java.lang import *
displayContext = DisplayContext("human_female.mesh")
displayContext.addSubmesh(DisplayContext.Submesh("bodyShape-lib.0",
"human_female.skin_material"))
displayContext.addSubmesh(DisplayContext.Submesh("head_aShape-lib.0",
"human_female.head_a_material"))
displayContext.addSubmesh(DisplayContext.Submesh("hair_bShape-lib.0",
"human_female.hair_b_material"))
# default player template
player = Template("DefaultPlayer")
player.put(WorldManagerClient.NAMESPACE,
WorldManagerClient.TEMPL_DISPLAY_CONTEXT,
displayContext)
player.put(WorldManagerClient.NAMESPACE,
WorldManagerClient.TEMPL_OBJECT_TYPE,
ObjectTypes.player)
player.put(InventoryClient.NAMESPACE,
InventoryClient.TEMPL_ITEMS,
"")
ObjectManagerClient.registerTemplate(player)
# character factory
class SampleFactory (CharacterFactory):
def createCharacter(self, worldName, uid, properties):
name = properties.get("characterName");
# Player start location
loc = Point(-135343, 0, -202945)
# Player start instance; assumes you have an instance named "default"
instanceOid = InstanceClient.getInstanceOid("default")
overrideTemplate = Template()
if name:
overrideTemplate.put(WorldManagerClient.NAMESPACE,
WorldManagerClient.TEMPL_NAME, name)
overrideTemplate.put(WorldManagerClient.NAMESPACE,
WorldManagerClient.TEMPL_INSTANCE, Long(instanceOid))
overrideTemplate.put(WorldManagerClient.NAMESPACE,
WorldManagerClient.TEMPL_LOC, loc)
# Initialize the player's instance restore stack
restorePoint = InstanceRestorePoint("default", loc)
restorePoint.setFallbackFlag(True)
restoreStack = LinkedList()
restoreStack.add(restorePoint)
        overrideTemplate.put(Namespace.OBJECT_MANAGER,
ObjectManagerClient.TEMPL_INSTANCE_RESTORE_STACK, restoreStack)
        overrideTemplate.put(Namespace.OBJECT_MANAGER,
ObjectManagerClient.TEMPL_CURRENT_INSTANCE_NAME, "default")
# Make the player persistent (will be saved in database)
overrideTemplate.put(Namespace.OBJECT_MANAGER,
ObjectManagerClient.TEMPL_PERSISTENT, Boolean(True));
# Create the player object
objOid = ObjectManagerClient.generateObject(
"DefaultPlayer", overrideTemplate)
Log.debug("SampleFactory: generated obj oid=" + str(objOid))
return objOid
sampleFactory = SampleFactory()
LoginPlugin.getCharacterGenerator().setCharacterFactory(sampleFactory);
| mit | 2,857,242,688,523,992,600 | 39.357798 | 81 | 0.711525 | false |
shoopio/shoop | shuup/importer/admin_module/import_views.py | 1 | 7325 | # -*- coding: utf-8 -*-
# This file is part of Shuup.
#
# Copyright (c) 2012-2019, Shoop Commerce Ltd. All rights reserved.
#
# This source code is licensed under the OSL-3.0 license found in the
# LICENSE file in the root directory of this source tree.
import hashlib
import logging
import os
from datetime import datetime
from django.contrib import messages
from django.core.urlresolvers import reverse
from django.db.transaction import atomic
from django.http.response import Http404, HttpResponse, HttpResponseBadRequest
from django.shortcuts import redirect
from django.utils.translation import ugettext_lazy as _
from django.views.generic import FormView, TemplateView, View
from shuup.admin.shop_provider import get_shop
from shuup.importer.admin_module.forms import ImportForm, ImportSettingsForm
from shuup.importer.transforms import transform_file
from shuup.importer.utils import (
get_import_file_path, get_importer, get_importer_choices
)
from shuup.utils.excs import Problem
logger = logging.getLogger(__name__)
class ImportProcessView(TemplateView):
template_name = "shuup/importer/admin/import_process.jinja"
importer = None
def dispatch(self, request, *args, **kwargs):
self.importer_cls = get_importer(request.GET.get("importer"))
self.model_str = request.GET.get("importer")
self.lang = request.GET.get("lang")
return super(ImportProcessView, self).dispatch(request, *args, **kwargs)
def _transform_request_file(self):
try:
filename = get_import_file_path(self.request.GET.get("n"))
if not os.path.isfile(filename):
raise ValueError(_("%s is not a file") % self.request.GET.get("n"))
except:
raise Problem(_("File missing."))
try:
mode = "xls"
if filename.endswith("xlsx"):
mode = "xlsx"
if filename.endswith("csv"):
mode = "csv"
if self.importer_cls.custom_file_transformer:
return self.importer_cls.transform_file(mode, filename)
return transform_file(mode, filename)
except (Exception, RuntimeError) as e:
messages.error(self.request, e)
def prepare(self):
self.data = self._transform_request_file()
if self.data is None:
return False
self.importer = self.importer_cls(self.data, get_shop(self.request), self.lang)
self.importer.process_data()
if self.request.method == "POST":
# check if mapping was done
for field in self.importer.unmatched_fields:
key = "remap[%s]" % field
vals = self.request.POST.getlist(key)
if len(vals):
self.importer.manually_match(field, vals[0])
self.importer.do_remap()
self.settings_form = ImportSettingsForm(data=self.request.POST if self.request.POST else None)
if self.settings_form.is_bound:
self.settings_form.is_valid()
return True
def post(self, request, *args, **kwargs):
prepared = self.prepare()
if not prepared:
return redirect(reverse("shuup_admin:importer.import"))
try:
with atomic():
self.importer.do_import(self.settings_form.cleaned_data["import_mode"])
except Exception:
logger.exception("Failed to import data")
messages.error(request, _("Failed to import the file."))
return redirect(reverse("shuup_admin:importer.import"))
self.template_name = "shuup/importer/admin/import_process_complete.jinja"
return self.render_to_response(self.get_context_data(**kwargs))
def get_context_data(self, **kwargs):
context = super(ImportProcessView, self).get_context_data(**kwargs)
context["data"] = self.data
context["importer"] = self.importer
context["form"] = self.settings_form
context["model_fields"] = self.importer.get_fields_for_mapping()
context["visible_rows"] = self.data.rows[1:5]
return context
def get(self, request, *args, **kwargs):
prepared = self.prepare()
if not prepared:
return redirect(reverse("shuup_admin:importer.import"))
return self.render_to_response(self.get_context_data(**kwargs))
class ImportView(FormView):
template_name = "shuup/importer/admin/import.jinja"
form_class = ImportForm
def post(self, request, *args, **kwargs):
file = self.request.FILES["file"]
basename, ext = os.path.splitext(file.name)
import_name = "%s%s" % (hashlib.sha256(("%s" % datetime.now()).encode("utf-8")).hexdigest(), ext)
full_path = get_import_file_path(import_name)
if not os.path.isdir(os.path.dirname(full_path)):
os.makedirs(os.path.dirname(full_path))
with open(full_path, 'wb+') as destination:
for chunk in file.chunks():
destination.write(chunk)
next_url = request.POST.get("next")
importer = request.POST.get("importer")
lang = request.POST.get("language")
return redirect("%s?n=%s&importer=%s&lang=%s" % (next_url, import_name, importer, lang))
def get_form_kwargs(self):
kwargs = super(ImportView, self).get_form_kwargs()
initial = kwargs.get("initial", {})
initial["importer"] = self.request.GET.get("importer", initial.get("initial"))
kwargs.update({
"request": self.request,
"initial": initial
})
return kwargs
def get_context_data(self, **kwargs):
context = super(ImportView, self).get_context_data(**kwargs)
        # check whether the importer has an example file template
# if so, we also add a url to download the example file
importer = self.request.GET.get("importer")
# no importer passed, get the first choice available
if not importer:
importers = list(get_importer_choices())
if importers:
importer = importers[0][0]
if importer:
importer_cls = get_importer(importer)
context.update(importer_cls.get_help_context_data(self.request))
context["importer"] = importer_cls
return context
class ExampleFileDownloadView(View):
def get(self, request, *args, **kwargs):
importer = request.GET.get("importer")
file_name = request.GET.get("file_name")
if not importer or not file_name:
return HttpResponseBadRequest(_("Invalid parameters"))
importer_cls = get_importer(importer)
if not importer_cls or not importer_cls.has_example_file():
raise Http404(_("Invalid importer"))
example_file = importer_cls.get_example_file(file_name)
if not example_file:
raise Http404(_("Invalid file name"))
response = HttpResponse(content_type=example_file.content_type)
response['Content-Disposition'] = 'attachment; filename=%s' % example_file.file_name
data = importer_cls.get_example_file_content(example_file, request)
if not data:
raise Http404(_("File not found"))
data.seek(0)
response.write(data.getvalue())
return response
| agpl-3.0 | -5,630,403,041,267,978,000 | 37.151042 | 105 | 0.632628 | false |
PeridotYouClod/gRPC-Makerboards | generated/proto_out/sensors_pb2_grpc.py | 1 | 20413 | # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
import generated.proto_out.sensors_pb2 as sensors__pb2
class FrontEndStub(object):
"""http://www.grpc.io/docs/guides/concepts.html is good reference for #tags
#FrontEnd #Simple
The FrontEnd server is the endpoint that most client interactions
should use. These are public facing and used by servers in the outside
world.
  Note: Currently there is no security in place, so this should only be used
  for localhost applications or behind a firewall.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.GetLux = channel.unary_unary(
'/FrontEnd/GetLux',
request_serializer=sensors__pb2.GetLuxRequest.SerializeToString,
response_deserializer=sensors__pb2.GetLuxReply.FromString,
)
self.GetTemperature = channel.unary_unary(
'/FrontEnd/GetTemperature',
request_serializer=sensors__pb2.GetTemperatureRequest.SerializeToString,
response_deserializer=sensors__pb2.GetTemperatureReply.FromString,
)
self.GetSound = channel.unary_unary(
'/FrontEnd/GetSound',
request_serializer=sensors__pb2.GetSoundRequest.SerializeToString,
response_deserializer=sensors__pb2.GetSoundReply.FromString,
)
self.GetIrButtonPressed = channel.unary_unary(
'/FrontEnd/GetIrButtonPressed',
request_serializer=sensors__pb2.GetIrButtonPressedRequest.SerializeToString,
response_deserializer=sensors__pb2.GetIrButtonPressedReply.FromString,
)
self.GetSonar = channel.unary_unary(
'/FrontEnd/GetSonar',
request_serializer=sensors__pb2.GetSonarRequest.SerializeToString,
response_deserializer=sensors__pb2.GetSonarReply.FromString,
)
self.SetLedStrip = channel.unary_unary(
'/FrontEnd/SetLedStrip',
request_serializer=sensors__pb2.SetLedStripRequest.SerializeToString,
response_deserializer=sensors__pb2.SetLedStripReply.FromString,
)
self.GetButtonPressed = channel.unary_unary(
'/FrontEnd/GetButtonPressed',
request_serializer=sensors__pb2.GetButtonPressedRequest.SerializeToString,
response_deserializer=sensors__pb2.GetButtonPressedReply.FromString,
)
self.SendToRfBlaster = channel.unary_unary(
'/FrontEnd/SendToRfBlaster',
request_serializer=sensors__pb2.SendToRfBlasterRequest.SerializeToString,
response_deserializer=sensors__pb2.SendToRfBlasterReply.FromString,
)
class FrontEndServicer(object):
"""http://www.grpc.io/docs/guides/concepts.html is good reference for #tags
#FrontEnd #Simple
The FrontEnd server is the endpoint that most client interactions
should use. These are public facing and used by servers in the outside
world.
  Note: Currently there is no security in place, so this should only be used
  for localhost applications or behind a firewall.
"""
def GetLux(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetTemperature(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetSound(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetIrButtonPressed(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetSonar(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SetLedStrip(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetButtonPressed(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SendToRfBlaster(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_FrontEndServicer_to_server(servicer, server):
rpc_method_handlers = {
'GetLux': grpc.unary_unary_rpc_method_handler(
servicer.GetLux,
request_deserializer=sensors__pb2.GetLuxRequest.FromString,
response_serializer=sensors__pb2.GetLuxReply.SerializeToString,
),
'GetTemperature': grpc.unary_unary_rpc_method_handler(
servicer.GetTemperature,
request_deserializer=sensors__pb2.GetTemperatureRequest.FromString,
response_serializer=sensors__pb2.GetTemperatureReply.SerializeToString,
),
'GetSound': grpc.unary_unary_rpc_method_handler(
servicer.GetSound,
request_deserializer=sensors__pb2.GetSoundRequest.FromString,
response_serializer=sensors__pb2.GetSoundReply.SerializeToString,
),
'GetIrButtonPressed': grpc.unary_unary_rpc_method_handler(
servicer.GetIrButtonPressed,
request_deserializer=sensors__pb2.GetIrButtonPressedRequest.FromString,
response_serializer=sensors__pb2.GetIrButtonPressedReply.SerializeToString,
),
'GetSonar': grpc.unary_unary_rpc_method_handler(
servicer.GetSonar,
request_deserializer=sensors__pb2.GetSonarRequest.FromString,
response_serializer=sensors__pb2.GetSonarReply.SerializeToString,
),
'SetLedStrip': grpc.unary_unary_rpc_method_handler(
servicer.SetLedStrip,
request_deserializer=sensors__pb2.SetLedStripRequest.FromString,
response_serializer=sensors__pb2.SetLedStripReply.SerializeToString,
),
'GetButtonPressed': grpc.unary_unary_rpc_method_handler(
servicer.GetButtonPressed,
request_deserializer=sensors__pb2.GetButtonPressedRequest.FromString,
response_serializer=sensors__pb2.GetButtonPressedReply.SerializeToString,
),
'SendToRfBlaster': grpc.unary_unary_rpc_method_handler(
servicer.SendToRfBlaster,
request_deserializer=sensors__pb2.SendToRfBlasterRequest.FromString,
response_serializer=sensors__pb2.SendToRfBlasterReply.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'FrontEnd', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
class PushFrontEndStub(object):
"""#FrontEnd #ServerStreaming
The FrontEnd server is the endpoint that most client interactions
should use. These are public facing and used by servers in the outside
world. This server is for streaming events.
  Note: Currently there is no security in place, so this should only be used
  for localhost applications or behind a firewall.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.Subscribe = channel.unary_unary(
'/PushFrontEnd/Subscribe',
request_serializer=sensors__pb2.SubscribeRequest.SerializeToString,
response_deserializer=sensors__pb2.SubscribeReply.FromString,
)
self.StreamButtonPressed = channel.unary_stream(
'/PushFrontEnd/StreamButtonPressed',
request_serializer=sensors__pb2.GetButtonPressedRequest.SerializeToString,
response_deserializer=sensors__pb2.GetButtonPressedReply.FromString,
)
class PushFrontEndServicer(object):
"""#FrontEnd #ServerStreaming
The FrontEnd server is the endpoint that most client interactions
should use. These are public facing and used by servers in the outside
world. This server is for streaming events.
  Note: Currently there is no security in place, so this should only be used
  for localhost applications or behind a firewall.
"""
def Subscribe(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def StreamButtonPressed(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_PushFrontEndServicer_to_server(servicer, server):
rpc_method_handlers = {
'Subscribe': grpc.unary_unary_rpc_method_handler(
servicer.Subscribe,
request_deserializer=sensors__pb2.SubscribeRequest.FromString,
response_serializer=sensors__pb2.SubscribeReply.SerializeToString,
),
'StreamButtonPressed': grpc.unary_stream_rpc_method_handler(
servicer.StreamButtonPressed,
request_deserializer=sensors__pb2.GetButtonPressedRequest.FromString,
response_serializer=sensors__pb2.GetButtonPressedReply.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'PushFrontEnd', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
class ArduinoStub(object):
"""#Backend #Simple
Arduino server handles interactions between Arduino brand devices & other
servers. (New to Arduino: https://www.arduino.cc/en/Guide/Introduction)
  Note: Do not have clients depend on this; it should be behind a FrontEnd.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.GetIrButtonPressed = channel.unary_unary(
'/Arduino/GetIrButtonPressed',
request_serializer=sensors__pb2.GetIrButtonPressedRequest.SerializeToString,
response_deserializer=sensors__pb2.GetIrButtonPressedReply.FromString,
)
self.GetSonar = channel.unary_unary(
'/Arduino/GetSonar',
request_serializer=sensors__pb2.GetSonarRequest.SerializeToString,
response_deserializer=sensors__pb2.GetSonarReply.FromString,
)
self.SendToRfBlaster = channel.unary_unary(
'/Arduino/SendToRfBlaster',
request_serializer=sensors__pb2.SendToRfBlasterRequest.SerializeToString,
response_deserializer=sensors__pb2.SendToRfBlasterReply.FromString,
)
class ArduinoServicer(object):
"""#Backend #Simple
Arduino server handles interactions between Arduino brand devices & other
servers. (New to Arduino: https://www.arduino.cc/en/Guide/Introduction)
  Note: Do not have clients depend on this; it should be behind a FrontEnd.
"""
def GetIrButtonPressed(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetSonar(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SendToRfBlaster(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_ArduinoServicer_to_server(servicer, server):
rpc_method_handlers = {
'GetIrButtonPressed': grpc.unary_unary_rpc_method_handler(
servicer.GetIrButtonPressed,
request_deserializer=sensors__pb2.GetIrButtonPressedRequest.FromString,
response_serializer=sensors__pb2.GetIrButtonPressedReply.SerializeToString,
),
'GetSonar': grpc.unary_unary_rpc_method_handler(
servicer.GetSonar,
request_deserializer=sensors__pb2.GetSonarRequest.FromString,
response_serializer=sensors__pb2.GetSonarReply.SerializeToString,
),
'SendToRfBlaster': grpc.unary_unary_rpc_method_handler(
servicer.SendToRfBlaster,
request_deserializer=sensors__pb2.SendToRfBlasterRequest.FromString,
response_serializer=sensors__pb2.SendToRfBlasterReply.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'Arduino', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
class WioLinkStub(object):
"""#Backend #Simple
WioLink server handles interactions between Wio Link brand devices & other
servers. (New to Wio Link: http://wiki.seeed.cc/Wio_Link/)
  Note: Do not have clients depend on this; it should be behind a FrontEnd.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.GetLux = channel.unary_unary(
'/WioLink/GetLux',
request_serializer=sensors__pb2.GetLuxRequest.SerializeToString,
response_deserializer=sensors__pb2.GetLuxReply.FromString,
)
self.GetTemperature = channel.unary_unary(
'/WioLink/GetTemperature',
request_serializer=sensors__pb2.GetTemperatureRequest.SerializeToString,
response_deserializer=sensors__pb2.GetTemperatureReply.FromString,
)
self.GetSound = channel.unary_unary(
'/WioLink/GetSound',
request_serializer=sensors__pb2.GetSoundRequest.SerializeToString,
response_deserializer=sensors__pb2.GetSoundReply.FromString,
)
self.SetLedStrip = channel.unary_unary(
'/WioLink/SetLedStrip',
request_serializer=sensors__pb2.SetLedStripRequest.SerializeToString,
response_deserializer=sensors__pb2.SetLedStripReply.FromString,
)
self.GetButtonPressed = channel.unary_unary(
'/WioLink/GetButtonPressed',
request_serializer=sensors__pb2.GetButtonPressedRequest.SerializeToString,
response_deserializer=sensors__pb2.GetButtonPressedReply.FromString,
)
class WioLinkServicer(object):
"""#Backend #Simple
WioLink server handles interactions between Wio Link brand devices & other
servers. (New to Wio Link: http://wiki.seeed.cc/Wio_Link/)
  Note: Do not have clients depend on this; it should be behind a FrontEnd.
"""
def GetLux(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetTemperature(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetSound(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SetLedStrip(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetButtonPressed(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_WioLinkServicer_to_server(servicer, server):
rpc_method_handlers = {
'GetLux': grpc.unary_unary_rpc_method_handler(
servicer.GetLux,
request_deserializer=sensors__pb2.GetLuxRequest.FromString,
response_serializer=sensors__pb2.GetLuxReply.SerializeToString,
),
'GetTemperature': grpc.unary_unary_rpc_method_handler(
servicer.GetTemperature,
request_deserializer=sensors__pb2.GetTemperatureRequest.FromString,
response_serializer=sensors__pb2.GetTemperatureReply.SerializeToString,
),
'GetSound': grpc.unary_unary_rpc_method_handler(
servicer.GetSound,
request_deserializer=sensors__pb2.GetSoundRequest.FromString,
response_serializer=sensors__pb2.GetSoundReply.SerializeToString,
),
'SetLedStrip': grpc.unary_unary_rpc_method_handler(
servicer.SetLedStrip,
request_deserializer=sensors__pb2.SetLedStripRequest.FromString,
response_serializer=sensors__pb2.SetLedStripReply.SerializeToString,
),
'GetButtonPressed': grpc.unary_unary_rpc_method_handler(
servicer.GetButtonPressed,
request_deserializer=sensors__pb2.GetButtonPressedRequest.FromString,
response_serializer=sensors__pb2.GetButtonPressedReply.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'WioLink', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
class PushStub(object):
"""#ServerStreaming #Backend
Push server pushes data when a sensor event occurs for the client to react
to.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.Subscribe = channel.unary_unary(
'/Push/Subscribe',
request_serializer=sensors__pb2.SubscribeRequest.SerializeToString,
response_deserializer=sensors__pb2.SubscribeReply.FromString,
)
self.StreamButtonPressed = channel.unary_stream(
'/Push/StreamButtonPressed',
request_serializer=sensors__pb2.GetButtonPressedRequest.SerializeToString,
response_deserializer=sensors__pb2.GetButtonPressedReply.FromString,
)
class PushServicer(object):
"""#ServerStreaming #Backend
Push server pushes data when a sensor event occurs for the client to react
to.
"""
def Subscribe(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def StreamButtonPressed(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_PushServicer_to_server(servicer, server):
rpc_method_handlers = {
'Subscribe': grpc.unary_unary_rpc_method_handler(
servicer.Subscribe,
request_deserializer=sensors__pb2.SubscribeRequest.FromString,
response_serializer=sensors__pb2.SubscribeReply.SerializeToString,
),
'StreamButtonPressed': grpc.unary_stream_rpc_method_handler(
servicer.StreamButtonPressed,
request_deserializer=sensors__pb2.GetButtonPressedRequest.FromString,
response_serializer=sensors__pb2.GetButtonPressedReply.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'Push', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
| mit | -6,543,290,364,991,223,000 | 38.483559 | 85 | 0.725371 | false |
skirpichev/omg | diofant/vector/dyadic.py | 1 | 8076 | from ..core import AtomicExpr, Integer, Pow
from ..matrices import ImmutableMatrix
from .basisdependent import (BasisDependent, BasisDependentAdd,
BasisDependentMul, BasisDependentZero)
class Dyadic(BasisDependent):
"""
Super class for all Dyadic-classes.
References
==========
* https://en.wikipedia.org/wiki/Dyadic_tensor
* Kane, T., Levinson, D. Dynamics Theory and Applications. 1985 McGraw-Hill
"""
_op_priority = 13.0
@property
def components(self):
"""
Returns the components of this dyadic in the form of a
Python dictionary mapping BaseDyadic instances to the
corresponding measure numbers.
"""
# The '_components' attribute is defined according to the
# subclass of Dyadic the instance belongs to.
return self._components
def dot(self, other):
"""
Returns the dot product(also called inner product) of this
Dyadic, with another Dyadic or Vector.
If 'other' is a Dyadic, this returns a Dyadic. Else, it returns
a Vector (unless an error is encountered).
Parameters
==========
other : Dyadic/Vector
The other Dyadic or Vector to take the inner product with
Examples
========
>>> from diofant.vector import CoordSysCartesian
>>> N = CoordSysCartesian('N')
>>> D1 = N.i.outer(N.j)
>>> D2 = N.j.outer(N.j)
>>> D1.dot(D2)
(N.i|N.j)
>>> D1.dot(N.j)
N.i
"""
from .vector import Vector
if isinstance(other, BasisDependentZero):
return Vector.zero
elif isinstance(other, Vector):
outvec = Vector.zero
for k, v in self.components.items():
vect_dot = k.args[1].dot(other)
outvec += vect_dot * v * k.args[0]
return outvec
elif isinstance(other, Dyadic):
outdyad = Dyadic.zero
for k1, v1 in self.components.items():
for k2, v2 in other.components.items():
vect_dot = k1.args[1].dot(k2.args[0])
outer_product = k1.args[0].outer(k2.args[1])
outdyad += vect_dot * v1 * v2 * outer_product
return outdyad
else:
raise TypeError('Inner product is not defined for ' +
str(type(other)) + ' and Dyadics.')
def __and__(self, other):
return self.dot(other)
__and__.__doc__ = dot.__doc__
def cross(self, other):
"""
Returns the cross product between this Dyadic, and a Vector, as a
Vector instance.
Parameters
==========
other : Vector
The Vector that we are crossing this Dyadic with
Examples
========
>>> from diofant.vector import CoordSysCartesian
>>> N = CoordSysCartesian('N')
>>> d = N.i.outer(N.i)
>>> d.cross(N.j)
(N.i|N.k)
"""
from .vector import Vector
if other == Vector.zero:
return Dyadic.zero
elif isinstance(other, Vector):
outdyad = Dyadic.zero
for k, v in self.components.items():
cross_product = k.args[1].cross(other)
outer = k.args[0].outer(cross_product)
outdyad += v * outer
return outdyad
else:
raise TypeError(str(type(other)) + ' not supported for ' +
'cross with dyadics')
def __xor__(self, other):
return self.cross(other)
__xor__.__doc__ = cross.__doc__
def to_matrix(self, system, second_system=None):
"""
Returns the matrix form of the dyadic with respect to one or two
coordinate systems.
Parameters
==========
system : CoordSysCartesian
The coordinate system that the rows and columns of the matrix
correspond to. If a second system is provided, this
only corresponds to the rows of the matrix.
second_system : CoordSysCartesian, optional, default=None
The coordinate system that the columns of the matrix correspond
to.
Examples
========
>>> from diofant.vector import CoordSysCartesian
>>> N = CoordSysCartesian('N')
>>> v = N.i + 2*N.j
>>> d = v.outer(N.i)
>>> d.to_matrix(N)
Matrix([
[1, 0, 0],
[2, 0, 0],
[0, 0, 0]])
>>> q = Symbol('q')
>>> P = N.orient_new_axis('P', q, N.k)
>>> d.to_matrix(N, P)
Matrix([
[ cos(q), -sin(q), 0],
[2*cos(q), -2*sin(q), 0],
[ 0, 0, 0]])
"""
if second_system is None:
second_system = system
return ImmutableMatrix([i.dot(self).dot(j) for i in system for j in
second_system]).reshape(3, 3)
class BaseDyadic(Dyadic, AtomicExpr):
"""Class to denote a base dyadic tensor component."""
def __new__(cls, vector1, vector2):
from .vector import Vector, BaseVector, VectorZero
# Verify arguments
if not isinstance(vector1, (BaseVector, VectorZero)) or \
not isinstance(vector2, (BaseVector, VectorZero)):
raise TypeError('BaseDyadic cannot be composed of non-base ' +
'vectors')
# Handle special case of zero vector
elif vector1 == Vector.zero or vector2 == Vector.zero:
return Dyadic.zero
# Initialize instance
obj = super().__new__(cls, vector1, vector2)
obj._base_instance = obj
obj._measure_number = 1
obj._components = {obj: Integer(1)}
obj._sys = vector1._sys
obj._pretty_form = ('(' + vector1._pretty_form + '|' +
vector2._pretty_form + ')')
obj._latex_form = ('(' + vector1._latex_form + '{|}' +
vector2._latex_form + ')')
return obj
def __str__(self, printer=None):
return '(' + str(self.args[0]) + '|' + str(self.args[1]) + ')'
_diofantstr = __str__
_diofantrepr = _diofantstr
class DyadicMul(BasisDependentMul, Dyadic):
"""Products of scalars and BaseDyadics."""
def __new__(cls, *args, **options):
obj = BasisDependentMul.__new__(cls, *args, **options)
return obj
@property
def base_dyadic(self):
"""The BaseDyadic involved in the product."""
return self._base_instance
@property
def measure_number(self):
"""The scalar expression involved in the definition of
this DyadicMul.
"""
return self._measure_number
class DyadicAdd(BasisDependentAdd, Dyadic):
"""Class to hold dyadic sums."""
def __new__(cls, *args, **options):
obj = BasisDependentAdd.__new__(cls, *args, **options)
return obj
def __str__(self, printer=None):
ret_str = ''
items = list(self.components.items())
items.sort(key=lambda x: x[0].__str__())
for k, v in items:
temp_dyad = k * v
ret_str += temp_dyad.__str__(printer) + ' + '
return ret_str[:-3]
__repr__ = __str__
_diofantstr = __str__
class DyadicZero(BasisDependentZero, Dyadic):
"""Class to denote a zero dyadic."""
_op_priority = 13.1
_pretty_form = '(0|0)'
_latex_form = r'(\mathbf{\hat{0}}|\mathbf{\hat{0}})'
def __new__(cls):
obj = BasisDependentZero.__new__(cls)
return obj
def _dyad_div(one, other):
"""Helper for division involving dyadics."""
if isinstance(other, Dyadic):
raise TypeError('Cannot divide two dyadics')
else:
return DyadicMul(one, Pow(other, -1))
Dyadic._expr_type = Dyadic
Dyadic._mul_func = DyadicMul
Dyadic._add_func = DyadicAdd
Dyadic._zero_func = DyadicZero
Dyadic._base_func = BaseDyadic
Dyadic._div_helper = _dyad_div
Dyadic.zero = DyadicZero()
| bsd-3-clause | -3,575,280,719,085,110,000 | 28.911111 | 79 | 0.541605 | false |
UCBerkeleySETI/blimpy | blimpy/plotting/plot_time_series.py | 1 | 1628 | from .config import *
from ..utils import rebin, db
from .plot_utils import calc_extent
def plot_time_series(wf, f_start=None, f_stop=None, if_id=0, logged=True, orientation='h', MJD_time=False, **kwargs):
""" Plot the time series.
Args:
f_start (float): start frequency, in MHz
f_stop (float): stop frequency, in MHz
        logged (bool): Plot in linear (False) or dB units (True)
        kwargs: keyword args to be passed to matplotlib plot()
"""
ax = plt.gca()
plot_f, plot_data = wf.grab_data(f_start, f_stop, if_id)
# Since the data has been squeezed, the axis for time goes away if only one bin, causing a bug with axis=1
if len(plot_data.shape) > 1:
plot_data = np.nanmean(plot_data, axis=1)
else:
plot_data = np.nanmean(plot_data)
if logged and wf.header['nbits'] >= 8:
plot_data = db(plot_data)
# Make proper time axis for plotting (but only for plotting!). Note that this makes the values inclusive.
extent = calc_extent(wf, plot_f=plot_f, plot_t=wf.timestamps, MJD_time=MJD_time)
plot_t = np.linspace(extent[2], extent[3], len(wf.timestamps))
if MJD_time:
tlabel = "Time [MJD]"
else:
tlabel = "Time [s]"
if logged:
plabel = "Power [dB]"
else:
plabel = "Power [counts]"
# Reverse oder if vertical orientation.
if 'v' in orientation:
plt.plot(plot_data, plot_t, **kwargs)
plt.xlabel(plabel)
else:
plt.plot(plot_t, plot_data, **kwargs)
plt.xlabel(tlabel)
plt.ylabel(plabel)
ax.autoscale(axis='both', tight=True)
| bsd-3-clause | -8,949,116,032,728,782,000 | 30.921569 | 117 | 0.616093 | false |
openstack/horizon | openstack_dashboard/dashboards/admin/volumes/forms.py | 1 | 10388 | # Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.urls import reverse
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import messages
from horizon.utils import validators as utils_validators
from openstack_dashboard.api import cinder
from openstack_dashboard.dashboards.admin.snapshots.forms \
import populate_status_choices
from openstack_dashboard.dashboards.project.volumes \
import forms as project_forms
from openstack_dashboard.dashboards.project.volumes.tables \
import VolumesTableBase as volumes_table
# This set of states was pulled from cinder's admin_actions.py
SETTABLE_STATUSES = (
'attaching', 'available', 'creating', 'deleting', 'detaching', 'error',
'error_deleting', 'in-use', 'maintenance', 'reserved')
STATUS_CHOICES = tuple(
status for status in volumes_table.STATUS_DISPLAY_CHOICES
if status[0] in SETTABLE_STATUSES
)
class ManageVolume(forms.SelfHandlingForm):
identifier = forms.CharField(
max_length=255,
label=_("Identifier"),
help_text=_("Name or other identifier for existing volume"))
id_type = forms.ThemableChoiceField(
label=_("Identifier Type"),
help_text=_("Type of backend device identifier provided"))
host = forms.CharField(
max_length=255,
label=_("Host"),
help_text=_("Cinder host on which the existing volume resides; "
"takes the form: host@backend-name#pool"))
name = forms.CharField(
max_length=255,
label=_("Volume Name"),
required=False,
help_text=_("Volume name to be assigned"))
description = forms.CharField(max_length=255, widget=forms.Textarea(
attrs={'rows': 4}),
label=_("Description"), required=False)
metadata = forms.CharField(max_length=255, widget=forms.Textarea(
attrs={'rows': 2}),
label=_("Metadata"), required=False,
help_text=_("Comma-separated key=value pairs"),
validators=[utils_validators.validate_metadata])
volume_type = forms.ThemableChoiceField(
label=_("Volume Type"),
required=False)
availability_zone = forms.ThemableChoiceField(
label=_("Availability Zone"),
required=False)
bootable = forms.BooleanField(
label=_("Bootable"),
required=False,
help_text=_("Specifies that the newly created volume "
"should be marked as bootable"))
def __init__(self, request, *args, **kwargs):
super().__init__(request, *args, **kwargs)
self.fields['id_type'].choices = [("source-name", _("Name"))] + \
[("source-id", _("ID"))]
volume_types = cinder.volume_type_list(request)
self.fields['volume_type'].choices = [("", _("No volume type"))] + \
[(type.name, type.name)
for type in volume_types]
self.fields['availability_zone'].choices = \
project_forms.availability_zones(request)
def handle(self, request, data):
try:
az = data.get('availability_zone')
# assume user enters metadata with "key1=val1,key2=val2"
# convert to dictionary
metadataDict = {}
metadata = data.get('metadata')
if metadata:
metadata.replace(" ", "")
for item in metadata.split(','):
key, value = item.split('=')
metadataDict[key] = value
cinder.volume_manage(request,
host=data['host'],
identifier=data['identifier'],
id_type=data['id_type'],
name=data['name'],
description=data['description'],
volume_type=data['volume_type'],
availability_zone=az,
metadata=metadataDict,
bootable=data['bootable'])
# for success message, use identifier if user does not
# provide a volume name
volume_name = data['name']
if not volume_name:
volume_name = data['identifier']
messages.success(
request,
_('Successfully sent the request to manage volume: %s')
% volume_name)
return True
except Exception:
redirect = reverse("horizon:admin:volumes:index")
exceptions.handle(request, _("Unable to manage volume."),
redirect=redirect)
class UnmanageVolume(forms.SelfHandlingForm):
name = forms.CharField(label=_("Volume Name"),
required=False,
widget=forms.TextInput(
attrs={'readonly': 'readonly'}))
host = forms.CharField(label=_("Host"),
required=False,
widget=forms.TextInput(
attrs={'readonly': 'readonly'}))
volume_id = forms.CharField(label=_("ID"),
required=False,
widget=forms.TextInput(
attrs={'readonly': 'readonly'}))
def handle(self, request, data):
try:
cinder.volume_unmanage(request, self.initial['volume_id'])
messages.success(
request,
_('Successfully sent the request to unmanage volume: %s')
% data['name'])
return True
except Exception:
redirect = reverse("horizon:admin:volumes:index")
exceptions.handle(request, _("Unable to unmanage volume."),
redirect=redirect)
class MigrateVolume(forms.SelfHandlingForm):
name = forms.CharField(label=_("Volume Name"),
required=False,
widget=forms.TextInput(
attrs={'readonly': 'readonly'}))
current_host = forms.CharField(label=_("Current Host"),
required=False,
widget=forms.TextInput(
attrs={'readonly': 'readonly'}))
host = forms.ThemableChoiceField(
label=_("Destination Host"),
help_text=_("Choose a Host to migrate to."))
force_host_copy = forms.BooleanField(label=_("Force Host Copy"),
initial=False, required=False)
def __init__(self, request, *args, **kwargs):
super().__init__(request, *args, **kwargs)
initial = kwargs.get('initial', {})
self.fields['host'].choices = self.populate_host_choices(request,
initial)
def populate_host_choices(self, request, initial):
hosts = initial.get('hosts')
current_host = initial.get('current_host')
host_list = [(host.name, host.name)
for host in hosts
if host.name != current_host]
if host_list:
host_list.insert(0, ("", _("Select a new host")))
else:
host_list.insert(0, ("", _("No other hosts available")))
return sorted(host_list)
def handle(self, request, data):
try:
cinder.volume_migrate(request,
self.initial['volume_id'],
data['host'],
data['force_host_copy'])
messages.success(
request,
_('Successfully sent the request to migrate volume: %s')
% data['name'])
return True
except Exception:
redirect = reverse("horizon:admin:volumes:index")
exceptions.handle(request, _("Failed to migrate volume."),
redirect=redirect)
class UpdateStatus(forms.SelfHandlingForm):
status = forms.ThemableChoiceField(label=_("Status"))
def __init__(self, request, *args, **kwargs):
        # Initial values have to be handled before calling super(), otherwise
        # they would get overwritten back to the raw value
current_status = kwargs['initial']['status']
kwargs['initial'].pop('status')
super().__init__(request, *args, **kwargs)
self.fields['status'].choices = populate_status_choices(
current_status, STATUS_CHOICES)
def handle(self, request, data):
# Obtain the localized status for including in the message
for choice in self.fields['status'].choices:
if choice[0] == data['status']:
new_status = choice[1]
break
else:
new_status = data['status']
try:
cinder.volume_reset_state(request,
self.initial['volume_id'],
data['status'])
messages.success(request,
_('Successfully updated volume status to "%s".') %
new_status)
return True
except Exception:
redirect = reverse("horizon:admin:volumes:index")
exceptions.handle(request,
_('Unable to update volume status to "%s".') %
new_status, redirect=redirect)
| apache-2.0 | 679,940,254,032,079,700 | 40.386454 | 79 | 0.545148 | false |
ijmarshall/robotreviewer3 | robotreviewer/robots/rationale_robot.py | 1 | 13649 | """
The BiasRobot class takes the full text of a clinical trial as
input (as a robotreviewer.data_structures.MultiDict) and returns
bias information in the same format, which can easily be converted
to JSON.
There are multiple ways to build a MultiDict; however, the most common
way used in this project is from a PDF binary:
pdf_binary = ...
pdfr = PDFReader()
data = pdfr.convert(pdf_binary)
robot = BiasRobot()
annotations = robot.annotate(data)
"""
# Authors: Iain Marshall <[email protected]>
# Joel Kuiper <[email protected]>
# Byron Wallace <[email protected]>
import uuid
import operator
import pickle
import numpy as np
from collections import OrderedDict, defaultdict
import robotreviewer
import logging
log = logging.getLogger(__name__)
import sys
sys.path.append('robotreviewer/ml') # need this for loading the rationale_CNN module
from celery.contrib import rdb
__version__ = {"name": "Risk of bias (CNN/SVM ensemble)",
"version_number": "3",
"publication_url": "https://www.ncbi.nlm.nih.gov/pmc/articles/PMC5300751/",
"cite_bibtex": """@inproceedings{zhang2016rationale,
title={Rationale-augmented convolutional neural networks for text classification},
author={Zhang, Ye and Marshall, Iain and Wallace, Byron C},
booktitle={Proceedings of the Conference on Empirical Methods in Natural Language Processing. Conference on Empirical Methods in Natural Language Processing},
volume={2016},
pages={795},
year={2016},
organization={NIH Public Access}
}""", "cite_text": "Zhang, Ye, Iain J Marshall, and Byron C. Wallace. “Rationale-Augmented Convolutional Neural Networks for Text Classification.” Proceedings of Empirical Methods in Natural Language Processing (EMNLP), 2016."
}
class BiasRobot:
def __init__(self, top_k=3):
"""
`top_k` refers to 'top-k recall'.
top-1 recall will return the single most relevant sentence
in the document, and top-3 recall the 3 most relevant.
        The validation study assessed the accuracy of top-3 and top-1,
        and we suggest top-3 as the default.
"""
self.bias_domains = ['Random sequence generation']
self.top_k = top_k
self.bias_domains = {'RSG': 'Random sequence generation',
'AC': 'Allocation concealment',
'BPP': 'Blinding of participants and personnel',
'BOA': 'Blinding of outcome assessment',
'IOD': 'Incomplete outcome data',
'SR': 'Selective reporting'
}
###
# Here we take a simple ensembling approach in which we combine the
# predictions made by our rationaleCNN model and the JAMIA (linear)
# multi task variant.
###
self.all_domains = ['RSG', 'AC', 'BPP', 'BOA']
from robotreviewer.ml.classifier import MiniClassifier
from robotreviewer.ml.vectorizer import ModularVectorizer
from robotreviewer.ml.rationale_CNN import RationaleCNN, Document
global RationaleCNN, Document, MiniClassifier, ModularVectorizer
# CNN domains
vectorizer_str = 'robotreviewer/data/keras/vectorizers/{}.pickle'
arch_str = 'robotreviewer/data/keras/models/{}.json'
weight_str = 'robotreviewer/data/keras/models/{}.hdf5'
self.CNN_models = OrderedDict()
for bias_domain in ['RSG', 'AC', 'BPP', 'BOA']:
# Load vectorizer and keras model
vectorizer_loc = vectorizer_str.format(bias_domain)
arch_loc = arch_str.format(bias_domain)
weight_loc = weight_str.format(bias_domain)
preprocessor = pickle.load(open(vectorizer_loc, 'rb'))
preprocessor.tokenizer.oov_token = None # TODO check with Byron
self.CNN_models[bias_domain] = RationaleCNN(preprocessor,
document_model_architecture_path=arch_loc,
document_model_weights_path=weight_loc)
# Linear domains (these are joint models!)
self.linear_sent_clf = MiniClassifier(robotreviewer.get_data('bias/bias_sent_level.npz'))
self.linear_doc_clf = MiniClassifier(robotreviewer.get_data('bias/bias_doc_level.npz'))
self.linear_vec = ModularVectorizer(norm=None, non_negative=True, binary=True, ngram_range=(1, 2),
n_features=2**26)
def simple_borda_count(self, a, b, weights=None):
'''
Basic Borda count implementation for just two lists.
Assumes that a and b are lists of indices sorted
in *increasing* preference (so top-ranked sentence
should be the last element).
'''
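        # Worked example (hypothetical inputs): with a = [2, 0, 1] and
        # b = [0, 2, 1], index 1 is the top preference of both lists
        # (score 3 from each), so it comes out first in the combined ranking.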
rank_scores_dict = defaultdict(int)
if weights is None:
weights = np.ones(2)
# ensure list sizes are equal. note that the CNN
# model will always assume/force 200 sentences,
# whereas BoW model will not. so here we trim if
# necessary, effectively taking the max_index
# top sentences from each model and pooling these.
a_n, b_n = len(a), len(b)
max_index = min(a_n, b_n)
a = a[-max_index:]
b = b[-max_index:]
for i in range(max_index):
score = i+1 # 1 ... m
rank_scores_dict[a[i]] += weights[0]*score
rank_scores_dict[b[i]] += weights[1]*score
sorted_indices = sorted(rank_scores_dict.items(), key=operator.itemgetter(1), reverse=True)
return [index[0] for index in sorted_indices]
def annotate(self, doc_text, top_k=None, threshold=0.5):
"""
Annotate full text of clinical trial report
`top_k` can be overridden here, else defaults to the class
default set in __init__
"""
log.info('getting top k')
top_k = self.top_k if not top_k else top_k
doc_len = len(doc_text.text)
doc_sents = [sent.text for sent in doc_text.sents]
doc_sent_start_i = [sent.start_char for sent in doc_text.sents]
doc_sent_end_i = [sent.end_char for sent in doc_text.sents]
structured_data = []
#for domain, model in self.models.items():
log.info('starting modeling')
for domain in self.all_domains:
log.info('STARTING DOMAIN {}'.format(domain))
###
# linear model predictions (all domains)
#if type(model) == tuple: # linear model
log.info('doing linear predictions')
(vec, sent_clf, doc_clf) = (self.linear_vec, self.linear_sent_clf, self.linear_doc_clf)
doc_domains = [self.bias_domains[domain]] * len(doc_sents)
doc_X_i = zip(doc_sents, doc_domains)
vec.builder_clear()
vec.builder_add_docs(doc_sents)
vec.builder_add_docs(doc_X_i)
doc_sents_X = vec.builder_transform()
doc_sents_preds = sent_clf.decision_function(doc_sents_X)
linear_high_prob_sent_indices = np.argsort(doc_sents_preds)
###
# CNN predictions
log.info('doing cnn predictions')
bias_prob_CNN = None
if domain in self.CNN_models:
model = self.CNN_models[domain]
log.info('model selected for {}'.format(domain))
doc = Document(doc_id=None, sentences=doc_sents) # make consumable for RA-CNN
log.info('Doc done {}'.format(domain))
# this never comes back
bias_prob_CNN, high_prob_sent_indices_CNN = model.predict_and_rank_sentences_for_doc(doc, num_rationales=len(doc), return_rationale_indices=True)
log.info('got probs {}'.format(domain))
high_prob_sent_indices = self.simple_borda_count(high_prob_sent_indices_CNN,
linear_high_prob_sent_indices)[:top_k]
# and now the overall (doc-level) prediction from the CNN model.
# bias_prob = 1 --> low risk
# from riskofbias2:
# doc_y[mapped_domain] = 1 if domain["RATING"] == "YES" else -1
# # simplifying to LOW risk of bias = 1 *v* HIGH/UNKNOWN risk = -1
####
bias_pred = int(bias_prob_CNN >= threshold) # low risk if True and high/unclear otherwise
else:
# no aggregation here (since no CNN model for this domain)
high_prob_sent_indices = linear_high_prob_sent_indices[-top_k:]
                high_prob_sent_indices = high_prob_sent_indices[::-1] # put highest prob sentence first
#if domain == "BOA":
# high_prob_sents_CNN = [doc_sents[i] for i in high_prob_sent_indices_CNN]
# Find high probability sentences
#from celery.contrib import rdb
#rdb.set_trace()
high_prob_sents = [doc_sents[i] for i in high_prob_sent_indices]
high_prob_start_i = [doc_sent_start_i[i] for i in high_prob_sent_indices]
high_prob_end_i = [doc_sent_end_i[i] for i in high_prob_sent_indices]
high_prob_prefixes = [doc_text.text[max(0, offset-20):offset] for offset in high_prob_start_i]
high_prob_suffixes = [doc_text.text[offset: min(doc_len, offset+20)] for offset in high_prob_end_i]
high_prob_sents_j = " ".join(high_prob_sents)
# overall pred from linear model
vec.builder_clear()
vec.builder_add_docs([doc_text.text])
vec.builder_add_docs([(doc_text.text, self.bias_domains[domain])])
sent_domain_interaction = "-s-" + self.bias_domains[domain]
vec.builder_add_docs([(high_prob_sents_j, sent_domain_interaction)])
X = vec.builder_transform()
bias_prob_linear = doc_clf.predict_proba(X)[0]
# if we have a CNN pred, too, then average; otherwise
# rely on linear model.
bias_prob = bias_prob_linear
if bias_prob_CNN is not None:
bias_prob = (bias_prob_CNN + bias_prob_linear) / 2.0
bias_pred = int(bias_prob >= threshold)
bias_class = ["high/unclear", "low"][bias_pred] # prediction
annotation_metadata = []
for sent in zip(high_prob_sents, high_prob_start_i, high_prob_prefixes, high_prob_suffixes):
sent_metadata = {"content": sent[0],
"position": sent[1],
"uuid": str(uuid.uuid1()),
"prefix": sent[2],
"suffix": sent[3]}
annotation_metadata.append(sent_metadata)
structured_data.append({"domain": self.bias_domains[domain],
"judgement": bias_class,
"annotations": annotation_metadata})
return structured_data
def pdf_annotate(self, data):
log.info('retrieving text')
doc_text = data.get('parsed_text')
if not doc_text:
return data # we've got to know the text at least..
structured_data = self.annotate(doc_text)
data.ml["bias"] = structured_data
log.info('done predictions, ready to return answers')
return data
def api_annotate(self, articles):
if not all(('parsed_fullText' in article for article in articles)):
raise Exception('Bias model requires full text to be able to complete annotation')
annotations = []
for article in articles:
if article.get('skip_annotation'):
annotations.append([])
else:
annotations.append(self.annotate(article['parsed_fullText']))
# reformat annotations to API formatting
api_domain_titles = {
'Random sequence generation': 'random_sequence_generation',
'Allocation concealment': 'allocation_concealment',
'Blinding of participants and personnel': 'blinding_participants_personnel',
'Blinding of outcome assessment': 'blinding_outcome_assessment'}
out = []
for r in annotations:
row = {}
for b in r:
row[api_domain_titles[b['domain']]] = {
"judgement": b['judgement'],
"annotations": [{"text": an['content'], "start_index":an['position'] } for an in b['annotations']]
}
out.append(row)
return out
@staticmethod
def get_marginalia(data):
"""
Get marginalia formatted for Spa from structured data
"""
marginalia = []
for row in data['bias']:
marginalia.append({
"type": "Risk of Bias",
"title": row['domain'],
"annotations": row['annotations'],
"description": "**Overall risk of bias prediction**: {}".format(row['judgement'])
})
return marginalia
@staticmethod
def get_domains():
return [u'Random sequence generation',
u'Allocation concealment',
u'Blinding of participants and personnel',
u'Blinding of outcome assessment']
#u'Incomplete outcome data',
#u'Selective reporting']
| gpl-3.0 | -8,401,132,343,254,432,000 | 38.781341 | 236 | 0.577281 | false |
SnabbCo/neutron | neutron/db/migration/alembic_migrations/env.py | 1 | 3054 | # Copyright 2012 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Mark McClain, DreamHost
from logging import config as logging_config
from alembic import context
from sqlalchemy import create_engine, pool
from neutron.db import model_base
from neutron.openstack.common import importutils
DATABASE_QUOTA_DRIVER = 'neutron.extensions._quotav2_driver.DbQuotaDriver'
# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config
neutron_config = config.neutron_config
# Interpret the config file for Python logging.
# This line sets up loggers basically.
logging_config.fileConfig(config.config_file_name)
plugin_class_path = neutron_config.core_plugin
active_plugins = [plugin_class_path]
active_plugins += neutron_config.service_plugins
for class_path in active_plugins:
importutils.import_class(class_path)
# set the target for 'autogenerate' support
target_metadata = model_base.BASEV2.metadata
def run_migrations_offline():
"""Run migrations in 'offline' mode.
This configures the context with just a URL
and not an Engine, though an Engine is acceptable
here as well. By skipping the Engine creation
we don't even need a DBAPI to be available.
Calls to context.execute() here emit the given string to the
script output.
"""
context.configure(url=neutron_config.database.connection)
with context.begin_transaction():
context.run_migrations(active_plugins=active_plugins,
options=build_options())
def run_migrations_online():
"""Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context.
"""
engine = create_engine(
neutron_config.database.connection,
poolclass=pool.NullPool)
connection = engine.connect()
context.configure(
connection=connection,
target_metadata=target_metadata
)
try:
with context.begin_transaction():
context.run_migrations(active_plugins=active_plugins,
options=build_options())
finally:
connection.close()
def build_options():
return {'folsom_quota_db_enabled': is_db_quota_enabled()}
def is_db_quota_enabled():
return neutron_config.QUOTAS.quota_driver == DATABASE_QUOTA_DRIVER
if context.is_offline_mode():
run_migrations_offline()
else:
run_migrations_online()
| apache-2.0 | -1,243,438,621,249,044,200 | 28.650485 | 78 | 0.712508 | false |
zork9/pygame-pyMM | bombertoad.py | 1 | 3050 |
# Copyright (c) 2013 Johan Ceuppens.
# All rights reserved.
# Redistribution and use in source and binary forms are permitted
# provided that the above copyright notice and this paragraph are
# duplicated in all such forms and that any documentation,
# advertising materials, and other materials related to such
# distribution and use acknowledge that the software was developed
# by the Johan Ceuppens. The name of the
# Johan Ceuppens may not be used to endorse or promote products derived
# from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
# Copyright (C) Johan Ceuppens 2010
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import pygame
from pygame.locals import *
from gameobject import *
from bullet import *
from stateimagelibrary import *
import random
from time import *
from math import *
from random import *
from rng import *
class BomberToad(Gameobject):
"Dude on Toad throwing Bombs"
def __init__(self,xx,yy):
Gameobject.__init__(self, xx, yy)
self.w = 100
self.h = 100
self.hitpoints = 2
self.yy = yy
self.stimlib = Stateimagelibrary()
image = pygame.image.load('./pics/bomber-left-1.bmp').convert()
image.set_colorkey((0,0,0))
self.stimlib.addpicture(image)
image = pygame.image.load('./pics/bomber-left-2.bmp').convert()
image.set_colorkey((0,0,0))
self.stimlib.addpicture(image)
image = pygame.image.load('./pics/bomber-left-3.bmp').convert()
image.set_colorkey((0,0,0))
self.stimlib.addpicture(image)
image = pygame.image.load('./pics/bomber-left-4.bmp').convert()
image.set_colorkey((0,0,0))
self.stimlib.addpicture(image)
self.counter = 0
def draw(self, screen, room):
if randint(0,100) != 100 and self.counter == 0:
self.counter = 0
self.stimlib.drawstatic(screen, self.x-40+room.relativex,self.y+room.relativey, 0)
else:
self.counter += 1
self.stimlib.drawstatic(screen, self.x-40+room.relativex,self.y+room.relativey, self.counter)
if self.counter >= 3:
self.counter = 0
room.gameobjects.append(Bullet(self.x+room.relativex,self.y+room.relativey, "left"))
def update(self,room,player):
        pass
def fight(self,room,player,keydown = -1):
        pass
| gpl-2.0 | 8,758,164,902,192,934,000 | 34.882353 | 95 | 0.71377 | false |
MadsJensen/agency_connectivity | make_df_hilbert_data.py | 1 | 1383 | import numpy as np
import pandas as pd
import scipy.io as sio
from my_settings import *
data = sio.loadmat("/home/mje/Projects/agency_connectivity/Data/data_all.mat")[
"data_all"]
column_keys = ["subject", "trial", "condition", "shift"]
result_df = pd.DataFrame(columns=column_keys)
for k, subject in enumerate(subjects):
p8_invol_shift = data[k, 3] - np.mean(data[k, 0])
p8_vol_shift = data[k, 2] - np.mean(data[k, 0])
p8_vol_bs_shift = data[k, 1] - np.mean(data[k, 0])
for j in range(89):
row = pd.DataFrame([{"trial": int(j),
"subject": subject,
"condition": "vol_bs",
"shift": p8_vol_bs_shift[j + 1][0]}])
result_df = result_df.append(row, ignore_index=True)
for j in range(89):
row = pd.DataFrame([{"trial": int(j),
"subject": subject,
"condition": "vol",
"shift": p8_vol_shift[j + 1][0]}])
result_df = result_df.append(row, ignore_index=True)
for j in range(89):
row = pd.DataFrame([{"trial": int(j),
"subject": subject,
"condition": "invol",
"shift": p8_invol_shift[j][0]}])
result_df = result_df.append(row, ignore_index=True)
| bsd-3-clause | 5,829,760,773,845,085,000 | 32.731707 | 79 | 0.501808 | false |
jdf76/plugin.video.youtube | resources/lib/youtube_plugin/kodion/utils/http_server.py | 1 | 21426 | # -*- coding: utf-8 -*-
"""
Copyright (C) 2018-2018 plugin.video.youtube
SPDX-License-Identifier: GPL-2.0-only
See LICENSES/GPL-2.0-only for more information.
"""
from six.moves import BaseHTTPServer
from six.moves.urllib.parse import parse_qs, urlparse
from six.moves import range
import json
import os
import re
import requests
import socket
import xbmc
import xbmcaddon
import xbmcgui
from .. import logger
class YouTubeRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
def __init__(self, request, client_address, server):
self.addon_id = 'plugin.video.youtube'
addon = xbmcaddon.Addon(self.addon_id)
whitelist_ips = addon.getSetting('kodion.http.ip.whitelist')
whitelist_ips = ''.join(whitelist_ips.split())
self.whitelist_ips = whitelist_ips.split(',')
self.local_ranges = ('10.', '172.16.', '192.168.', '127.0.0.1', 'localhost', '::1')
self.chunk_size = 1024 * 64
try:
self.base_path = xbmc.translatePath('special://temp/%s' % self.addon_id).decode('utf-8')
except AttributeError:
self.base_path = xbmc.translatePath('special://temp/%s' % self.addon_id)
BaseHTTPServer.BaseHTTPRequestHandler.__init__(self, request, client_address, server)
def connection_allowed(self):
client_ip = self.client_address[0]
log_lines = ['HTTPServer: Connection from |%s|' % client_ip]
conn_allowed = client_ip.startswith(self.local_ranges)
log_lines.append('Local range: |%s|' % str(conn_allowed))
if not conn_allowed:
conn_allowed = client_ip in self.whitelist_ips
log_lines.append('Whitelisted: |%s|' % str(conn_allowed))
if not conn_allowed:
logger.log_debug('HTTPServer: Connection from |%s| not allowed' % client_ip)
else:
if self.path != '/ping':
logger.log_debug(' '.join(log_lines))
return conn_allowed
# noinspection PyPep8Naming
def do_GET(self):
addon = xbmcaddon.Addon('plugin.video.youtube')
dash_proxy_enabled = addon.getSetting('kodion.mpd.videos') == 'true' and addon.getSetting('kodion.video.quality.mpd') == 'true'
api_config_enabled = addon.getSetting('youtube.api.config.page') == 'true'
if self.path == '/client_ip':
client_json = json.dumps({"ip": "{ip}".format(ip=self.client_address[0])})
self.send_response(200)
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.send_header('Content-Length', len(client_json))
self.end_headers()
self.wfile.write(client_json.encode('utf-8'))
if self.path != '/ping':
logger.log_debug('HTTPServer: Request uri path |{proxy_path}|'.format(proxy_path=self.path))
if not self.connection_allowed():
self.send_error(403)
else:
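            # Serve MPD manifests that the add-on wrote to its temp directory.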
if dash_proxy_enabled and self.path.endswith('.mpd'):
file_path = os.path.join(self.base_path, self.path.strip('/').strip('\\'))
file_chunk = True
logger.log_debug('HTTPServer: Request file path |{file_path}|'.format(file_path=file_path.encode('utf-8')))
try:
with open(file_path, 'rb') as f:
self.send_response(200)
self.send_header('Content-Type', 'application/xml+dash')
self.send_header('Content-Length', os.path.getsize(file_path))
self.end_headers()
while file_chunk:
file_chunk = f.read(self.chunk_size)
if file_chunk:
self.wfile.write(file_chunk)
except IOError:
response = 'File Not Found: |{proxy_path}| -> |{file_path}|'.format(proxy_path=self.path, file_path=file_path.encode('utf-8'))
self.send_error(404, response)
elif api_config_enabled and self.path == '/api':
html = self.api_config_page()
html = html.encode('utf-8')
self.send_response(200)
self.send_header('Content-Type', 'text/html; charset=utf-8')
self.send_header('Content-Length', len(html))
self.end_headers()
for chunk in self.get_chunks(html):
self.wfile.write(chunk)
elif api_config_enabled and self.path.startswith('/api_submit'):
addon = xbmcaddon.Addon('plugin.video.youtube')
i18n = addon.getLocalizedString
xbmc.executebuiltin('Dialog.Close(addonsettings,true)')
old_api_key = addon.getSetting('youtube.api.key')
old_api_id = addon.getSetting('youtube.api.id')
old_api_secret = addon.getSetting('youtube.api.secret')
query = urlparse(self.path).query
params = parse_qs(query)
api_key = params.get('api_key', [None])[0]
api_id = params.get('api_id', [None])[0]
api_secret = params.get('api_secret', [None])[0]
if api_key and api_id and api_secret:
footer = i18n(30638)
else:
footer = u''
if re.search(r'api_key=(?:&|$)', query):
api_key = ''
if re.search(r'api_id=(?:&|$)', query):
api_id = ''
if re.search(r'api_secret=(?:&|$)', query):
api_secret = ''
updated = []
if api_key is not None and api_key != old_api_key:
addon.setSetting('youtube.api.key', api_key)
updated.append(i18n(30201))
if api_id is not None and api_id != old_api_id:
addon.setSetting('youtube.api.id', api_id)
updated.append(i18n(30202))
if api_secret is not None and api_secret != old_api_secret:
updated.append(i18n(30203))
addon.setSetting('youtube.api.secret', api_secret)
if addon.getSetting('youtube.api.key') and addon.getSetting('youtube.api.id') and \
addon.getSetting('youtube.api.secret'):
enabled = i18n(30636)
else:
enabled = i18n(30637)
if not updated:
updated = i18n(30635)
else:
updated = i18n(30631) % u', '.join(updated)
html = self.api_submit_page(updated, enabled, footer)
html = html.encode('utf-8')
self.send_response(200)
self.send_header('Content-Type', 'text/html; charset=utf-8')
self.send_header('Content-Length', len(html))
self.end_headers()
for chunk in self.get_chunks(html):
self.wfile.write(chunk)
elif self.path == '/ping':
self.send_error(204)
else:
self.send_error(501)
# noinspection PyPep8Naming
def do_HEAD(self):
logger.log_debug('HTTPServer: Request uri path |{proxy_path}|'.format(proxy_path=self.path))
if not self.connection_allowed():
self.send_error(403)
else:
addon = xbmcaddon.Addon('plugin.video.youtube')
dash_proxy_enabled = addon.getSetting('kodion.mpd.videos') == 'true' and addon.getSetting('kodion.video.quality.mpd') == 'true'
if dash_proxy_enabled and self.path.endswith('.mpd'):
file_path = os.path.join(self.base_path, self.path.strip('/').strip('\\'))
if not os.path.isfile(file_path):
response = 'File Not Found: |{proxy_path}| -> |{file_path}|'.format(proxy_path=self.path, file_path=file_path.encode('utf-8'))
self.send_error(404, response)
else:
self.send_response(200)
self.send_header('Content-Type', 'application/xml+dash')
self.send_header('Content-Length', os.path.getsize(file_path))
self.end_headers()
else:
self.send_error(501)
# noinspection PyPep8Naming
def do_POST(self):
logger.log_debug('HTTPServer: Request uri path |{proxy_path}|'.format(proxy_path=self.path))
if not self.connection_allowed():
self.send_error(403)
elif self.path.startswith('/widevine'):
license_url = xbmcgui.Window(10000).getProperty('plugin.video.youtube-license_url')
license_token = xbmcgui.Window(10000).getProperty('plugin.video.youtube-license_token')
if not license_url:
self.send_error(404)
return
if not license_token:
self.send_error(403)
return
size_limit = None
length = int(self.headers['Content-Length'])
post_data = self.rfile.read(length)
li_headers = {
'Content-Type': 'application/x-www-form-urlencoded',
'Authorization': 'Bearer %s' % license_token
}
result = requests.post(url=license_url, headers=li_headers, data=post_data, stream=True)
response_length = int(result.headers.get('content-length'))
content = result.raw.read(response_length)
content_split = content.split('\r\n\r\n'.encode('utf-8'))
response_header = content_split[0].decode('utf-8', 'ignore')
response_body = content_split[1]
response_length = len(response_body)
match = re.search(r'^Authorized-Format-Types:\s*(?P<authorized_types>.+?)\r*$', response_header, re.MULTILINE)
if match:
authorized_types = match.group('authorized_types').split(',')
logger.log_debug('HTTPServer: Found authorized formats |{authorized_fmts}|'.format(authorized_fmts=authorized_types))
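                # Map the authorized format names to a pixel-count ceiling that is
                # returned to the client in the X-Limit-Video header below.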
fmt_to_px = {'SD': (1280 * 528) - 1, 'HD720': 1280 * 720, 'HD': 7680 * 4320}
if 'HD' in authorized_types:
size_limit = fmt_to_px['HD']
elif 'HD720' in authorized_types:
if xbmc.getCondVisibility('system.platform.android') == 1:
size_limit = fmt_to_px['HD720']
else:
size_limit = fmt_to_px['SD']
elif 'SD' in authorized_types:
size_limit = fmt_to_px['SD']
self.send_response(200)
if size_limit:
self.send_header('X-Limit-Video', 'max={size_limit}px'.format(size_limit=str(size_limit)))
for d in list(result.headers.items()):
if re.match('^[Cc]ontent-[Ll]ength$', d[0]):
self.send_header(d[0], response_length)
else:
self.send_header(d[0], d[1])
self.end_headers()
for chunk in self.get_chunks(response_body):
self.wfile.write(chunk)
else:
self.send_error(501)
# noinspection PyShadowingBuiltins
def log_message(self, format, *args):
return
def get_chunks(self, data):
for i in range(0, len(data), self.chunk_size):
yield data[i:i + self.chunk_size]
@staticmethod
def api_config_page():
addon = xbmcaddon.Addon('plugin.video.youtube')
i18n = addon.getLocalizedString
api_key = addon.getSetting('youtube.api.key')
api_id = addon.getSetting('youtube.api.id')
api_secret = addon.getSetting('youtube.api.secret')
html = Pages().api_configuration.get('html')
css = Pages().api_configuration.get('css')
html = html.format(css=css, title=i18n(30634), api_key_head=i18n(30201), api_id_head=i18n(30202),
api_secret_head=i18n(30203), api_id_value=api_id, api_key_value=api_key,
api_secret_value=api_secret, submit=i18n(30630), header=i18n(30634))
return html
@staticmethod
def api_submit_page(updated_keys, enabled, footer):
addon = xbmcaddon.Addon('plugin.video.youtube')
i18n = addon.getLocalizedString
html = Pages().api_submit.get('html')
css = Pages().api_submit.get('css')
html = html.format(css=css, title=i18n(30634), updated=updated_keys, enabled=enabled, footer=footer, header=i18n(30634))
return html
class Pages(object):
api_configuration = {
'html':
u'<!doctype html>\n<html>\n'
u'<head>\n\t<meta charset="utf-8">\n'
u'\t<title>{title}</title>\n'
u'\t<style>\n{css}\t</style>\n'
u'</head>\n<body>\n'
u'\t<div class="center">\n'
u'\t<h5>{header}</h5>\n'
u'\t<form action="/api_submit" class="config_form">\n'
u'\t\t<label for="api_key">\n'
u'\t\t<span>{api_key_head}</span><input type="text" name="api_key" value="{api_key_value}" size="50"/>\n'
u'\t\t</label>\n'
u'\t\t<label for="api_id">\n'
u'\t\t<span>{api_id_head}</span><input type="text" name="api_id" value="{api_id_value}" size="50"/>\n'
u'\t\t</label>\n'
u'\t\t<label for="api_secret">\n'
u'\t\t<span>{api_secret_head}</span><input type="text" name="api_secret" value="{api_secret_value}" size="50"/>\n'
u'\t\t</label>\n'
u'\t\t<input type="submit" value="{submit}">\n'
u'\t</form>\n'
u'\t</div>\n'
u'</body>\n</html>',
'css':
u'body {\n'
u' background: #141718;\n'
u'}\n'
u'.center {\n'
u' margin: auto;\n'
u' width: 600px;\n'
u' padding: 10px;\n'
u'}\n'
u'.config_form {\n'
u' width: 575px;\n'
u' height: 145px;\n'
u' font-size: 16px;\n'
u' background: #1a2123;\n'
u' padding: 30px 30px 15px 30px;\n'
u' border: 5px solid #1a2123;\n'
u'}\n'
u'h5 {\n'
u' font-family: Arial, Helvetica, sans-serif;\n'
u' font-size: 16px;\n'
u' color: #fff;\n'
u' font-weight: 600;\n'
u' width: 575px;\n'
u' height: 20px;\n'
u' background: #0f84a5;\n'
u' padding: 5px 30px 5px 30px;\n'
u' border: 5px solid #0f84a5;\n'
u' margin: 0px;\n'
u'}\n'
u'.config_form input[type=submit],\n'
u'.config_form input[type=button],\n'
u'.config_form input[type=text],\n'
u'.config_form textarea,\n'
u'.config_form label {\n'
u' font-family: Arial, Helvetica, sans-serif;\n'
u' font-size: 16px;\n'
u' color: #fff;\n'
u'}\n'
u'.config_form label {\n'
u' display:block;\n'
u' margin-bottom: 10px;\n'
u'}\n'
u'.config_form label > span {\n'
u' display: inline-block;\n'
u' float: left;\n'
u' width: 150px;\n'
u'}\n'
u'.config_form input[type=text] {\n'
u' background: transparent;\n'
u' border: none;\n'
u' border-bottom: 1px solid #147a96;\n'
u' width: 400px;\n'
u' outline: none;\n'
u' padding: 0px 0px 0px 0px;\n'
u'}\n'
u'.config_form input[type=text]:focus {\n'
u' border-bottom: 1px dashed #0f84a5;\n'
u'}\n'
u'.config_form input[type=submit],\n'
u'.config_form input[type=button] {\n'
u' width: 150px;\n'
u' background: #141718;\n'
u' border: none;\n'
u' padding: 8px 0px 8px 10px;\n'
u' border-radius: 5px;\n'
u' color: #fff;\n'
u' margin-top: 10px\n'
u'}\n'
u'.config_form input[type=submit]:hover,\n'
u'.config_form input[type=button]:hover {\n'
u' background: #0f84a5;\n'
u'}\n'
}
api_submit = {
'html':
u'<!doctype html>\n<html>\n'
u'<head>\n\t<meta charset="utf-8">\n'
u'\t<title>{title}</title>\n'
u'\t<style>\n{css}\t</style>\n'
u'</head>\n<body>\n'
u'\t<div class="center">\n'
u'\t<h5>{header}</h5>\n'
u'\t<div class="content">\n'
u'\t\t<span>{updated}</span>\n'
u'\t\t<span>{enabled}</span>\n'
u'\t\t<span> </span>\n'
u'\t\t<span> </span>\n'
u'\t\t<span> </span>\n'
u'\t\t<span> </span>\n'
u'\t\t<div class="textcenter">\n'
u'\t\t\t<span><small>{footer}</small></span>\n'
u'\t\t</div>\n'
u'\t</div>\n'
u'\t</div>\n'
u'</body>\n</html>',
'css':
u'body {\n'
u' background: #141718;\n'
u'}\n'
u'.center {\n'
u' margin: auto;\n'
u' width: 600px;\n'
u' padding: 10px;\n'
u'}\n'
u'.textcenter {\n'
u' margin: auto;\n'
u' width: 600px;\n'
u' padding: 10px;\n'
u' text-align: center;\n'
u'}\n'
u'.content {\n'
u' width: 575px;\n'
u' height: 145px;\n'
u' background: #1a2123;\n'
u' padding: 30px 30px 15px 30px;\n'
u' border: 5px solid #1a2123;\n'
u'}\n'
u'h5 {\n'
u' font-family: Arial, Helvetica, sans-serif;\n'
u' font-size: 16px;\n'
u' color: #fff;\n'
u' font-weight: 600;\n'
u' width: 575px;\n'
u' height: 20px;\n'
u' background: #0f84a5;\n'
u' padding: 5px 30px 5px 30px;\n'
u' border: 5px solid #0f84a5;\n'
u' margin: 0px;\n'
u'}\n'
u'span {\n'
u' font-family: Arial, Helvetica, sans-serif;\n'
u' font-size: 16px;\n'
u' color: #fff;\n'
u' display: block;\n'
u' float: left;\n'
u' width: 575px;\n'
u'}\n'
u'small {\n'
u' font-family: Arial, Helvetica, sans-serif;\n'
u' font-size: 12px;\n'
u' color: #fff;\n'
u'}\n'
}
def get_http_server(address=None, port=None):
addon_id = 'plugin.video.youtube'
addon = xbmcaddon.Addon(addon_id)
address = address if address else addon.getSetting('kodion.http.listen')
address = address if re.match(r'^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$', address) else '0.0.0.0'
port = int(port) if port else 50152
try:
server = BaseHTTPServer.HTTPServer((address, port), YouTubeRequestHandler)
return server
except socket.error as e:
logger.log_debug('HTTPServer: Failed to start |{address}:{port}| |{response}|'.format(address=address, port=port, response=str(e)))
xbmcgui.Dialog().notification(addon.getAddonInfo('name'), str(e),
xbmc.translatePath('special://home/addons/{0!s}/icon.png'.format(addon.getAddonInfo('id'))),
5000, False)
return None
def is_httpd_live(address=None, port=None):
addon_id = 'plugin.video.youtube'
addon = xbmcaddon.Addon(addon_id)
address = address if address else addon.getSetting('kodion.http.listen')
address = '127.0.0.1' if address == '0.0.0.0' else address
address = address if re.match(r'^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$', address) else '127.0.0.1'
port = int(port) if port else 50152
url = 'http://{address}:{port}/ping'.format(address=address, port=port)
try:
response = requests.get(url)
result = response.status_code == 204
if not result:
logger.log_debug('HTTPServer: Ping |{address}:{port}| |{response}|'.format(address=address, port=port, response=response.status_code))
return result
    except Exception:
logger.log_debug('HTTPServer: Ping |{address}:{port}| |{response}|'.format(address=address, port=port, response='failed'))
return False
def get_client_ip_address(address=None, port=None):
addon_id = 'plugin.video.youtube'
addon = xbmcaddon.Addon(addon_id)
address = address if address else addon.getSetting('kodion.http.listen')
address = '127.0.0.1' if address == '0.0.0.0' else address
address = address if re.match(r'^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$', address) else '127.0.0.1'
port = int(port) if port else 50152
url = 'http://{address}:{port}/client_ip'.format(address=address, port=port)
response = requests.get(url)
ip_address = None
if response.status_code == 200:
response_json = response.json()
if response_json:
ip_address = response_json.get('ip')
return ip_address
| gpl-2.0 | 4,763,203,937,845,970,000 | 41.511905 | 146 | 0.519882 | false |
InnovArul/codesmart | Assignments/Jul-Nov-2017/reinforcement_learning_udemy/rl/monte_carlo_soft_epsilon.py | 1 | 3861 | from __future__ import print_function
import numpy as np
from grid import standard_grid, negative_grid
from iterative_policy_evaluation import print_values, print_policy
import matplotlib.pyplot as plt
from monte_carlo_exploring_starts import max_dict
EPS = 1e-4
GAMMA = 0.9
ALL_POSSIBLE_ACTIONS = {'U', 'D', 'L', 'R'}
def random_action(a, eps=0.1):
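    # epsilon-greedy: keep the suggested action with probability 1 - eps,
    # otherwise pick an action uniformly at random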
p = np.random.random()
if(p < 1 - eps):
return a
else:
return np.random.choice(list(ALL_POSSIBLE_ACTIONS))
# monte carlo sampling - finding out optimal policy (policy iteration)
def play_game(grid, policy):
all_states = list(grid.actions.keys())
state = (2, 0)
    # unlike exploring starts, the first action follows the policy with epsilon-soft randomness
a = random_action(policy[state])
grid.set_state(state)
    states_actions_rewards = [(state, a, 0)]  # each entry stores the action that will be taken from that state
while True:
r = grid.move(a)
state = grid.current_state()
#print(prev_state)
# if game over, break the loop
if grid.game_over():
            states_actions_rewards.append((state, None, r))  # terminal state: no further action is taken from it
break
else:
# collect the next action that we are gonna take and insert into the trace
a = random_action(policy[state])
states_actions_rewards.append((state, a, r))
# calculate the returns by working backwards from terminal state
G = 0
states_actions_returns = []
for i, state_action_reward in enumerate(reversed(states_actions_rewards)):
state, action, reward = state_action_reward
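        # the first item in the reversed walk is the terminal state, which has no
        # return of its own; for the rest, G already holds the return that
        # followed this (state, action)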
if i != 0:
states_actions_returns.append((state, action, G))
G = reward + GAMMA * G
states_actions_returns.reverse()
return states_actions_returns
def max_dict(hash):
max_key = None
max_val = float('-inf')
for k in hash:
if(hash[k] > max_val):
max_key, max_val = k, hash[k]
return max_key, max_val
if __name__ == '__main__':
#grid = standard_grid()
grid = negative_grid(-0.1)
print('grid')
print_values(grid.rewards, grid)
# init random policy
policy = {}
for s in grid.actions:
policy[s] = np.random.choice(list(ALL_POSSIBLE_ACTIONS))
print('policy')
print_policy(policy, grid)
# initialioze Q(s, a)
Q = {}
returns = {} # buffer to hold all the returns for a state during monte-carlo game plays
for s in grid.actions: # if state is non terminal
Q[s] = {}
for a in ALL_POSSIBLE_ACTIONS:
# for all the possible actions, initialize Q(s,a)
Q[s][a] = 0
returns[(s, a)] = []
# deltas
deltas = []
for sample in range(5000):
if sample % 500 == 0:
print(sample)
biggest_change = 0
# generate an episode and adapt Q(s, a)
states_actions_returns = play_game(grid, policy)
seen_states_actions = set()
for s, a, G in states_actions_returns:
key = (s, a)
            if key not in seen_states_actions:  # first-visit check on the (state, action) pair
old_q = Q[s][a]
returns[key].append(G)
Q[s][a] = np.mean(returns[key])
seen_states_actions.add(key)
biggest_change = max(biggest_change, abs(G - old_q))
deltas.append(biggest_change)
# policy improvement
for s in Q:
policy[s] = max_dict(Q[s])[0]
plt.plot(deltas)
plt.show()
V = {}
# policy improvement
for s in Q:
V[s] = max_dict(Q[s])[1]
print('grid')
print_values(V, grid)
print('policy')
print_policy(policy, grid)
| gpl-2.0 | -5,864,667,669,027,085,000 | 29.164063 | 121 | 0.573168 | false |
naturalness/sensibility | sensibility/language/java/__init__.py | 1 | 6245 | #!/usr/bin/env python3
# -*- coding: UTF-8 -*-
# Copyright 2017 Eddie Antonio Santos <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import atexit
import os
import sys
import token
from io import BytesIO
from keyword import iskeyword
from pathlib import Path
from typing import (
Any, AnyStr, Callable, IO, Iterable, Optional, Tuple, Union,
overload, cast
)
import javac_parser
from .. import Language, SourceSummary
from ...lexical_analysis import Lexeme, Location, Position, Token
from ...vocabulary import NoSourceRepresentationError, Vocabulary, Vind
here = Path(__file__).parent
class JavaVocabulary(Vocabulary):
"""
    The Java vocabulary, extended so that entries can be mapped back to a source representation.
"""
first_entry_num = len(Vocabulary.SPECIAL_ENTRIES)
def __init__(self, entries: Iterable[str], reprs: Iterable[str]) -> None:
super().__init__(entries)
# Create a look-up table for source representations.
# The special tokens <unk>, <s>, </s> have NO reprs, thus are not
# stored.
self._index2repr = tuple(reprs)
assert len(self._index2text) == self.first_entry_num + len(self._index2repr)
def to_source_text(self, idx: Vind) -> str:
if idx < self.first_entry_num:
raise NoSourceRepresentationError(idx)
return self._index2repr[idx - self.first_entry_num]
@staticmethod
def load() -> 'JavaVocabulary':
entries = []
reprs = []
# Load from a tab-separated-values file
with open(here / 'vocabulary.tsv') as vocab_file:
first_entry = JavaVocabulary.first_entry_num
for expected_num, line in enumerate(vocab_file, start=first_entry):
# src_repr -- source representation
num, entry, src_repr = line.rstrip().split()
assert expected_num == int(num)
entries.append(entry)
reprs.append(src_repr)
return JavaVocabulary(entries, reprs)
def to_str(source: Union[str, bytes, IO[bytes]]) -> str:
"""
Coerce an input format to a Unicode string.
"""
if isinstance(source, str):
return source
elif isinstance(source, bytes):
# XXX: Assume it's UTF-8 encoded!
return source.decode('UTF-8')
else:
raise NotImplementedError
class LazyVocabulary:
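    """Descriptor that computes its wrapped value on first access and caches it."""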
def __init__(self, fn):
self.fn = fn
def __get__(self, obj, value):
if not hasattr(self, 'value'):
self.value = self.fn()
return self.value
class JavaToken(Token):
"""
    HACK: javac_parser has some... interesting ideas about normalization,
    so add a `_raw` field to the token.
"""
# TODO: fix with upstream (javac_parser) to return a sensible value for the normalized value
__slots__ = ('_raw',)
def __init__(self, *, _raw: str, name: str, value: str, start: Position, end: Position) -> None:
super().__init__(name=name, value=value, start=start, end=end)
self._raw = _raw
def __repr__(self) -> str:
cls = type(self).__name__
return (f"{cls}(_raw={self._raw!r}"
f"name={self.name!r}, value={self.value!r}, "
f"start={self.start!r}, end={self.end!r})")
class Java(Language):
"""
Defines the Java 8 programming language.
"""
extensions = {'.java'}
vocabulary = cast(Vocabulary, LazyVocabulary(JavaVocabulary.load))
@property
def java(self):
"""
Lazily start up the Java server. This decreases the chances of things
going horribly wrong when two seperate process initialize
the Java language instance around the same time.
"""
if not hasattr(self, '_java_server'):
self._java_server = javac_parser.Java()
# Py4j usually crashes as Python is cleaning up after exit() so
# decrement the servers' reference count to lessen the chance of
# that happening.
@atexit.register
def remove_reference():
del self._java_server
return self._java_server
def tokenize(self, source: Union[str, bytes, IO[bytes]]) -> Iterable[Token]:
tokens = self.java.lex(to_str(source))
# Each token is a tuple with the following structure
# (reproduced from javac_parser.py):
# 1. Lexeme type
# 2. Value (as it appears in the source file)
# 3. A 2-tuple of start line, start column
# 4. A 2-tuple of end line, end column
# 5. A whitespace-free representation of the value
for name, raw_value, start, end, normalized in tokens:
# Omit the EOF token, as it's only useful for the parser.
if name == 'EOF':
continue
# Take the NORMALIZED value, as Java allows unicode escapes in
# ARBITRARY tokens and then things get hairy here.
yield JavaToken(_raw=raw_value,
name=name, value=normalized,
start=Position(line=start[0], column=start[1]),
end=Position(line=end[0], column=end[1]))
def check_syntax(self, source: Union[str, bytes]) -> bool:
return self.java.get_num_parse_errors(to_str(source)) == 0
def summarize_tokens(self, source: Iterable[Token]) -> SourceSummary:
toks = [tok for tok in source if tok.name != 'EOF']
slines = set(line for tok in toks for line in tok.lines)
return SourceSummary(n_tokens=len(toks), sloc=len(slines))
def vocabularize_tokens(self, source: Iterable[Token]) -> Iterable[Tuple[Location, str]]:
for token in source:
yield token.location, token.name
java: Language = Java()
| apache-2.0 | 170,523,511,138,746,530 | 33.888268 | 100 | 0.620657 | false |
postlund/pyatv | tests/support/test_dns.py | 1 | 10011 | """Unit tests for pyatv.support.dns"""
import io
import typing
import pytest
from pyatv.support import dns
@pytest.mark.parametrize(
"name,expected",
(
("_http._tcp.local", (None, "_http._tcp", "local")),
("foo._http._tcp.local", ("foo", "_http._tcp", "local")),
("foo.bar._http._tcp.local", ("foo.bar", "_http._tcp", "local")),
),
ids=("ptr", "no_dot", "with_dot"),
)
def test_happy_service_instance_names(name, expected):
assert dns.ServiceInstanceName.split_name(name) == expected
@pytest.mark.parametrize(
"name",
(
"_http.local",
"._tcp.local",
"_http.foo._tcp.local",
"_tcp._http.local",
),
ids=("no_proto", "no_service", "split", "reversed"),
)
def test_sad_service_instance_names(name):
with pytest.raises(ValueError):
dns.ServiceInstanceName.split_name(name)
# mapping is test_id: tuple(name, expected_raw)
encode_domain_names = {
"root": (".", b"\x00"),
"empty": ("", b"\x00"),
"example.com": ("example.com", b"\x07example\x03com\x00"),
"example.com_list": (["example", "com"], b"\x07example\x03com\x00"),
"unicode": ("Bücher.example", b"\x07B\xc3\xbccher\x07example\x00"),
"dotted_instance": (
"Dot.Within._http._tcp.example.local",
b"\x0aDot.Within\x05_http\x04_tcp\x07example\x05local\x00",
),
"dotted_instance_list": (
["Dot.Within", "_http", "_tcp", "example", "local"],
b"\x0aDot.Within\x05_http\x04_tcp\x07example\x05local\x00",
),
"truncated_ascii": (
(
"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
".test"
),
(
b"\x3fabcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijk"
b"\x04test"
b"\x00"
),
),
"truncated_unicode": (
(
# The 'a' is at the beginning to force the codepoints to be split at 63
# bytes. The next line is also at the right length to be below 88 characters
# even if each kana is counted as a double-width character. Additionally,
# this sequence is NF*D* normalized, not NFC (which is what is used for
# Net-Unicode).
"aがあいうえおかきくけこさしすせそたちつてとなにぬねのはひふへほまみむめも"
".test"
),
(
b"\x3d"
b"a\xe3\x81\x8c\xe3\x81\x82\xe3\x81\x84\xe3\x81\x86\xe3\x81\x88\xe3\x81\x8a"
b"\xe3\x81\x8b\xe3\x81\x8d\xe3\x81\x8f\xe3\x81\x91\xe3\x81\x93\xe3\x81\x95"
b"\xe3\x81\x97\xe3\x81\x99\xe3\x81\x9b\xe3\x81\x9d\xe3\x81\x9f\xe3\x81\xa1"
b"\xe3\x81\xa4\xe3\x81\xa6"
b"\x04test"
b"\x00"
),
),
}
@pytest.mark.parametrize(
"name,expected_raw",
[pytest.param(*value, id=key) for key, value in encode_domain_names.items()],
)
def test_qname_encode(name, expected_raw):
assert dns.qname_encode(name) == expected_raw
# mapping is test_id: tuple(raw_name, offset, expected_name, expected_offset)
# If expected offset is None, it means len(raw_name), otherwise it's like an array index
# (positive is from the beginning, negative from the end)
decode_domain_names = {
"simple": (b"\x03foo\x07example\x03com\x00", 0, "foo.example.com", None),
"null": (b"\00", 0, "", None),
"compressed": (b"aaaa\x04test\x00\x05label\xC0\x04\xAB\xCD", 10, "label.test", -2),
# This case has two levels of compression
"multi_compressed": (
b"aaaa\x04test\x00\x05label\xC0\x04\x03foo\xC0\x0A\xAB\xCD",
18,
"foo.label.test",
-2,
),
# Taken straight from the Internationalized Domain name Wikipedia page
"idna": (b"\x0Dxn--bcher-kva\x07example\x00", 0, "bücher.example", None),
# Taken from issue #919. Apple puts a non-breaking space between "Apple" and "TV".
"nbsp": (
b"\x10Apple\xc2\xa0TV (4167)\x05local\x00",
0,
"Apple\xa0TV (4167).local",
None,
),
# This is a doozy of a test case; it's covering a couple different areas of Unicode,
# as well as exercising that DNS-SD allows dots in instance names.
"unicode": (
(
b"\x1d\xe5\xb1\x85\xe9\x96\x93 Apple\xc2\xa0TV. En Espa\xc3\xb1ol"
b"\x05local"
b"\x00"
),
0,
"居間 Apple TV. En Español.local",
None,
),
}
@pytest.mark.parametrize(
"raw_name,offset,expected_name,expected_offset",
[pytest.param(*value, id=key) for key, value in decode_domain_names.items()],
)
def test_domain_name_parsing(
raw_name: bytes,
offset: int,
expected_name: str,
expected_offset: typing.Optional[int],
):
with io.BytesIO(raw_name) as buffer:
buffer.seek(offset)
name = dns.parse_domain_name(buffer)
assert name == expected_name
if expected_offset is None:
assert buffer.tell() == len(raw_name)
else:
# if expected_offset is positive, this will wrap around to the beginning, if
# it's negative it won't.
raw_len = len(raw_name)
assert buffer.tell() == (raw_len + expected_offset) % raw_len
# mapping is test_id: tuple(encoded_data, expected_data, expected_offset)
# If expected offset is None, it means len(raw_name), otherwise it's like an array index
# (positive is from the beginning, negative from the end)
decode_strings = {
"null": (b"\x00", b"", None),
# 63 is significant because that's the max length for a domain label, but not a
# character-string (they have similar encodings).
"len_63": (b"\x3F" + (63 * b"0"), (63 * b"0"), None),
# For similar reasons as 63, 64 is significant because it would set only one of the
# flag bits for name compression if domain-name encoding is assumed.
"len_64": (b"\x40" + (64 * b"0"), (64 * b"0"), None),
# Ditto for 128, but the other flag
"len_128": (b"\x80" + (128 * b"0"), (128 * b"0"), None),
# ...and 192 is both flags
"len_192": (b"\xC0" + (192 * b"0"), (192 * b"0"), None),
# 255 is the max length a character-string can be
"len_255": (b"\xFF" + (255 * b"0"), (255 * b"0"), None),
"trailing": (b"\x0A" + (10 * b"2") + (17 * b"9"), (10 * b"2"), -17),
}
@pytest.mark.parametrize(
"encoded_data,expected_data,expected_offset",
[pytest.param(*value, id=key) for key, value in decode_strings.items()],
)
def test_string_parsing(
encoded_data: bytes,
expected_data: bytes,
expected_offset: typing.Optional[int],
):
with io.BytesIO(encoded_data) as buffer:
name = dns.parse_string(buffer)
assert name == expected_data
if expected_offset is None:
assert buffer.tell() == len(encoded_data)
else:
# if expected_offset is positive, this will wrap around to the beginning, if
# it's negative it won't.
data_len = len(encoded_data)
assert buffer.tell() == (data_len + expected_offset) % data_len
def test_dns_sd_txt_parse_single():
"""Test that a TXT RDATA section with one key can be parsed properly."""
data = b"\x07foo=bar"
extra_data = data + b"\xDE\xAD\xBE\xEF" * 3
with io.BytesIO(extra_data) as buffer:
txt_dict = dns.parse_txt_dict(buffer, len(data))
assert buffer.tell() == len(data)
assert txt_dict == {"foo": b"bar"}
def test_dns_sd_txt_parse_multiple():
"""Test that a TXT RDATA section with multiple keys can be parsed properly."""
data = b"\x07foo=bar\x09spam=eggs"
extra_data = data + b"\xDE\xAD\xBE\xEF" * 2
with io.BytesIO(extra_data) as buffer:
txt_dict = dns.parse_txt_dict(buffer, len(data))
assert buffer.tell() == len(data)
assert txt_dict == {"foo": b"bar", "spam": b"eggs"}
def test_dns_sd_txt_parse_binary():
"""Test that a TXT RDATA section with a binary value can be parsed properly."""
# 0xfeed can't be decoded as UTF-8 or ASCII, so it'll thrown an error if it's not
# being treated as binary data.
data = b"\x06foo=\xFE\xED"
extra_data = data + b"\xDE\xAD\xBE\xEF" * 3
with io.BytesIO(extra_data) as buffer:
txt_dict = dns.parse_txt_dict(buffer, len(data))
assert buffer.tell() == len(data)
assert txt_dict == {"foo": b"\xFE\xED"}
def test_dns_sd_txt_parse_long():
"""Test that a TXT RDATA section with a long value can be parsed properly."""
# If TXT records are being parsed the same way domain names are, this won't work as
# the data is too long to fit in a label.
data = b"\xCCfoo=" + b"\xCA\xFE" * 100
extra_data = data + b"\xDE\xAD\xBE\xEF" * 3
with io.BytesIO(extra_data) as buffer:
txt_dict = dns.parse_txt_dict(buffer, len(data))
assert buffer.tell() == len(data)
assert txt_dict == {"foo": b"\xCA\xFE" * 100}
@pytest.mark.parametrize(
"record_type,data,expected",
[
(dns.QueryType.A, b"\x0A\x00\x00\x2A", "10.0.0.42"),
(dns.QueryType.PTR, b"\x03foo\x07example\x03com\x00", "foo.example.com"),
(dns.QueryType.TXT, b"\x07foo=bar", {"foo": b"bar"}),
(
dns.QueryType.SRV,
b"\x00\x0A\x00\x00\x00\x50\x03foo\x07example\x03com\x00",
{
"priority": 10,
"weight": 0,
"port": 80,
"target": "foo.example.com",
},
),
],
# Use the name of the record type as the test id
ids=(
t.name
for t in (
dns.QueryType.A,
dns.QueryType.PTR,
dns.QueryType.TXT,
dns.QueryType.SRV,
)
),
)
def test_parse_rdata(
record_type: dns.QueryType,
data: bytes,
expected: typing.Any,
):
with io.BytesIO(data) as buffer:
assert record_type.parse_rdata(buffer, len(data)) == expected
assert buffer.tell() == len(data)
| mit | 8,634,286,590,532,209,000 | 34.715827 | 88 | 0.591903 | false |
Ale-/civics | apps/models/migrations/0028_auto_20170924_1153.py | 1 | 1318 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-09-24 11:53
from __future__ import unicode_literals
import apps.models.utils
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('models', '0027_auto_20170922_1554'),
]
operations = [
migrations.AlterField(
model_name='event',
name='image',
field=models.ImageField(blank=True, help_text='Sube una imagen representativa del evento haciendo click en la imagen inferior. La imagen ha de tener ancho mínimo de 300 píxeles y máximo de 1920, y altura mínima de 300 píxeles y máxima de 1280. Formatos permitidos: PNG, JPG, JPEG.', upload_to=apps.models.utils.RenameCivicsImage('images/events/'), verbose_name='Imagen'),
),
migrations.AlterField(
model_name='initiative',
name='image',
field=models.ImageField(blank=True, help_text='Sube una imagen representativa de la iniciativa haciendo click en la imagen inferior. La imagen ha de tener ancho mínimo de 300 píxeles y máximo de 1920, y altura mínima de 300 píxeles y máxima de 1280. Formatos permitidos: PNG, JPG, JPEG.', upload_to=apps.models.utils.RenameCivicsImage('images/initiatives/'), verbose_name='Imagen'),
),
]
| gpl-3.0 | -8,622,433,261,067,904,000 | 49.230769 | 394 | 0.688361 | false |
baixuexue123/djmo | utils/csv_response_.py | 1 | 2274 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals, absolute_import
import csv
import codecs
import cStringIO
from django.http import StreamingHttpResponse
from django.views.generic import View
"""
A streaming response (StreamingHttpResponse) can generate a large file quickly while keeping memory usage low.
"""
class Echo(object):
"""An object that implements just the write method of the file-like interface."""
def write(self, value):
"""Write the value by returning it, instead of storing in a buffer."""
return value
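def handle_column(value):
    """Coerce a cell value to a UTF-8 byte string for the underlying csv writer.
    NOTE: UnicodeWriter.writerow below relies on this helper, but it was missing
    from the module; this is a minimal, assumed implementation.
    """
    if isinstance(value, str):
        return value
    if not isinstance(value, unicode):
        value = unicode(value)
    return value.encode('utf-8')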
class UnicodeWriter(object):
"""
A CSV writer which will write rows to CSV file "f",
which is encoded in the given encoding.
"""
def __init__(self, f, dialect=csv.excel, encoding='utf-8', **kwargs):
# Redirect output to a queue
self.queue = cStringIO.StringIO()
self.writer = csv.writer(self.queue, dialect=dialect, **kwargs)
self.stream = f
self.encoder = codecs.getincrementalencoder(encoding)()
def writerow(self, row):
self.writer.writerow([handle_column(s) for s in row])
# Fetch UTF-8 output from the queue ...
data = self.queue.getvalue()
data = data.decode("utf-8")
# ... and reencode it into the target encoding
data = self.encoder.encode(data)
# write to the target stream
value = self.stream.write(data)
# empty queue
self.queue.truncate(0)
return value
def writerows(self, rows):
for row in rows:
self.writerow(row)
class ExampleView(View):
headers = ('一些', '表头')
def get(self, request):
result = (
('第一行', '数据1'),
('第二行', '数据2')
)
echoer = Echo()
writer = UnicodeWriter(echoer)
def csv_iterator():
yield codecs.BOM_UTF8
yield writer.writerow(self.headers)
for column in result:
yield writer.writerow(column)
response = StreamingHttpResponse(
(row for row in csv_iterator()),
content_type="text/csv;charset=utf-8"
)
response['Content-Disposition'] = 'attachment;filename="example.csv"'
return response
| mit | -4,467,090,691,717,978,600 | 27.25641 | 85 | 0.599819 | false |
gunan/tensorflow | tensorflow/python/feature_column/feature_column_v2.py | 1 | 192558 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""This API defines FeatureColumn abstraction.
FeatureColumns provide a high level abstraction for ingesting and representing
features. FeatureColumns are also the primary way of encoding features for
canned `tf.estimator.Estimator`s.
When using FeatureColumns with `Estimators`, the type of feature column you
should choose depends on (1) the feature type and (2) the model type.
1. Feature type:
* Continuous features can be represented by `numeric_column`.
* Categorical features can be represented by any `categorical_column_with_*`
column:
- `categorical_column_with_vocabulary_list`
- `categorical_column_with_vocabulary_file`
- `categorical_column_with_hash_bucket`
- `categorical_column_with_identity`
- `weighted_categorical_column`
2. Model type:
* Deep neural network models (`DNNClassifier`, `DNNRegressor`).
Continuous features can be directly fed into deep neural network models.
age_column = numeric_column("age")
To feed sparse features into DNN models, wrap the column with
`embedding_column` or `indicator_column`. `indicator_column` is recommended
for features with only a few possible values. For features with many
possible values, to reduce the size of your model, `embedding_column` is
recommended.
embedded_dept_column = embedding_column(
categorical_column_with_vocabulary_list(
"department", ["math", "philosophy", ...]), dimension=10)
* Wide (aka linear) models (`LinearClassifier`, `LinearRegressor`).
Sparse features can be fed directly into linear models. They behave like an
indicator column but with an efficient implementation.
dept_column = categorical_column_with_vocabulary_list("department",
["math", "philosophy", "english"])
It is recommended that continuous features be bucketized before being
fed into linear models.
bucketized_age_column = bucketized_column(
source_column=age_column,
boundaries=[18, 25, 30, 35, 40, 45, 50, 55, 60, 65])
Sparse features can be crossed (also known as conjuncted or combined) in
order to form non-linearities, and then fed into linear models.
cross_dept_age_column = crossed_column(
columns=["department", bucketized_age_column],
hash_bucket_size=1000)
Example of building canned `Estimator`s using FeatureColumns:
```python
# Define features and transformations
deep_feature_columns = [age_column, embedded_dept_column]
wide_feature_columns = [dept_column, bucketized_age_column,
cross_dept_age_column]
# Build deep model
estimator = DNNClassifier(
feature_columns=deep_feature_columns,
hidden_units=[500, 250, 50])
estimator.train(...)
# Or build a wide model
estimator = LinearClassifier(
feature_columns=wide_feature_columns)
estimator.train(...)
# Or build a wide and deep model!
estimator = DNNLinearCombinedClassifier(
linear_feature_columns=wide_feature_columns,
dnn_feature_columns=deep_feature_columns,
dnn_hidden_units=[500, 250, 50])
estimator.train(...)
```
FeatureColumns can also be transformed into a generic input layer for
custom models using `input_layer`.
Example of building model using FeatureColumns, this can be used in a
`model_fn` which is given to the {tf.estimator.Estimator}:
```python
# Building model via layers
deep_feature_columns = [age_column, embedded_dept_column]
columns_to_tensor = parse_feature_columns_from_examples(
serialized=my_data,
feature_columns=deep_feature_columns)
first_layer = input_layer(
features=columns_to_tensor,
feature_columns=deep_feature_columns)
second_layer = fully_connected(first_layer, ...)
```
NOTE: Functions prefixed with "_" indicate experimental or private parts of
the API subject to change, and should not be relied upon!
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import collections
import math
import re
import numpy as np
import six
from tensorflow.python.eager import context
from tensorflow.python.feature_column import feature_column as fc_old
from tensorflow.python.feature_column import utils as fc_utils
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor as sparse_tensor_lib
from tensorflow.python.framework import tensor_shape
# TODO(b/118385027): Dependency on keras can be problematic if Keras moves out
# of the main repo.
from tensorflow.python.keras import initializers
from tensorflow.python.keras.engine import training as keras_training
from tensorflow.python.keras.engine.base_layer import Layer
from tensorflow.python.keras.utils import generic_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import checkpoint_utils
from tensorflow.python.training.tracking import base as trackable
from tensorflow.python.training.tracking import data_structures
from tensorflow.python.training.tracking import tracking
from tensorflow.python.util import deprecation
from tensorflow.python.util import nest
from tensorflow.python.util.compat import collections_abc
from tensorflow.python.util.tf_export import tf_export
_FEATURE_COLUMN_DEPRECATION_DATE = None
_FEATURE_COLUMN_DEPRECATION = ('The old _FeatureColumn APIs are being '
'deprecated. Please use the new FeatureColumn '
'APIs instead.')
class StateManager(object):
"""Manages the state associated with FeatureColumns.
Some `FeatureColumn`s create variables or resources to assist their
computation. The `StateManager` is responsible for creating and storing these
objects since `FeatureColumn`s are supposed to be stateless configuration
only.
"""
def create_variable(self,
feature_column,
name,
shape,
dtype=None,
trainable=True,
use_resource=True,
initializer=None):
"""Creates a new variable.
Args:
feature_column: A `FeatureColumn` object this variable corresponds to.
name: variable name.
shape: variable shape.
dtype: The type of the variable. Defaults to `self.dtype` or `float32`.
trainable: Whether this variable is trainable or not.
use_resource: If true, we use resource variables. Otherwise we use
RefVariable.
initializer: initializer instance (callable).
Returns:
The created variable.
"""
del feature_column, name, shape, dtype, trainable, use_resource, initializer
raise NotImplementedError('StateManager.create_variable')
def add_variable(self, feature_column, var):
"""Adds an existing variable to the state.
Args:
feature_column: A `FeatureColumn` object to associate this variable with.
var: The variable.
"""
del feature_column, var
raise NotImplementedError('StateManager.add_variable')
def get_variable(self, feature_column, name):
"""Returns an existing variable.
Args:
feature_column: A `FeatureColumn` object this variable corresponds to.
name: variable name.
"""
del feature_column, name
raise NotImplementedError('StateManager.get_var')
def add_resource(self, feature_column, name, resource):
"""Creates a new resource.
Resources can be things such as tables, variables, trackables, etc.
Args:
feature_column: A `FeatureColumn` object this resource corresponds to.
name: Name of the resource.
resource: The resource.
Returns:
The created resource.
"""
del feature_column, name, resource
raise NotImplementedError('StateManager.add_resource')
def has_resource(self, feature_column, name):
"""Returns true iff a resource with same name exists.
Resources can be things such as tables, variables, trackables, etc.
Args:
feature_column: A `FeatureColumn` object this variable corresponds to.
name: Name of the resource.
"""
del feature_column, name
raise NotImplementedError('StateManager.has_resource')
def get_resource(self, feature_column, name):
"""Returns an already created resource.
Resources can be things such as tables, variables, trackables, etc.
Args:
feature_column: A `FeatureColumn` object this variable corresponds to.
name: Name of the resource.
"""
del feature_column, name
raise NotImplementedError('StateManager.get_resource')
class _StateManagerImpl(StateManager):
"""Manages the state of DenseFeatures and LinearLayer."""
def __init__(self, layer, trainable):
"""Creates an _StateManagerImpl object.
Args:
layer: The input layer this state manager is associated with.
trainable: Whether by default, variables created are trainable or not.
"""
self._trainable = trainable
self._layer = layer
if self._layer is not None and not hasattr(self._layer, '_resources'):
self._layer._resources = data_structures.Mapping() # pylint: disable=protected-access
self._cols_to_vars_map = collections.defaultdict(lambda: {})
self._cols_to_resources_map = collections.defaultdict(lambda: {})
def create_variable(self,
feature_column,
name,
shape,
dtype=None,
trainable=True,
use_resource=True,
initializer=None):
if name in self._cols_to_vars_map[feature_column]:
raise ValueError('Variable already exists.')
# We explicitly track these variables since `name` is not guaranteed to be
# unique and disable manual tracking that the add_weight call does.
with trackable.no_manual_dependency_tracking_scope(self._layer):
var = self._layer.add_weight(
name=name,
shape=shape,
dtype=dtype,
initializer=initializer,
trainable=self._trainable and trainable,
use_resource=use_resource,
# TODO(rohanj): Get rid of this hack once we have a mechanism for
# specifying a default partitioner for an entire layer. In that case,
# the default getter for Layers should work.
getter=variable_scope.get_variable)
if isinstance(var, variables.PartitionedVariable):
for v in var:
part_name = name + '/' + str(v._get_save_slice_info().var_offset[0]) # pylint: disable=protected-access
self._layer._track_trackable(v, feature_column.name + '/' + part_name) # pylint: disable=protected-access
else:
if isinstance(var, trackable.Trackable):
self._layer._track_trackable(var, feature_column.name + '/' + name) # pylint: disable=protected-access
self._cols_to_vars_map[feature_column][name] = var
return var
def get_variable(self, feature_column, name):
if name in self._cols_to_vars_map[feature_column]:
return self._cols_to_vars_map[feature_column][name]
raise ValueError('Variable does not exist.')
def add_resource(self, feature_column, resource_name, resource):
self._cols_to_resources_map[feature_column][resource_name] = resource
# pylint: disable=protected-access
if self._layer is not None and isinstance(resource, trackable.Trackable):
# Add trackable resources to the layer for serialization.
if feature_column.name not in self._layer._resources:
self._layer._resources[feature_column.name] = data_structures.Mapping()
if resource_name not in self._layer._resources[feature_column.name]:
self._layer._resources[feature_column.name][resource_name] = resource
# pylint: enable=protected-access
def has_resource(self, feature_column, resource_name):
return resource_name in self._cols_to_resources_map[feature_column]
def get_resource(self, feature_column, resource_name):
if (feature_column not in self._cols_to_resources_map or
resource_name not in self._cols_to_resources_map[feature_column]):
raise ValueError('Resource does not exist.')
return self._cols_to_resources_map[feature_column][resource_name]
class _StateManagerImplV2(_StateManagerImpl):
"""Manages the state of DenseFeatures."""
def create_variable(self,
feature_column,
name,
shape,
dtype=None,
trainable=True,
use_resource=True,
initializer=None):
if name in self._cols_to_vars_map[feature_column]:
raise ValueError('Variable already exists.')
# We explicitly track these variables since `name` is not guaranteed to be
# unique and disable manual tracking that the add_weight call does.
with trackable.no_manual_dependency_tracking_scope(self._layer):
var = self._layer.add_weight(
name=name,
shape=shape,
dtype=dtype,
initializer=initializer,
trainable=self._trainable and trainable,
use_resource=use_resource)
if isinstance(var, trackable.Trackable):
self._layer._track_trackable(var, feature_column.name + '/' + name) # pylint: disable=protected-access
self._cols_to_vars_map[feature_column][name] = var
return var
class _BaseFeaturesLayer(Layer):
"""Base class for DenseFeatures and SequenceFeatures.
Defines common methods and helpers.
Args:
feature_columns: An iterable containing the FeatureColumns to use as
inputs to your model.
expected_column_type: Expected class for provided feature columns.
trainable: Boolean, whether the layer's variables will be updated via
gradient descent during training.
name: Name to give to the DenseFeatures.
**kwargs: Keyword arguments to construct a layer.
Raises:
ValueError: if an item in `feature_columns` doesn't match
`expected_column_type`.
"""
def __init__(self,
feature_columns,
expected_column_type,
trainable,
name,
partitioner=None,
**kwargs):
super(_BaseFeaturesLayer, self).__init__(
name=name, trainable=trainable, **kwargs)
self._feature_columns = _normalize_feature_columns(feature_columns)
self._state_manager = _StateManagerImpl(self, self.trainable)
self._partitioner = partitioner
for column in self._feature_columns:
if not isinstance(column, expected_column_type):
raise ValueError(
'Items of feature_columns must be a {}. '
'You can wrap a categorical column with an '
'embedding_column or indicator_column. Given: {}'.format(
expected_column_type, column))
def build(self, _):
for column in self._feature_columns:
with variable_scope._pure_variable_scope( # pylint: disable=protected-access
self.name,
partitioner=self._partitioner):
with variable_scope._pure_variable_scope( # pylint: disable=protected-access
_sanitize_column_name_for_variable_scope(column.name)):
column.create_state(self._state_manager)
super(_BaseFeaturesLayer, self).build(None)
  def _target_shape(self, input_shape, num_elements):
"""Computes expected output shape of the layer or a column's dense tensor.
Args:
input_shape: Tensor or array with batch shape.
num_elements: Size of the last dimension of the output.
Returns:
Tuple with output shape.
"""
raise NotImplementedError('Calling an abstract method.')
def compute_output_shape(self, input_shape):
total_elements = 0
for column in self._feature_columns:
total_elements += column.variable_shape.num_elements()
return self._target_shape(input_shape, total_elements)
def _process_dense_tensor(self, column, tensor):
"""Reshapes the dense tensor output of a column based on expected shape.
Args:
column: A DenseColumn or SequenceDenseColumn object.
tensor: A dense tensor obtained from the same column.
Returns:
Reshaped dense tensor."""
num_elements = column.variable_shape.num_elements()
target_shape = self._target_shape(array_ops.shape(tensor), num_elements)
return array_ops.reshape(tensor, shape=target_shape)
def _verify_and_concat_tensors(self, output_tensors):
"""Verifies and concatenates the dense output of several columns."""
_verify_static_batch_size_equality(output_tensors, self._feature_columns)
return array_ops.concat(output_tensors, -1)
def get_config(self):
# Import here to avoid circular imports.
from tensorflow.python.feature_column import serialization # pylint: disable=g-import-not-at-top
column_configs = serialization.serialize_feature_columns(
self._feature_columns)
config = {'feature_columns': column_configs}
config['partitioner'] = generic_utils.serialize_keras_object(
self._partitioner)
base_config = super( # pylint: disable=bad-super-call
_BaseFeaturesLayer, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config, custom_objects=None):
# Import here to avoid circular imports.
from tensorflow.python.feature_column import serialization # pylint: disable=g-import-not-at-top
config_cp = config.copy()
config_cp['feature_columns'] = serialization.deserialize_feature_columns(
config['feature_columns'], custom_objects=custom_objects)
config_cp['partitioner'] = generic_utils.deserialize_keras_object(
config['partitioner'], custom_objects)
return cls(**config_cp)
class _LinearModelLayer(Layer):
"""Layer that contains logic for `LinearModel`."""
def __init__(self,
feature_columns,
units=1,
sparse_combiner='sum',
trainable=True,
name=None,
**kwargs):
super(_LinearModelLayer, self).__init__(
name=name, trainable=trainable, **kwargs)
self._feature_columns = _normalize_feature_columns(feature_columns)
for column in self._feature_columns:
if not isinstance(column, (DenseColumn, CategoricalColumn)):
raise ValueError(
'Items of feature_columns must be either a '
'DenseColumn or CategoricalColumn. Given: {}'.format(column))
self._units = units
self._sparse_combiner = sparse_combiner
self._state_manager = _StateManagerImpl(self, self.trainable)
self.bias = None
def build(self, _):
# We need variable scopes for now because we want the variable partitioning
# information to percolate down. We also use _pure_variable_scope's here
# since we want to open up a name_scope in the `call` method while creating
# the ops.
with variable_scope._pure_variable_scope(self.name): # pylint: disable=protected-access
for column in self._feature_columns:
with variable_scope._pure_variable_scope( # pylint: disable=protected-access
_sanitize_column_name_for_variable_scope(column.name)):
# Create the state for each feature column
column.create_state(self._state_manager)
# Create a weight variable for each column.
if isinstance(column, CategoricalColumn):
first_dim = column.num_buckets
else:
first_dim = column.variable_shape.num_elements()
self._state_manager.create_variable(
column,
name='weights',
dtype=dtypes.float32,
shape=(first_dim, self._units),
initializer=initializers.zeros(),
trainable=self.trainable)
# Create a bias variable.
self.bias = self.add_variable(
name='bias_weights',
dtype=dtypes.float32,
shape=[self._units],
initializer=initializers.zeros(),
trainable=self.trainable,
use_resource=True,
# TODO(rohanj): Get rid of this hack once we have a mechanism for
# specifying a default partitioner for an entire layer. In that case,
# the default getter for Layers should work.
getter=variable_scope.get_variable)
super(_LinearModelLayer, self).build(None)
def call(self, features):
if not isinstance(features, dict):
raise ValueError('We expected a dictionary here. Instead we got: {}'
.format(features))
with ops.name_scope(self.name):
transformation_cache = FeatureTransformationCache(features)
weighted_sums = []
for column in self._feature_columns:
with ops.name_scope(
_sanitize_column_name_for_variable_scope(column.name)):
# All the weights used in the linear model are owned by the state
# manager associated with this Linear Model.
weight_var = self._state_manager.get_variable(column, 'weights')
weighted_sum = _create_weighted_sum(
column=column,
transformation_cache=transformation_cache,
state_manager=self._state_manager,
sparse_combiner=self._sparse_combiner,
weight_var=weight_var)
weighted_sums.append(weighted_sum)
_verify_static_batch_size_equality(weighted_sums, self._feature_columns)
predictions_no_bias = math_ops.add_n(
weighted_sums, name='weighted_sum_no_bias')
predictions = nn_ops.bias_add(
predictions_no_bias, self.bias, name='weighted_sum')
return predictions
def get_config(self):
# Import here to avoid circular imports.
from tensorflow.python.feature_column import serialization # pylint: disable=g-import-not-at-top
column_configs = serialization.serialize_feature_columns(
self._feature_columns)
config = {
'feature_columns': column_configs,
'units': self._units,
'sparse_combiner': self._sparse_combiner
}
base_config = super( # pylint: disable=bad-super-call
_LinearModelLayer, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config, custom_objects=None):
# Import here to avoid circular imports.
from tensorflow.python.feature_column import serialization # pylint: disable=g-import-not-at-top
config_cp = config.copy()
columns = serialization.deserialize_feature_columns(
config_cp['feature_columns'], custom_objects=custom_objects)
del config_cp['feature_columns']
return cls(feature_columns=columns, **config_cp)
# TODO(tanzheny): Cleanup it with respect to Premade model b/132690565.
class LinearModel(keras_training.Model):
"""Produces a linear prediction `Tensor` based on given `feature_columns`.
This layer generates a weighted sum based on output dimension `units`.
Weighted sum refers to logits in classification problems. It refers to the
prediction itself for linear regression problems.
  Note on supported columns: `LinearModel` treats categorical columns as
`indicator_column`s. To be specific, assume the input as `SparseTensor` looks
like:
```python
shape = [2, 2]
{
[0, 0]: "a"
[1, 0]: "b"
[1, 1]: "c"
}
```
  `linear_model` assigns weights for the presence of "a", "b", "c" implicitly,
just like `indicator_column`, while `input_layer` explicitly requires wrapping
each of categorical columns with an `embedding_column` or an
`indicator_column`.
Example of usage:
```python
price = numeric_column('price')
price_buckets = bucketized_column(price, boundaries=[0., 10., 100., 1000.])
keywords = categorical_column_with_hash_bucket("keywords", 10K)
keywords_price = crossed_column('keywords', price_buckets, ...)
columns = [price_buckets, keywords, keywords_price ...]
  linear_model = LinearModel(columns)
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
prediction = linear_model(features)
```
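  A minimal sketch of calling the model directly on in-memory tensors (the
  feature column helpers follow the same shorthand as the example above;
  depending on the TF version this may require graph mode):
  ```python
  price = numeric_column('price')
  size = numeric_column('size')
  model = LinearModel([price, size], units=1)
  features = {'price': tf.constant([[1.0], [5.0]]),
              'size': tf.constant([[2.0], [3.0]])}
  predictions = model(features)  # shape (batch_size, units) == (2, 1)
  ```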
"""
def __init__(self,
feature_columns,
units=1,
sparse_combiner='sum',
trainable=True,
name=None,
**kwargs):
"""Constructs a LinearLayer.
Args:
feature_columns: An iterable containing the FeatureColumns to use as
inputs to your model. All items should be instances of classes derived
from `_FeatureColumn`s.
units: An integer, dimensionality of the output space. Default value is 1.
sparse_combiner: A string specifying how to reduce if a categorical column
is multivalent. Except `numeric_column`, almost all columns passed to
`linear_model` are considered as categorical columns. It combines each
categorical column independently. Currently "mean", "sqrtn" and "sum"
are supported, with "sum" the default for linear model. "sqrtn" often
achieves good accuracy, in particular with bag-of-words columns.
* "sum": do not normalize features in the column
* "mean": do l1 normalization on features in the column
* "sqrtn": do l2 normalization on features in the column
For example, for two features represented as the categorical columns:
```python
# Feature 1
shape = [2, 2]
{
[0, 0]: "a"
[0, 1]: "b"
[1, 0]: "c"
}
# Feature 2
shape = [2, 3]
{
[0, 0]: "d"
[1, 0]: "e"
[1, 1]: "f"
[1, 2]: "g"
}
```
with `sparse_combiner` as "mean", the linear model outputs conceptually
are
```
        y_0 = 1.0 / 2.0 * (w_a + w_b) + w_c + b_0
        y_1 = w_d + 1.0 / 3.0 * (w_e + w_f + w_g) + b_1
```
where `y_i` is the output, `b_i` is the bias, and `w_x` is the weight
assigned to the presence of `x` in the input features.
trainable: If `True` also add the variable to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
name: Name to give to the Linear Model. All variables and ops created will
be scoped by this name.
**kwargs: Keyword arguments to construct a layer.
Raises:
ValueError: if an item in `feature_columns` is neither a `DenseColumn`
nor `CategoricalColumn`.
"""
super(LinearModel, self).__init__(name=name, **kwargs)
self.layer = _LinearModelLayer(
feature_columns,
units,
sparse_combiner,
trainable,
name=self.name,
**kwargs)
def call(self, features):
"""Returns a `Tensor` the represents the predictions of a linear model.
Args:
features: A mapping from key to tensors. `_FeatureColumn`s look up via
these keys. For example `numeric_column('price')` will look at 'price'
key in this dict. Values are `Tensor` or `SparseTensor` depending on
corresponding `_FeatureColumn`.
Returns:
A `Tensor` which represents predictions/logits of a linear model. Its
shape is (batch_size, units) and its dtype is `float32`.
Raises:
ValueError: If features are not a dictionary.
"""
return self.layer(features)
@property
def bias(self):
return self.layer.bias
def _transform_features_v2(features, feature_columns, state_manager):
"""Returns transformed features based on features columns passed in.
Please note that most probably you would not need to use this function. Please
check `input_layer` and `linear_model` to see whether they will
satisfy your use case or not.
Example:
```python
# Define features and transformations
crosses_a_x_b = crossed_column(
columns=["sparse_feature_a", "sparse_feature_b"], hash_bucket_size=10000)
price_buckets = bucketized_column(
source_column=numeric_column("price"), boundaries=[...])
columns = [crosses_a_x_b, price_buckets]
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
transformed = transform_features(features=features, feature_columns=columns)
assertCountEqual(columns, transformed.keys())
```
Args:
features: A mapping from key to tensors. `FeatureColumn`s look up via these
keys. For example `numeric_column('price')` will look at 'price' key in
this dict. Values can be a `SparseTensor` or a `Tensor` depends on
corresponding `FeatureColumn`.
feature_columns: An iterable containing all the `FeatureColumn`s.
state_manager: A StateManager object that holds the FeatureColumn state.
Returns:
A `dict` mapping `FeatureColumn` to `Tensor` and `SparseTensor` values.
"""
feature_columns = _normalize_feature_columns(feature_columns)
outputs = {}
with ops.name_scope(
None, default_name='transform_features', values=features.values()):
transformation_cache = FeatureTransformationCache(features)
for column in feature_columns:
with ops.name_scope(
None,
default_name=_sanitize_column_name_for_variable_scope(column.name)):
outputs[column] = transformation_cache.get(column, state_manager)
return outputs
@tf_export('feature_column.make_parse_example_spec', v1=[])
def make_parse_example_spec_v2(feature_columns):
"""Creates parsing spec dictionary from input feature_columns.
The returned dictionary can be used as arg 'features' in
`tf.io.parse_example`.
Typical usage example:
```python
# Define features and transformations
feature_a = tf.feature_column.categorical_column_with_vocabulary_file(...)
feature_b = tf.feature_column.numeric_column(...)
feature_c_bucketized = tf.feature_column.bucketized_column(
tf.feature_column.numeric_column("feature_c"), ...)
feature_a_x_feature_c = tf.feature_column.crossed_column(
columns=["feature_a", feature_c_bucketized], ...)
feature_columns = set(
[feature_b, feature_c_bucketized, feature_a_x_feature_c])
features = tf.io.parse_example(
serialized=serialized_examples,
features=tf.feature_column.make_parse_example_spec(feature_columns))
```
For the above example, make_parse_example_spec would return the dict:
```python
{
"feature_a": parsing_ops.VarLenFeature(tf.string),
"feature_b": parsing_ops.FixedLenFeature([1], dtype=tf.float32),
"feature_c": parsing_ops.FixedLenFeature([1], dtype=tf.float32)
}
```
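  A minimal self-contained sketch (column names are illustrative):
  ```python
  import tensorflow as tf
  price = tf.feature_column.numeric_column('price')
  keywords = tf.feature_column.categorical_column_with_hash_bucket(
      'keywords', hash_bucket_size=1000)
  spec = tf.feature_column.make_parse_example_spec([price, keywords])
  # spec maps 'price' to FixedLenFeature([1], tf.float32) and 'keywords' to
  # VarLenFeature(tf.string); pass it as `features` to tf.io.parse_example.
  ```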
Args:
feature_columns: An iterable containing all feature columns. All items
should be instances of classes derived from `FeatureColumn`.
Returns:
A dict mapping each feature key to a `FixedLenFeature` or `VarLenFeature`
value.
Raises:
ValueError: If any of the given `feature_columns` is not a `FeatureColumn`
instance.
"""
result = {}
for column in feature_columns:
if not isinstance(column, FeatureColumn):
raise ValueError('All feature_columns must be FeatureColumn instances. '
'Given: {}'.format(column))
config = column.parse_example_spec
for key, value in six.iteritems(config):
if key in result and value != result[key]:
raise ValueError(
'feature_columns contain different parse_spec for key '
'{}. Given {} and {}'.format(key, value, result[key]))
result.update(config)
return result
@tf_export('feature_column.embedding_column')
def embedding_column(categorical_column,
dimension,
combiner='mean',
initializer=None,
ckpt_to_load_from=None,
tensor_name_in_ckpt=None,
max_norm=None,
trainable=True,
use_safe_embedding_lookup=True):
"""`DenseColumn` that converts from sparse, categorical input.
Use this when your inputs are sparse, but you want to convert them to a dense
representation (e.g., to feed to a DNN).
Inputs must be a `CategoricalColumn` created by any of the
`categorical_column_*` function. Here is an example of using
`embedding_column` with `DNNClassifier`:
```python
video_id = categorical_column_with_identity(
key='video_id', num_buckets=1000000, default_value=0)
columns = [embedding_column(video_id, 9),...]
estimator = tf.estimator.DNNClassifier(feature_columns=columns, ...)
label_column = ...
def input_fn():
features = tf.io.parse_example(
..., features=make_parse_example_spec(columns + [label_column]))
labels = features.pop(label_column.name)
return features, labels
estimator.train(input_fn=input_fn, steps=100)
```
Here is an example using `embedding_column` with model_fn:
```python
def model_fn(features, ...):
video_id = categorical_column_with_identity(
key='video_id', num_buckets=1000000, default_value=0)
columns = [embedding_column(video_id, 9),...]
dense_tensor = input_layer(features, columns)
# Form DNN layers, calculate loss, and return EstimatorSpec.
...
```
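  A minimal self-contained sketch using `tf.keras.layers.DenseFeatures`
  (feature names and values are illustrative):
  ```python
  import tensorflow as tf
  colors = tf.feature_column.categorical_column_with_vocabulary_list(
      'colors', ['R', 'G', 'B'])
  colors_embedded = tf.feature_column.embedding_column(colors, dimension=4)
  features = {'colors': tf.constant([['R'], ['B']])}
  dense = tf.keras.layers.DenseFeatures([colors_embedded])(features)
  # dense has shape (2, 4): one 4-dimensional embedding per example.
  ```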
Args:
categorical_column: A `CategoricalColumn` created by a
`categorical_column_with_*` function. This column produces the sparse IDs
that are inputs to the embedding lookup.
dimension: An integer specifying dimension of the embedding, must be > 0.
combiner: A string specifying how to reduce if there are multiple entries in
a single row. Currently 'mean', 'sqrtn' and 'sum' are supported, with
'mean' the default. 'sqrtn' often achieves good accuracy, in particular
      with bag-of-words columns. Each of these can be thought of as an
      example-level normalization on the column. For more information, see
`tf.embedding_lookup_sparse`.
initializer: A variable initializer function to be used in embedding
variable initialization. If not specified, defaults to
`truncated_normal_initializer` with mean `0.0` and
standard deviation `1/sqrt(dimension)`.
ckpt_to_load_from: String representing checkpoint name/pattern from which to
restore column weights. Required if `tensor_name_in_ckpt` is not `None`.
tensor_name_in_ckpt: Name of the `Tensor` in `ckpt_to_load_from` from which
to restore the column weights. Required if `ckpt_to_load_from` is not
`None`.
max_norm: If not `None`, embedding values are l2-normalized to this value.
trainable: Whether or not the embedding is trainable. Default is True.
use_safe_embedding_lookup: If true, uses safe_embedding_lookup_sparse
instead of embedding_lookup_sparse. safe_embedding_lookup_sparse ensures
there are no empty rows and all weights and ids are positive at the
expense of extra compute cost. This only applies to rank 2 (NxM) shaped
input tensors. Defaults to true, consider turning off if the above checks
are not needed. Note that having empty rows will not trigger any error
though the output result might be 0 or omitted.
Returns:
`DenseColumn` that converts from sparse input.
Raises:
ValueError: if `dimension` not > 0.
ValueError: if exactly one of `ckpt_to_load_from` and `tensor_name_in_ckpt`
is specified.
ValueError: if `initializer` is specified and is not callable.
RuntimeError: If eager execution is enabled.
"""
if (dimension is None) or (dimension < 1):
raise ValueError('Invalid dimension {}.'.format(dimension))
if (ckpt_to_load_from is None) != (tensor_name_in_ckpt is None):
raise ValueError('Must specify both `ckpt_to_load_from` and '
'`tensor_name_in_ckpt` or none of them.')
if (initializer is not None) and (not callable(initializer)):
raise ValueError('initializer must be callable if specified. '
'Embedding of column_name: {}'.format(
categorical_column.name))
if initializer is None:
initializer = initializers.truncated_normal(
mean=0.0, stddev=1 / math.sqrt(dimension))
return EmbeddingColumn(
categorical_column=categorical_column,
dimension=dimension,
combiner=combiner,
initializer=initializer,
ckpt_to_load_from=ckpt_to_load_from,
tensor_name_in_ckpt=tensor_name_in_ckpt,
max_norm=max_norm,
trainable=trainable,
use_safe_embedding_lookup=use_safe_embedding_lookup)
@tf_export(v1=['feature_column.shared_embedding_columns'])
def shared_embedding_columns(categorical_columns,
dimension,
combiner='mean',
initializer=None,
shared_embedding_collection_name=None,
ckpt_to_load_from=None,
tensor_name_in_ckpt=None,
max_norm=None,
trainable=True,
use_safe_embedding_lookup=True):
"""List of dense columns that convert from sparse, categorical input.
This is similar to `embedding_column`, except that it produces a list of
embedding columns that share the same embedding weights.
Use this when your inputs are sparse and of the same type (e.g. watched and
impression video IDs that share the same vocabulary), and you want to convert
them to a dense representation (e.g., to feed to a DNN).
Inputs must be a list of categorical columns created by any of the
`categorical_column_*` function. They must all be of the same type and have
the same arguments except `key`. E.g. they can be
categorical_column_with_vocabulary_file with the same vocabulary_file. Some or
all columns could also be weighted_categorical_column.
Here is an example embedding of two features for a DNNClassifier model:
```python
watched_video_id = categorical_column_with_vocabulary_file(
'watched_video_id', video_vocabulary_file, video_vocabulary_size)
impression_video_id = categorical_column_with_vocabulary_file(
'impression_video_id', video_vocabulary_file, video_vocabulary_size)
columns = shared_embedding_columns(
[watched_video_id, impression_video_id], dimension=10)
estimator = tf.estimator.DNNClassifier(feature_columns=columns, ...)
label_column = ...
def input_fn():
features = tf.io.parse_example(
..., features=make_parse_example_spec(columns + [label_column]))
labels = features.pop(label_column.name)
return features, labels
estimator.train(input_fn=input_fn, steps=100)
```
Here is an example using `shared_embedding_columns` with model_fn:
```python
def model_fn(features, ...):
watched_video_id = categorical_column_with_vocabulary_file(
'watched_video_id', video_vocabulary_file, video_vocabulary_size)
impression_video_id = categorical_column_with_vocabulary_file(
'impression_video_id', video_vocabulary_file, video_vocabulary_size)
columns = shared_embedding_columns(
[watched_video_id, impression_video_id], dimension=10)
dense_tensor = input_layer(features, columns)
# Form DNN layers, calculate loss, and return EstimatorSpec.
...
```
Args:
categorical_columns: List of categorical columns created by a
`categorical_column_with_*` function. These columns produce the sparse IDs
that are inputs to the embedding lookup. All columns must be of the same
type and have the same arguments except `key`. E.g. they can be
categorical_column_with_vocabulary_file with the same vocabulary_file.
Some or all columns could also be weighted_categorical_column.
dimension: An integer specifying dimension of the embedding, must be > 0.
combiner: A string specifying how to reduce if there are multiple entries in
a single row. Currently 'mean', 'sqrtn' and 'sum' are supported, with
'mean' the default. 'sqrtn' often achieves good accuracy, in particular
      with bag-of-words columns. Each of these can be thought of as an
      example-level normalization on the column. For more information, see
`tf.embedding_lookup_sparse`.
initializer: A variable initializer function to be used in embedding
variable initialization. If not specified, defaults to
`truncated_normal_initializer` with mean `0.0` and
standard deviation `1/sqrt(dimension)`.
shared_embedding_collection_name: Optional name of the collection where
shared embedding weights are added. If not given, a reasonable name will
be chosen based on the names of `categorical_columns`. This is also used
in `variable_scope` when creating shared embedding weights.
ckpt_to_load_from: String representing checkpoint name/pattern from which to
restore column weights. Required if `tensor_name_in_ckpt` is not `None`.
tensor_name_in_ckpt: Name of the `Tensor` in `ckpt_to_load_from` from which
to restore the column weights. Required if `ckpt_to_load_from` is not
`None`.
max_norm: If not `None`, each embedding is clipped if its l2-norm is larger
than this value, before combining.
trainable: Whether or not the embedding is trainable. Default is True.
use_safe_embedding_lookup: If true, uses safe_embedding_lookup_sparse
instead of embedding_lookup_sparse. safe_embedding_lookup_sparse ensures
there are no empty rows and all weights and ids are positive at the
expense of extra compute cost. This only applies to rank 2 (NxM) shaped
input tensors. Defaults to true, consider turning off if the above checks
are not needed. Note that having empty rows will not trigger any error
though the output result might be 0 or omitted.
Returns:
A list of dense columns that converts from sparse input. The order of
results follows the ordering of `categorical_columns`.
Raises:
ValueError: if `dimension` not > 0.
ValueError: if any of the given `categorical_columns` is of different type
or has different arguments than the others.
ValueError: if exactly one of `ckpt_to_load_from` and `tensor_name_in_ckpt`
is specified.
ValueError: if `initializer` is specified and is not callable.
RuntimeError: if eager execution is enabled.
"""
if context.executing_eagerly():
raise RuntimeError('shared_embedding_columns are not supported when eager '
'execution is enabled.')
if (dimension is None) or (dimension < 1):
raise ValueError('Invalid dimension {}.'.format(dimension))
if (ckpt_to_load_from is None) != (tensor_name_in_ckpt is None):
raise ValueError('Must specify both `ckpt_to_load_from` and '
'`tensor_name_in_ckpt` or none of them.')
if (initializer is not None) and (not callable(initializer)):
raise ValueError('initializer must be callable if specified.')
if initializer is None:
initializer = initializers.truncated_normal(
mean=0.0, stddev=1. / math.sqrt(dimension))
# Sort the columns so the default collection name is deterministic even if the
# user passes columns from an unsorted collection, such as dict.values().
sorted_columns = sorted(categorical_columns, key=lambda x: x.name)
c0 = sorted_columns[0]
num_buckets = c0._num_buckets # pylint: disable=protected-access
if not isinstance(c0, fc_old._CategoricalColumn): # pylint: disable=protected-access
raise ValueError(
'All categorical_columns must be subclasses of _CategoricalColumn. '
'Given: {}, of type: {}'.format(c0, type(c0)))
while isinstance(
c0, (fc_old._WeightedCategoricalColumn, WeightedCategoricalColumn, # pylint: disable=protected-access
fc_old._SequenceCategoricalColumn, SequenceCategoricalColumn)): # pylint: disable=protected-access
c0 = c0.categorical_column
for c in sorted_columns[1:]:
while isinstance(
c, (fc_old._WeightedCategoricalColumn, WeightedCategoricalColumn, # pylint: disable=protected-access
fc_old._SequenceCategoricalColumn, SequenceCategoricalColumn)): # pylint: disable=protected-access
c = c.categorical_column
if not isinstance(c, type(c0)):
raise ValueError(
'To use shared_embedding_column, all categorical_columns must have '
'the same type, or be weighted_categorical_column or sequence column '
'of the same type. Given column: {} of type: {} does not match given '
'column: {} of type: {}'.format(c0, type(c0), c, type(c)))
if num_buckets != c._num_buckets: # pylint: disable=protected-access
raise ValueError(
'To use shared_embedding_column, all categorical_columns must have '
'the same number of buckets. Given column: {} with buckets: {} does '
'not match column: {} with buckets: {}'.format(
c0, num_buckets, c, c._num_buckets)) # pylint: disable=protected-access
if not shared_embedding_collection_name:
shared_embedding_collection_name = '_'.join(c.name for c in sorted_columns)
shared_embedding_collection_name += '_shared_embedding'
result = []
for column in categorical_columns:
result.append(
fc_old._SharedEmbeddingColumn( # pylint: disable=protected-access
categorical_column=column,
initializer=initializer,
dimension=dimension,
combiner=combiner,
shared_embedding_collection_name=shared_embedding_collection_name,
ckpt_to_load_from=ckpt_to_load_from,
tensor_name_in_ckpt=tensor_name_in_ckpt,
max_norm=max_norm,
trainable=trainable,
use_safe_embedding_lookup=use_safe_embedding_lookup))
return result
@tf_export('feature_column.shared_embeddings', v1=[])
def shared_embedding_columns_v2(categorical_columns,
dimension,
combiner='mean',
initializer=None,
shared_embedding_collection_name=None,
ckpt_to_load_from=None,
tensor_name_in_ckpt=None,
max_norm=None,
trainable=True,
use_safe_embedding_lookup=True):
"""List of dense columns that convert from sparse, categorical input.
This is similar to `embedding_column`, except that it produces a list of
embedding columns that share the same embedding weights.
Use this when your inputs are sparse and of the same type (e.g. watched and
impression video IDs that share the same vocabulary), and you want to convert
them to a dense representation (e.g., to feed to a DNN).
Inputs must be a list of categorical columns created by any of the
`categorical_column_*` function. They must all be of the same type and have
the same arguments except `key`. E.g. they can be
categorical_column_with_vocabulary_file with the same vocabulary_file. Some or
all columns could also be weighted_categorical_column.
Here is an example embedding of two features for a DNNClassifier model:
```python
watched_video_id = categorical_column_with_vocabulary_file(
'watched_video_id', video_vocabulary_file, video_vocabulary_size)
impression_video_id = categorical_column_with_vocabulary_file(
'impression_video_id', video_vocabulary_file, video_vocabulary_size)
columns = shared_embedding_columns(
[watched_video_id, impression_video_id], dimension=10)
estimator = tf.estimator.DNNClassifier(feature_columns=columns, ...)
label_column = ...
def input_fn():
features = tf.io.parse_example(
..., features=make_parse_example_spec(columns + [label_column]))
labels = features.pop(label_column.name)
return features, labels
estimator.train(input_fn=input_fn, steps=100)
```
Here is an example using `shared_embedding_columns` with model_fn:
```python
def model_fn(features, ...):
watched_video_id = categorical_column_with_vocabulary_file(
'watched_video_id', video_vocabulary_file, video_vocabulary_size)
impression_video_id = categorical_column_with_vocabulary_file(
'impression_video_id', video_vocabulary_file, video_vocabulary_size)
columns = shared_embedding_columns(
[watched_video_id, impression_video_id], dimension=10)
dense_tensor = input_layer(features, columns)
# Form DNN layers, calculate loss, and return EstimatorSpec.
...
```
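  A minimal graph-mode sketch (these columns raise a `RuntimeError` under eager
  execution, so eager must be disabled first; names are illustrative):
  ```python
  import tensorflow as tf
  tf.compat.v1.disable_eager_execution()
  watched = tf.feature_column.categorical_column_with_identity(
      'watched_id', num_buckets=1000)
  impressed = tf.feature_column.categorical_column_with_identity(
      'impression_id', num_buckets=1000)
  # Both returned columns look up the same underlying embedding table.
  watched_emb, impressed_emb = tf.feature_column.shared_embeddings(
      [watched, impressed], dimension=16)
  ```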
Args:
categorical_columns: List of categorical columns created by a
`categorical_column_with_*` function. These columns produce the sparse IDs
that are inputs to the embedding lookup. All columns must be of the same
type and have the same arguments except `key`. E.g. they can be
categorical_column_with_vocabulary_file with the same vocabulary_file.
Some or all columns could also be weighted_categorical_column.
dimension: An integer specifying dimension of the embedding, must be > 0.
combiner: A string specifying how to reduce if there are multiple entries
in a single row. Currently 'mean', 'sqrtn' and 'sum' are supported, with
'mean' the default. 'sqrtn' often achieves good accuracy, in particular
      with bag-of-words columns. Each of these can be thought of as an
      example-level normalization on the column. For more information, see
`tf.embedding_lookup_sparse`.
initializer: A variable initializer function to be used in embedding
variable initialization. If not specified, defaults to
`truncated_normal_initializer` with mean `0.0` and standard
deviation `1/sqrt(dimension)`.
shared_embedding_collection_name: Optional collective name of these columns.
If not given, a reasonable name will be chosen based on the names of
`categorical_columns`.
ckpt_to_load_from: String representing checkpoint name/pattern from which to
restore column weights. Required if `tensor_name_in_ckpt` is not `None`.
tensor_name_in_ckpt: Name of the `Tensor` in `ckpt_to_load_from` from
which to restore the column weights. Required if `ckpt_to_load_from` is
not `None`.
max_norm: If not `None`, each embedding is clipped if its l2-norm is
larger than this value, before combining.
trainable: Whether or not the embedding is trainable. Default is True.
use_safe_embedding_lookup: If true, uses safe_embedding_lookup_sparse
instead of embedding_lookup_sparse. safe_embedding_lookup_sparse ensures
there are no empty rows and all weights and ids are positive at the
expense of extra compute cost. This only applies to rank 2 (NxM) shaped
input tensors. Defaults to true, consider turning off if the above checks
are not needed. Note that having empty rows will not trigger any error
though the output result might be 0 or omitted.
Returns:
A list of dense columns that converts from sparse input. The order of
results follows the ordering of `categorical_columns`.
Raises:
ValueError: if `dimension` not > 0.
ValueError: if any of the given `categorical_columns` is of different type
or has different arguments than the others.
ValueError: if exactly one of `ckpt_to_load_from` and `tensor_name_in_ckpt`
is specified.
ValueError: if `initializer` is specified and is not callable.
RuntimeError: if eager execution is enabled.
"""
if context.executing_eagerly():
raise RuntimeError('shared_embedding_columns are not supported when eager '
'execution is enabled.')
if (dimension is None) or (dimension < 1):
raise ValueError('Invalid dimension {}.'.format(dimension))
if (ckpt_to_load_from is None) != (tensor_name_in_ckpt is None):
raise ValueError('Must specify both `ckpt_to_load_from` and '
'`tensor_name_in_ckpt` or none of them.')
if (initializer is not None) and (not callable(initializer)):
raise ValueError('initializer must be callable if specified.')
if initializer is None:
initializer = initializers.truncated_normal(
mean=0.0, stddev=1. / math.sqrt(dimension))
# Sort the columns so the default collection name is deterministic even if the
# user passes columns from an unsorted collection, such as dict.values().
sorted_columns = sorted(categorical_columns, key=lambda x: x.name)
c0 = sorted_columns[0]
num_buckets = c0.num_buckets
if not isinstance(c0, CategoricalColumn):
raise ValueError(
'All categorical_columns must be subclasses of CategoricalColumn. '
'Given: {}, of type: {}'.format(c0, type(c0)))
while isinstance(c0, (WeightedCategoricalColumn, SequenceCategoricalColumn)):
c0 = c0.categorical_column
for c in sorted_columns[1:]:
while isinstance(c, (WeightedCategoricalColumn, SequenceCategoricalColumn)):
c = c.categorical_column
if not isinstance(c, type(c0)):
raise ValueError(
'To use shared_embedding_column, all categorical_columns must have '
'the same type, or be weighted_categorical_column or sequence column '
'of the same type. Given column: {} of type: {} does not match given '
'column: {} of type: {}'.format(c0, type(c0), c, type(c)))
if num_buckets != c.num_buckets:
raise ValueError(
'To use shared_embedding_column, all categorical_columns must have '
'the same number of buckets. Given column: {} with buckets: {} does '
'not match column: {} with buckets: {}'.format(
c0, num_buckets, c, c.num_buckets))
if not shared_embedding_collection_name:
shared_embedding_collection_name = '_'.join(c.name for c in sorted_columns)
shared_embedding_collection_name += '_shared_embedding'
column_creator = SharedEmbeddingColumnCreator(
dimension, initializer, ckpt_to_load_from, tensor_name_in_ckpt,
num_buckets, trainable, shared_embedding_collection_name,
use_safe_embedding_lookup)
result = []
for column in categorical_columns:
result.append(
column_creator(
categorical_column=column, combiner=combiner, max_norm=max_norm))
return result
@tf_export('feature_column.numeric_column')
def numeric_column(key,
shape=(1,),
default_value=None,
dtype=dtypes.float32,
normalizer_fn=None):
"""Represents real valued or numerical features.
Example:
```python
price = numeric_column('price')
columns = [price, ...]
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
dense_tensor = input_layer(features, columns)
# or
bucketized_price = bucketized_column(price, boundaries=[...])
columns = [bucketized_price, ...]
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
linear_prediction = linear_model(features, columns)
```
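  A minimal self-contained sketch showing `shape` and `normalizer_fn` (feature
  name and values are illustrative):
  ```python
  import tensorflow as tf
  # Two raw measurements per example, normalized before use.
  readings = tf.feature_column.numeric_column(
      'readings', shape=(2,), normalizer_fn=lambda x: (x - 3.0) / 4.2)
  features = {'readings': tf.constant([[1.0, 2.0], [3.0, 4.0]])}
  dense = tf.keras.layers.DenseFeatures([readings])(features)
  # dense has shape (2, 2) and holds the normalized values.
  ```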
Args:
key: A unique string identifying the input feature. It is used as the
column name and the dictionary key for feature parsing configs, feature
`Tensor` objects, and feature columns.
    shape: An iterable of integers specifying the shape of the `Tensor`. A
      single integer can be given, which means a one-dimensional `Tensor` with
      the given width. The `Tensor` representing the column will have the
      shape of [batch_size] + `shape`.
default_value: A single value compatible with `dtype` or an iterable of
values compatible with `dtype` which the column takes on during
`tf.Example` parsing if data is missing. A default value of `None` will
cause `tf.io.parse_example` to fail if an example does not contain this
column. If a single value is provided, the same value will be applied as
the default value for every item. If an iterable of values is provided,
the shape of the `default_value` should be equal to the given `shape`.
dtype: defines the type of values. Default value is `tf.float32`. Must be a
non-quantized, real integer or floating point type.
normalizer_fn: If not `None`, a function that can be used to normalize the
value of the tensor after `default_value` is applied for parsing.
Normalizer function takes the input `Tensor` as its argument, and returns
the output `Tensor`. (e.g. lambda x: (x - 3.0) / 4.2). Please note that
even though the most common use case of this function is normalization, it
      can be used for any kind of TensorFlow transformation.
Returns:
A `NumericColumn`.
Raises:
TypeError: if any dimension in shape is not an int
ValueError: if any dimension in shape is not a positive integer
TypeError: if `default_value` is an iterable but not compatible with `shape`
TypeError: if `default_value` is not compatible with `dtype`.
ValueError: if `dtype` is not convertible to `tf.float32`.
"""
shape = _check_shape(shape, key)
if not (dtype.is_integer or dtype.is_floating):
raise ValueError('dtype must be convertible to float. '
'dtype: {}, key: {}'.format(dtype, key))
default_value = fc_utils.check_default_value(
shape, default_value, dtype, key)
if normalizer_fn is not None and not callable(normalizer_fn):
raise TypeError(
'normalizer_fn must be a callable. Given: {}'.format(normalizer_fn))
fc_utils.assert_key_is_string(key)
return NumericColumn(
key,
shape=shape,
default_value=default_value,
dtype=dtype,
normalizer_fn=normalizer_fn)
@tf_export('feature_column.bucketized_column')
def bucketized_column(source_column, boundaries):
"""Represents discretized dense input bucketed by `boundaries`.
Buckets include the left boundary, and exclude the right boundary. Namely,
`boundaries=[0., 1., 2.]` generates buckets `(-inf, 0.)`, `[0., 1.)`,
`[1., 2.)`, and `[2., +inf)`.
For example, if the inputs are
```python
boundaries = [0, 10, 100]
input tensor = [[-5, 10000]
[150, 10]
[5, 100]]
```
then the output will be
```python
output = [[0, 3]
[3, 2]
[1, 3]]
```
Example:
```python
price = tf.feature_column.numeric_column('price')
bucketized_price = tf.feature_column.bucketized_column(
price, boundaries=[...])
columns = [bucketized_price, ...]
features = tf.io.parse_example(
..., features=tf.feature_column.make_parse_example_spec(columns))
dense_tensor = tf.keras.layers.DenseFeatures(columns)(features)
```
A `bucketized_column` can also be crossed with another categorical column
using `crossed_column`:
```python
price = tf.feature_column.numeric_column('price')
# bucketized_column converts numerical feature to a categorical one.
bucketized_price = tf.feature_column.bucketized_column(
price, boundaries=[...])
# 'keywords' is a string feature.
price_x_keywords = tf.feature_column.crossed_column(
[bucketized_price, 'keywords'], 50K)
columns = [price_x_keywords, ...]
features = tf.io.parse_example(
..., features=tf.feature_column.make_parse_example_spec(columns))
dense_tensor = tf.keras.layers.DenseFeatures(columns)(features)
linear_model = tf.keras.experimental.LinearModel(units=...)(dense_tensor)
```
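  A minimal self-contained sketch of the bucketing described above (values are
  illustrative):
  ```python
  import tensorflow as tf
  price = tf.feature_column.numeric_column('price')
  bucketized = tf.feature_column.bucketized_column(
      price, boundaries=[0., 10., 100.])
  features = {'price': tf.constant([[-5.0], [150.0], [5.0]])}
  one_hot = tf.keras.layers.DenseFeatures([bucketized])(features)
  # Each row is a one-hot over the 4 buckets
  # (-inf, 0), [0, 10), [10, 100), [100, +inf).
  ```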
Args:
source_column: A one-dimensional dense column which is generated with
`numeric_column`.
boundaries: A sorted list or tuple of floats specifying the boundaries.
Returns:
A `BucketizedColumn`.
Raises:
ValueError: If `source_column` is not a numeric column, or if it is not
one-dimensional.
ValueError: If `boundaries` is not a sorted list or tuple.
"""
if not isinstance(source_column, (NumericColumn, fc_old._NumericColumn)): # pylint: disable=protected-access
raise ValueError(
'source_column must be a column generated with numeric_column(). '
'Given: {}'.format(source_column))
if len(source_column.shape) > 1:
raise ValueError(
'source_column must be one-dimensional column. '
'Given: {}'.format(source_column))
if not boundaries:
raise ValueError('boundaries must not be empty.')
  if not isinstance(boundaries, (list, tuple)):
raise ValueError('boundaries must be a sorted list.')
for i in range(len(boundaries) - 1):
if boundaries[i] >= boundaries[i + 1]:
raise ValueError('boundaries must be a sorted list.')
return BucketizedColumn(source_column, tuple(boundaries))
@tf_export('feature_column.categorical_column_with_hash_bucket')
def categorical_column_with_hash_bucket(key,
hash_bucket_size,
dtype=dtypes.string):
"""Represents sparse feature where ids are set by hashing.
Use this when your sparse features are in string or integer format, and you
want to distribute your inputs into a finite number of buckets by hashing.
output_id = Hash(input_feature_string) % bucket_size for string type input.
For int type input, the value is converted to its string representation first
and then hashed by the same formula.
For input dictionary `features`, `features[key]` is either `Tensor` or
`SparseTensor`. If `Tensor`, missing values can be represented by `-1` for int
and `''` for string, which will be dropped by this feature column.
Example:
```python
keywords = categorical_column_with_hash_bucket("keywords", 10K)
columns = [keywords, ...]
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
linear_prediction = linear_model(features, columns)
# or
keywords_embedded = embedding_column(keywords, 16)
columns = [keywords_embedded, ...]
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
dense_tensor = input_layer(features, columns)
```
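  A minimal self-contained sketch (names and bucket size are illustrative):
  ```python
  import tensorflow as tf
  keywords = tf.feature_column.categorical_column_with_hash_bucket(
      'keywords', hash_bucket_size=100)
  # Wrap in an indicator (or embedding) column to obtain a dense representation.
  keywords_one_hot = tf.feature_column.indicator_column(keywords)
  features = {'keywords': tf.constant([['tensorflow'], ['feature']])}
  dense = tf.keras.layers.DenseFeatures([keywords_one_hot])(features)
  # dense has shape (2, 100); each row is multi-hot over the hash buckets.
  ```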
Args:
key: A unique string identifying the input feature. It is used as the
column name and the dictionary key for feature parsing configs, feature
`Tensor` objects, and feature columns.
hash_bucket_size: An int > 1. The number of buckets.
dtype: The type of features. Only string and integer types are supported.
Returns:
A `HashedCategoricalColumn`.
Raises:
    ValueError: `hash_bucket_size` is `None` or less than 1.
ValueError: `dtype` is neither string nor integer.
"""
if hash_bucket_size is None:
raise ValueError('hash_bucket_size must be set. ' 'key: {}'.format(key))
if hash_bucket_size < 1:
raise ValueError('hash_bucket_size must be at least 1. '
'hash_bucket_size: {}, key: {}'.format(
hash_bucket_size, key))
fc_utils.assert_key_is_string(key)
fc_utils.assert_string_or_int(dtype, prefix='column_name: {}'.format(key))
return HashedCategoricalColumn(key, hash_bucket_size, dtype)
@tf_export(v1=['feature_column.categorical_column_with_vocabulary_file'])
def categorical_column_with_vocabulary_file(key,
vocabulary_file,
vocabulary_size=None,
num_oov_buckets=0,
default_value=None,
dtype=dtypes.string):
"""A `CategoricalColumn` with a vocabulary file.
Use this when your inputs are in string or integer format, and you have a
vocabulary file that maps each value to an integer ID. By default,
out-of-vocabulary values are ignored. Use either (but not both) of
`num_oov_buckets` and `default_value` to specify how to include
out-of-vocabulary values.
For input dictionary `features`, `features[key]` is either `Tensor` or
`SparseTensor`. If `Tensor`, missing values can be represented by `-1` for int
and `''` for string, which will be dropped by this feature column.
Example with `num_oov_buckets`:
File '/us/states.txt' contains 50 lines, each with a 2-character U.S. state
abbreviation. All inputs with values in that file are assigned an ID 0-49,
corresponding to its line number. All other values are hashed and assigned an
ID 50-54.
```python
states = categorical_column_with_vocabulary_file(
key='states', vocabulary_file='/us/states.txt', vocabulary_size=50,
num_oov_buckets=5)
columns = [states, ...]
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
linear_prediction = linear_model(features, columns)
```
Example with `default_value`:
File '/us/states.txt' contains 51 lines - the first line is 'XX', and the
other 50 each have a 2-character U.S. state abbreviation. Both a literal 'XX'
in input, and other values missing from the file, will be assigned ID 0. All
others are assigned the corresponding line number 1-50.
```python
states = categorical_column_with_vocabulary_file(
key='states', vocabulary_file='/us/states.txt', vocabulary_size=51,
default_value=0)
columns = [states, ...]
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
linear_prediction, _, _ = linear_model(features, columns)
```
And to make an embedding with either:
```python
columns = [embedding_column(states, 3),...]
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
dense_tensor = input_layer(features, columns)
```
Args:
key: A unique string identifying the input feature. It is used as the
column name and the dictionary key for feature parsing configs, feature
`Tensor` objects, and feature columns.
vocabulary_file: The vocabulary file name.
    vocabulary_size: Number of the elements in the vocabulary. This must be no
      greater than the length of `vocabulary_file`; if it is less, later values
      are ignored. If None, it is set to the length of `vocabulary_file`.
num_oov_buckets: Non-negative integer, the number of out-of-vocabulary
buckets. All out-of-vocabulary inputs will be assigned IDs in the range
`[vocabulary_size, vocabulary_size+num_oov_buckets)` based on a hash of
the input value. A positive `num_oov_buckets` can not be specified with
`default_value`.
default_value: The integer ID value to return for out-of-vocabulary feature
values, defaults to `-1`. This can not be specified with a positive
`num_oov_buckets`.
dtype: The type of features. Only string and integer types are supported.
Returns:
A `CategoricalColumn` with a vocabulary file.
Raises:
ValueError: `vocabulary_file` is missing or cannot be opened.
ValueError: `vocabulary_size` is missing or < 1.
ValueError: `num_oov_buckets` is a negative integer.
ValueError: `num_oov_buckets` and `default_value` are both specified.
ValueError: `dtype` is neither string nor integer.
"""
return categorical_column_with_vocabulary_file_v2(
key, vocabulary_file, vocabulary_size,
dtype, default_value,
num_oov_buckets)
@tf_export('feature_column.categorical_column_with_vocabulary_file', v1=[])
def categorical_column_with_vocabulary_file_v2(key,
vocabulary_file,
vocabulary_size=None,
dtype=dtypes.string,
default_value=None,
num_oov_buckets=0):
"""A `CategoricalColumn` with a vocabulary file.
Use this when your inputs are in string or integer format, and you have a
vocabulary file that maps each value to an integer ID. By default,
out-of-vocabulary values are ignored. Use either (but not both) of
`num_oov_buckets` and `default_value` to specify how to include
out-of-vocabulary values.
For input dictionary `features`, `features[key]` is either `Tensor` or
`SparseTensor`. If `Tensor`, missing values can be represented by `-1` for int
and `''` for string, which will be dropped by this feature column.
Example with `num_oov_buckets`:
File `'/us/states.txt'` contains 50 lines, each with a 2-character U.S. state
abbreviation. All inputs with values in that file are assigned an ID 0-49,
corresponding to its line number. All other values are hashed and assigned an
ID 50-54.
```python
states = categorical_column_with_vocabulary_file(
key='states', vocabulary_file='/us/states.txt', vocabulary_size=50,
num_oov_buckets=5)
columns = [states, ...]
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
linear_prediction = linear_model(features, columns)
```
Example with `default_value`:
File `'/us/states.txt'` contains 51 lines - the first line is `'XX'`, and the
other 50 each have a 2-character U.S. state abbreviation. Both a literal
`'XX'` in input, and other values missing from the file, will be assigned
ID 0. All others are assigned the corresponding line number 1-50.
```python
states = categorical_column_with_vocabulary_file(
key='states', vocabulary_file='/us/states.txt', vocabulary_size=51,
default_value=0)
columns = [states, ...]
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
linear_prediction, _, _ = linear_model(features, columns)
```
And to make an embedding with either:
```python
columns = [embedding_column(states, 3),...]
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
dense_tensor = input_layer(features, columns)
```
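  A minimal self-contained sketch that first writes a small vocabulary file
  (path and contents are illustrative):
  ```python
  import tensorflow as tf
  with open('/tmp/colors.txt', 'w') as f:
    for token in ['R', 'G', 'B']:
      print(token, file=f)
  colors = tf.feature_column.categorical_column_with_vocabulary_file(
      key='colors', vocabulary_file='/tmp/colors.txt', num_oov_buckets=1)
  one_hot = tf.feature_column.indicator_column(colors)
  features = {'colors': tf.constant([['R'], ['purple']])}
  dense = tf.keras.layers.DenseFeatures([one_hot])(features)
  # dense has shape (2, 4): 3 in-vocabulary ids plus 1 OOV bucket.
  ```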
Args:
key: A unique string identifying the input feature. It is used as the
column name and the dictionary key for feature parsing configs, feature
`Tensor` objects, and feature columns.
vocabulary_file: The vocabulary file name.
    vocabulary_size: Number of the elements in the vocabulary. This must be no
      greater than the length of `vocabulary_file`; if it is less, later values
      are ignored. If None, it is set to the length of `vocabulary_file`.
dtype: The type of features. Only string and integer types are supported.
default_value: The integer ID value to return for out-of-vocabulary feature
values, defaults to `-1`. This can not be specified with a positive
`num_oov_buckets`.
num_oov_buckets: Non-negative integer, the number of out-of-vocabulary
buckets. All out-of-vocabulary inputs will be assigned IDs in the range
`[vocabulary_size, vocabulary_size+num_oov_buckets)` based on a hash of
the input value. A positive `num_oov_buckets` can not be specified with
`default_value`.
Returns:
A `CategoricalColumn` with a vocabulary file.
Raises:
ValueError: `vocabulary_file` is missing or cannot be opened.
ValueError: `vocabulary_size` is missing or < 1.
ValueError: `num_oov_buckets` is a negative integer.
ValueError: `num_oov_buckets` and `default_value` are both specified.
ValueError: `dtype` is neither string nor integer.
"""
if not vocabulary_file:
raise ValueError('Missing vocabulary_file in {}.'.format(key))
if vocabulary_size is None:
if not gfile.Exists(vocabulary_file):
raise ValueError('vocabulary_file in {} does not exist.'.format(key))
with gfile.GFile(vocabulary_file, mode='rb') as f:
vocabulary_size = sum(1 for _ in f)
logging.info(
'vocabulary_size = %d in %s is inferred from the number of elements '
'in the vocabulary_file %s.', vocabulary_size, key, vocabulary_file)
# `vocabulary_size` isn't required for lookup, but it is for `_num_buckets`.
if vocabulary_size < 1:
raise ValueError('Invalid vocabulary_size in {}.'.format(key))
if num_oov_buckets:
if default_value is not None:
raise ValueError(
'Can\'t specify both num_oov_buckets and default_value in {}.'.format(
key))
if num_oov_buckets < 0:
raise ValueError('Invalid num_oov_buckets {} in {}.'.format(
num_oov_buckets, key))
fc_utils.assert_string_or_int(dtype, prefix='column_name: {}'.format(key))
fc_utils.assert_key_is_string(key)
return VocabularyFileCategoricalColumn(
key=key,
vocabulary_file=vocabulary_file,
vocabulary_size=vocabulary_size,
num_oov_buckets=0 if num_oov_buckets is None else num_oov_buckets,
default_value=-1 if default_value is None else default_value,
dtype=dtype)
@tf_export('feature_column.categorical_column_with_vocabulary_list')
def categorical_column_with_vocabulary_list(key,
vocabulary_list,
dtype=None,
default_value=-1,
num_oov_buckets=0):
"""A `CategoricalColumn` with in-memory vocabulary.
Use this when your inputs are in string or integer format, and you have an
in-memory vocabulary mapping each value to an integer ID. By default,
out-of-vocabulary values are ignored. Use either (but not both) of
`num_oov_buckets` and `default_value` to specify how to include
out-of-vocabulary values.
For input dictionary `features`, `features[key]` is either `Tensor` or
`SparseTensor`. If `Tensor`, missing values can be represented by `-1` for int
and `''` for string, which will be dropped by this feature column.
Example with `num_oov_buckets`:
In the following example, each input in `vocabulary_list` is assigned an ID
0-3 corresponding to its index (e.g., input 'B' produces output 2). All other
inputs are hashed and assigned an ID 4-5.
```python
colors = categorical_column_with_vocabulary_list(
key='colors', vocabulary_list=('R', 'G', 'B', 'Y'),
num_oov_buckets=2)
columns = [colors, ...]
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
linear_prediction, _, _ = linear_model(features, columns)
```
Example with `default_value`:
In the following example, each input in `vocabulary_list` is assigned an ID
0-4 corresponding to its index (e.g., input 'B' produces output 3). All other
inputs are assigned `default_value` 0.
```python
colors = categorical_column_with_vocabulary_list(
key='colors', vocabulary_list=('X', 'R', 'G', 'B', 'Y'), default_value=0)
columns = [colors, ...]
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
linear_prediction, _, _ = linear_model(features, columns)
```
And to make an embedding with either:
```python
columns = [embedding_column(colors, 3),...]
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
dense_tensor = input_layer(features, columns)
```
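  A minimal self-contained sketch (values are illustrative):
  ```python
  import tensorflow as tf
  colors = tf.feature_column.categorical_column_with_vocabulary_list(
      'colors', vocabulary_list=('R', 'G', 'B', 'Y'), num_oov_buckets=2)
  one_hot = tf.feature_column.indicator_column(colors)
  features = {'colors': tf.constant([['B'], ['pink']])}
  dense = tf.keras.layers.DenseFeatures([one_hot])(features)
  # dense has shape (2, 6): 4 vocabulary ids plus 2 OOV buckets. 'B' maps to
  # index 2, while 'pink' is hashed into one of the OOV buckets.
  ```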
Args:
key: A unique string identifying the input feature. It is used as the column
name and the dictionary key for feature parsing configs, feature `Tensor`
objects, and feature columns.
vocabulary_list: An ordered iterable defining the vocabulary. Each feature
is mapped to the index of its value (if present) in `vocabulary_list`.
Must be castable to `dtype`.
dtype: The type of features. Only string and integer types are supported. If
`None`, it will be inferred from `vocabulary_list`.
default_value: The integer ID value to return for out-of-vocabulary feature
values, defaults to `-1`. This can not be specified with a positive
`num_oov_buckets`.
num_oov_buckets: Non-negative integer, the number of out-of-vocabulary
buckets. All out-of-vocabulary inputs will be assigned IDs in the range
`[len(vocabulary_list), len(vocabulary_list)+num_oov_buckets)` based on a
hash of the input value. A positive `num_oov_buckets` can not be specified
with `default_value`.
Returns:
A `CategoricalColumn` with in-memory vocabulary.
Raises:
ValueError: if `vocabulary_list` is empty, or contains duplicate keys.
ValueError: `num_oov_buckets` is a negative integer.
ValueError: `num_oov_buckets` and `default_value` are both specified.
ValueError: if `dtype` is not integer or string.
"""
if (vocabulary_list is None) or (len(vocabulary_list) < 1):
raise ValueError(
'vocabulary_list {} must be non-empty, column_name: {}'.format(
vocabulary_list, key))
if len(set(vocabulary_list)) != len(vocabulary_list):
raise ValueError(
'Duplicate keys in vocabulary_list {}, column_name: {}'.format(
vocabulary_list, key))
vocabulary_dtype = dtypes.as_dtype(np.array(vocabulary_list).dtype)
if num_oov_buckets:
if default_value != -1:
raise ValueError(
'Can\'t specify both num_oov_buckets and default_value in {}.'.format(
key))
if num_oov_buckets < 0:
raise ValueError('Invalid num_oov_buckets {} in {}.'.format(
num_oov_buckets, key))
fc_utils.assert_string_or_int(
vocabulary_dtype, prefix='column_name: {} vocabulary'.format(key))
if dtype is None:
dtype = vocabulary_dtype
elif dtype.is_integer != vocabulary_dtype.is_integer:
raise ValueError(
'dtype {} and vocabulary dtype {} do not match, column_name: {}'.format(
dtype, vocabulary_dtype, key))
fc_utils.assert_string_or_int(dtype, prefix='column_name: {}'.format(key))
fc_utils.assert_key_is_string(key)
return VocabularyListCategoricalColumn(
key=key,
vocabulary_list=tuple(vocabulary_list),
dtype=dtype,
default_value=default_value,
num_oov_buckets=num_oov_buckets)
@tf_export('feature_column.categorical_column_with_identity')
def categorical_column_with_identity(key, num_buckets, default_value=None):
"""A `CategoricalColumn` that returns identity values.
Use this when your inputs are integers in the range `[0, num_buckets)`, and
you want to use the input value itself as the categorical ID. Values outside
this range will result in `default_value` if specified, otherwise it will
fail.
Typically, this is used for contiguous ranges of integer indexes, but
  it doesn't have to be. This might be inefficient, however, if many of the IDs
are unused. Consider `categorical_column_with_hash_bucket` in that case.
For input dictionary `features`, `features[key]` is either `Tensor` or
`SparseTensor`. If `Tensor`, missing values can be represented by `-1` for int
and `''` for string, which will be dropped by this feature column.
  In the following examples, each input in the range `[0, 1000000)` is assigned
  an ID equal to its own value. All other inputs are assigned `default_value` 0.
  Note that a literal 0 in the inputs will result in the same ID as the default.
Linear model:
```python
video_id = categorical_column_with_identity(
key='video_id', num_buckets=1000000, default_value=0)
columns = [video_id, ...]
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
linear_prediction, _, _ = linear_model(features, columns)
```
Embedding for a DNN model:
```python
columns = [embedding_column(video_id, 9),...]
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
dense_tensor = input_layer(features, columns)
```
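  A minimal self-contained sketch (bucket count and values are illustrative):
  ```python
  import tensorflow as tf
  item_id = tf.feature_column.categorical_column_with_identity(
      key='item_id', num_buckets=5, default_value=0)
  one_hot = tf.feature_column.indicator_column(item_id)
  features = {'item_id': tf.constant([[3], [7]])}
  dense = tf.keras.layers.DenseFeatures([one_hot])(features)
  # Row 0 is one-hot at index 3; row 1 (7 is out of range) falls back to
  # default_value 0, so it is one-hot at index 0.
  ```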
Args:
key: A unique string identifying the input feature. It is used as the
column name and the dictionary key for feature parsing configs, feature
`Tensor` objects, and feature columns.
num_buckets: Range of inputs and outputs is `[0, num_buckets)`.
default_value: If set, values outside of range `[0, num_buckets)` will
be replaced with this value. If not set, values >= num_buckets will
cause a failure while values < 0 will be dropped.
Returns:
A `CategoricalColumn` that returns identity values.
Raises:
ValueError: if `num_buckets` is less than one.
ValueError: if `default_value` is not in range `[0, num_buckets)`.
"""
if num_buckets < 1:
raise ValueError(
'num_buckets {} < 1, column_name {}'.format(num_buckets, key))
if (default_value is not None) and (
(default_value < 0) or (default_value >= num_buckets)):
raise ValueError(
'default_value {} not in range [0, {}), column_name {}'.format(
default_value, num_buckets, key))
fc_utils.assert_key_is_string(key)
return IdentityCategoricalColumn(
key=key, number_buckets=num_buckets, default_value=default_value)
@tf_export('feature_column.indicator_column')
def indicator_column(categorical_column):
"""Represents multi-hot representation of given categorical column.
- For DNN model, `indicator_column` can be used to wrap any
    `categorical_column_*` (e.g., to feed to DNN). Consider using
    `embedding_column` if the number of buckets/unique values is large.
- For Wide (aka linear) model, `indicator_column` is the internal
representation for categorical column when passing categorical column
directly (as any element in feature_columns) to `linear_model`. See
`linear_model` for details.
```python
name = indicator_column(categorical_column_with_vocabulary_list(
'name', ['bob', 'george', 'wanda']))
columns = [name, ...]
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
dense_tensor = input_layer(features, columns)
dense_tensor == [[1, 0, 0]] # If "name" bytes_list is ["bob"]
dense_tensor == [[1, 0, 1]] # If "name" bytes_list is ["bob", "wanda"]
dense_tensor == [[2, 0, 0]] # If "name" bytes_list is ["bob", "bob"]
```
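  A minimal self-contained sketch of the multi-hot counts above (names are
  illustrative):
  ```python
  import tensorflow as tf
  name = tf.feature_column.indicator_column(
      tf.feature_column.categorical_column_with_vocabulary_list(
          'name', ['bob', 'george', 'wanda']))
  features = {'name': tf.constant([['bob', 'wanda'], ['bob', 'bob']])}
  dense = tf.keras.layers.DenseFeatures([name])(features)
  # dense == [[1., 0., 1.],
  #           [2., 0., 0.]]
  ```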
Args:
categorical_column: A `CategoricalColumn` which is created by
`categorical_column_with_*` or `crossed_column` functions.
Returns:
An `IndicatorColumn`.
Raises:
ValueError: If `categorical_column` is not CategoricalColumn type.
"""
if not isinstance(categorical_column,
(CategoricalColumn, fc_old._CategoricalColumn)): # pylint: disable=protected-access
raise ValueError(
'Unsupported input type. Input must be a CategoricalColumn. '
'Given: {}'.format(categorical_column))
return IndicatorColumn(categorical_column)
@tf_export('feature_column.weighted_categorical_column')
def weighted_categorical_column(categorical_column,
weight_feature_key,
dtype=dtypes.float32):
"""Applies weight values to a `CategoricalColumn`.
Use this when each of your sparse inputs has both an ID and a value. For
example, if you're representing text documents as a collection of word
frequencies, you can provide 2 parallel sparse input features ('terms' and
'frequencies' below).
Example:
Input `tf.Example` objects:
```proto
[
features {
feature {
key: "terms"
value {bytes_list {value: "very" value: "model"}}
}
feature {
key: "frequencies"
value {float_list {value: 0.3 value: 0.1}}
}
},
features {
feature {
key: "terms"
value {bytes_list {value: "when" value: "course" value: "human"}}
}
feature {
key: "frequencies"
value {float_list {value: 0.4 value: 0.1 value: 0.2}}
}
}
]
```
```python
categorical_column = categorical_column_with_hash_bucket(
column_name='terms', hash_bucket_size=1000)
weighted_column = weighted_categorical_column(
categorical_column=categorical_column, weight_feature_key='frequencies')
columns = [weighted_column, ...]
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
linear_prediction, _, _ = linear_model(features, columns)
```
This assumes the input dictionary contains a `SparseTensor` for key
'terms', and a `SparseTensor` for key 'frequencies'. These 2 tensors must have
the same indices and dense shape.
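  A minimal self-contained sketch with parallel `SparseTensor`s (names and
  values are illustrative):
  ```python
  import tensorflow as tf
  terms = tf.feature_column.categorical_column_with_vocabulary_list(
      'terms', ['very', 'model', 'general'])
  weighted = tf.feature_column.weighted_categorical_column(
      terms, weight_feature_key='frequencies')
  features = {
      'terms': tf.sparse.from_dense(tf.constant([['very', 'model']])),
      'frequencies': tf.sparse.from_dense(tf.constant([[0.3, 0.1]])),
  }
  dense = tf.keras.layers.DenseFeatures(
      [tf.feature_column.indicator_column(weighted)])(features)
  # dense == [[0.3, 0.1, 0.]]: each id contributes its weight instead of 1.
  ```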
Args:
categorical_column: A `CategoricalColumn` created by
`categorical_column_with_*` functions.
weight_feature_key: String key for weight values.
dtype: Type of weights, such as `tf.float32`. Only float and integer weights
are supported.
Returns:
A `CategoricalColumn` composed of two sparse features: one represents id,
the other represents weight (value) of the id feature in that example.
Raises:
ValueError: if `dtype` is not convertible to float.
"""
if (dtype is None) or not (dtype.is_integer or dtype.is_floating):
raise ValueError('dtype {} is not convertible to float.'.format(dtype))
return WeightedCategoricalColumn(
categorical_column=categorical_column,
weight_feature_key=weight_feature_key,
dtype=dtype)
@tf_export('feature_column.crossed_column')
def crossed_column(keys, hash_bucket_size, hash_key=None):
"""Returns a column for performing crosses of categorical features.
Crossed features will be hashed according to `hash_bucket_size`. Conceptually,
the transformation can be thought of as:
Hash(cartesian product of features) % `hash_bucket_size`
For example, if the input features are:
* SparseTensor referred by first key:
```python
shape = [2, 2]
{
[0, 0]: "a"
[1, 0]: "b"
[1, 1]: "c"
}
```
* SparseTensor referred by second key:
```python
shape = [2, 1]
{
[0, 0]: "d"
[1, 0]: "e"
}
```
then crossed feature will look like:
```python
shape = [2, 2]
{
[0, 0]: Hash64("d", Hash64("a")) % hash_bucket_size
[1, 0]: Hash64("e", Hash64("b")) % hash_bucket_size
[1, 1]: Hash64("e", Hash64("c")) % hash_bucket_size
}
```
Here is an example to create a linear model with crosses of string features:
```python
keywords_x_doc_terms = crossed_column(['keywords', 'doc_terms'], 50K)
columns = [keywords_x_doc_terms, ...]
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
linear_prediction = linear_model(features, columns)
```
You could also use vocabulary lookup before crossing:
```python
keywords = categorical_column_with_vocabulary_file(
'keywords', '/path/to/vocabulary/file', vocabulary_size=1K)
keywords_x_doc_terms = crossed_column([keywords, 'doc_terms'], 50K)
columns = [keywords_x_doc_terms, ...]
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
linear_prediction = linear_model(features, columns)
```
If an input feature is of numeric type, you can use
`categorical_column_with_identity`, or `bucketized_column`, as in the example:
```python
# vertical_id is an integer categorical feature.
vertical_id = categorical_column_with_identity('vertical_id', 10K)
price = numeric_column('price')
# bucketized_column converts numerical feature to a categorical one.
bucketized_price = bucketized_column(price, boundaries=[...])
vertical_id_x_price = crossed_column([vertical_id, bucketized_price], 50K)
columns = [vertical_id_x_price, ...]
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
linear_prediction = linear_model(features, columns)
```
To use crossed column in DNN model, you need to add it in an embedding column
as in this example:
```python
vertical_id_x_price = crossed_column([vertical_id, bucketized_price], 50K)
vertical_id_x_price_embedded = embedding_column(vertical_id_x_price, 10)
dense_tensor = input_layer(features, [vertical_id_x_price_embedded, ...])
```
Args:
keys: An iterable identifying the features to be crossed. Each element can
be either:
* string: Will use the corresponding feature which must be of string type.
* `CategoricalColumn`: Will use the transformed tensor produced by this
column. Does not support hashed categorical column.
hash_bucket_size: An int > 1. The number of buckets.
hash_key: Specify the hash_key that will be used by the `FingerprintCat64`
function to combine the crosses fingerprints on SparseCrossOp (optional).
Returns:
A `CrossedColumn`.
Raises:
ValueError: If `len(keys) < 2`.
ValueError: If any of the keys is neither a string nor `CategoricalColumn`.
ValueError: If any of the keys is `HashedCategoricalColumn`.
ValueError: If `hash_bucket_size < 1`.
"""
if not hash_bucket_size or hash_bucket_size < 1:
raise ValueError('hash_bucket_size must be > 1. '
'hash_bucket_size: {}'.format(hash_bucket_size))
if not keys or len(keys) < 2:
raise ValueError(
'keys must be a list with length > 1. Given: {}'.format(keys))
for key in keys:
if (not isinstance(key, six.string_types) and
not isinstance(key, (CategoricalColumn, fc_old._CategoricalColumn))): # pylint: disable=protected-access
raise ValueError(
'Unsupported key type. All keys must be either string, or '
'categorical column except HashedCategoricalColumn. '
'Given: {}'.format(key))
if isinstance(key,
(HashedCategoricalColumn, fc_old._HashedCategoricalColumn)): # pylint: disable=protected-access
raise ValueError(
'categorical_column_with_hash_bucket is not supported for crossing. '
'Hashing before crossing will increase probability of collision. '
'Instead, use the feature name as a string. Given: {}'.format(key))
return CrossedColumn(
keys=tuple(keys), hash_bucket_size=hash_bucket_size, hash_key=hash_key)
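# Illustrative sketch, not part of the public API: crosses a bucketized
# numeric column with a raw string feature, mirroring the docstring above.
# The feature keys 'price' and 'keywords', the boundaries, and the bucket
# size are hypothetical values chosen for the example.
def _example_crossed_column():  # pragma: no cover
  price = numeric_column('price')
  bucketized_price = bucketized_column(price, boundaries=[10., 100., 1000.])
  price_x_keywords = crossed_column(
      [bucketized_price, 'keywords'], hash_bucket_size=1000)
  # The number of buckets of the cross equals hash_bucket_size.
  return price_x_keywords.num_buckets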
@six.add_metaclass(abc.ABCMeta)
class FeatureColumn(object):
"""Represents a feature column abstraction.
WARNING: Do not subclass this layer unless you know what you are doing:
the API is subject to future changes.
To distinguish between the concept of a feature family and a specific binary
feature within a family, we refer to a feature family like "country" as a
feature column. For example, we can have a feature in a `tf.Example` format:
{key: "country", value: [ "US" ]}
In this example the value of the feature is "US" and "country" refers to the
column of the feature.
This class is an abstract class. Users should not create instances of this.
"""
@abc.abstractproperty
def name(self):
"""Returns string. Used for naming."""
pass
def __lt__(self, other):
"""Allows feature columns to be sorted in Python 3 as they are in Python 2.
Feature columns need to occasionally be sortable, for example when used as
keys in a features dictionary passed to a layer.
In CPython, `__lt__` must be defined for all objects in the
sequence being sorted.
If any objects in the sequence being sorted do not have an `__lt__` method
compatible with feature column objects (such as strings), then CPython will
fall back to using the `__gt__` method below.
https://docs.python.org/3/library/stdtypes.html#list.sort
Args:
other: The other object to compare to.
Returns:
True if the string representation of this object is lexicographically less
than the string representation of `other`. For FeatureColumn objects,
this looks like "<__main__.FeatureColumn object at 0xa>".
"""
return str(self) < str(other)
def __gt__(self, other):
"""Allows feature columns to be sorted in Python 3 as they are in Python 2.
Feature columns need to occasionally be sortable, for example when used as
keys in a features dictionary passed to a layer.
`__gt__` is called when the "other" object being compared during the sort
does not have `__lt__` defined.
Example:
```
# __lt__ only class
class A():
def __lt__(self, other): return str(self) < str(other)
a = A()
a < "b" # True
"0" < a # Error
# __lt__ and __gt__ class
class B():
def __lt__(self, other): return str(self) < str(other)
def __gt__(self, other): return str(self) > str(other)
b = B()
b < "c" # True
"0" < b # True
```
Args:
other: The other object to compare to.
Returns:
True if the string representation of this object is lexicographically
greater than the string representation of `other`. For FeatureColumn
objects, this looks like "<__main__.FeatureColumn object at 0xa>".
"""
return str(self) > str(other)
@abc.abstractmethod
def transform_feature(self, transformation_cache, state_manager):
"""Returns intermediate representation (usually a `Tensor`).
Uses `transformation_cache` to create an intermediate representation
(usually a `Tensor`) that other feature columns can use.
Example usage of `transformation_cache`:
Let's say a Feature column depends on raw feature ('raw') and another
`FeatureColumn` (input_fc). To access corresponding `Tensor`s,
transformation_cache will be used as follows:
```python
raw_tensor = transformation_cache.get('raw', state_manager)
fc_tensor = transformation_cache.get(input_fc, state_manager)
```
Args:
transformation_cache: A `FeatureTransformationCache` object to access
features.
state_manager: A `StateManager` to create / access resources such as
lookup tables.
Returns:
Transformed feature `Tensor`.
"""
pass
@abc.abstractproperty
def parse_example_spec(self):
"""Returns a `tf.Example` parsing spec as dict.
It is used to build the parsing spec for `tf.io.parse_example`. The returned spec is
a dict from keys ('string') to `VarLenFeature`, `FixedLenFeature`, and other
supported objects. Please check documentation of `tf.io.parse_example` for
all supported spec objects.
Let's say a Feature column depends on raw feature ('raw') and another
`FeatureColumn` (input_fc). One possible implementation of
parse_example_spec is as follows:
```python
spec = {'raw': tf.io.FixedLenFeature(...)}
spec.update(input_fc.parse_example_spec)
return spec
```
"""
pass
def create_state(self, state_manager):
"""Uses the `state_manager` to create state for the FeatureColumn.
Args:
state_manager: A `StateManager` to create / access resources such as
lookup tables and variables.
"""
pass
@abc.abstractproperty
def _is_v2_column(self):
"""Returns whether this FeatureColumn is fully conformant to the new API.
This is needed for composition-type cases where, for example, an
EmbeddingColumn might take in old categorical columns as input, in which
case the old API should be used.
"""
pass
@abc.abstractproperty
def parents(self):
"""Returns a list of immediate raw feature and FeatureColumn dependencies.
For example:
# For the following feature columns
a = numeric_column('f1')
c = crossed_column(a, 'f2')
# The expected parents are:
a.parents = ['f1']
c.parents = [a, 'f2']
"""
pass
def get_config(self):
"""Returns the config of the feature column.
A FeatureColumn config is a Python dictionary (serializable) containing the
configuration of a FeatureColumn. The same FeatureColumn can be
reinstantiated later from this configuration.
The config of a feature column does not include information about feature
columns depending on it nor the FeatureColumn class name.
Example with (de)serialization practices followed in this file:
```python
class SerializationExampleFeatureColumn(
FeatureColumn, collections.namedtuple(
'SerializationExampleFeatureColumn',
('dimension', 'parent', 'dtype', 'normalizer_fn'))):
def get_config(self):
# Create a dict from the namedtuple.
# Python attribute literals can be directly copied from / to the config.
# For example 'dimension', assuming it is an integer literal.
config = dict(zip(self._fields, self))
# (De)serialization of parent FeatureColumns should use the provided
# (de)serialize_feature_column() methods that take care of de-duping.
config['parent'] = serialize_feature_column(self.parent)
# Many objects provide custom (de)serialization e.g: for tf.DType
# tf.DType.name, tf.as_dtype() can be used.
config['dtype'] = self.dtype.name
# Non-trivial dependencies should be Keras-(de)serializable.
config['normalizer_fn'] = generic_utils.serialize_keras_object(
self.normalizer_fn)
return config
@classmethod
def from_config(cls, config, custom_objects=None, columns_by_name=None):
# This should do the inverse transform from `get_config` and construct
# the namedtuple.
kwargs = config.copy()
kwargs['parent'] = deserialize_feature_column(
config['parent'], custom_objects, columns_by_name)
kwargs['dtype'] = dtypes.as_dtype(config['dtype'])
kwargs['normalizer_fn'] = generic_utils.deserialize_keras_object(
config['normalizer_fn'], custom_objects=custom_objects)
return cls(**kwargs)
```
Returns:
A serializable Dict that can be used to deserialize the object with
from_config.
"""
return self._get_config()
def _get_config(self):
raise NotImplementedError('Must be implemented in subclasses.')
@classmethod
def from_config(cls, config, custom_objects=None, columns_by_name=None):
"""Creates a FeatureColumn from its config.
This method should be the reverse of `get_config`, capable of instantiating
the same FeatureColumn from the config dictionary. See `get_config` for an
example of common (de)serialization practices followed in this file.
TODO(b/118939620): This is a private method until consensus is reached on
supporting object deserialization deduping within Keras.
Args:
config: A Dict config acquired with `get_config`.
custom_objects: Optional dictionary mapping names (strings) to custom
classes or functions to be considered during deserialization.
columns_by_name: A Dict[String, FeatureColumn] of existing columns in
order to avoid duplication. Should be passed to any calls to
deserialize_feature_column().
Returns:
A FeatureColumn for the input config.
"""
return cls._from_config(config, custom_objects, columns_by_name)
@classmethod
def _from_config(cls, config, custom_objects=None, columns_by_name=None):
raise NotImplementedError('Must be implemented in subclasses.')
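# Illustrative sketch, not part of the public API: the __lt__/__gt__ methods
# above make feature columns sortable alongside plain string keys, e.g. when
# ordering the keys of a features dictionary. Column names are hypothetical.
def _example_sort_feature_columns():  # pragma: no cover
  mixed = [numeric_column('b'), 'a', numeric_column('c')]
  # Comparisons fall back on string representations, so mixing strings and
  # FeatureColumn objects does not raise a TypeError in Python 3.
  return sorted(mixed)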
class DenseColumn(FeatureColumn):
"""Represents a column which can be represented as `Tensor`.
Some examples of this type are: numeric_column, embedding_column,
indicator_column.
"""
@abc.abstractproperty
def variable_shape(self):
"""`TensorShape` of `get_dense_tensor`, without batch dimension."""
pass
@abc.abstractmethod
def get_dense_tensor(self, transformation_cache, state_manager):
"""Returns a `Tensor`.
The output of this function will be used by model-builder-functions. For
example the pseudo code of `input_layer` will be like:
```python
def input_layer(features, feature_columns, ...):
outputs = [fc.get_dense_tensor(...) for fc in feature_columns]
return tf.concat(outputs)
```
Args:
transformation_cache: A `FeatureTransformationCache` object to access
features.
state_manager: A `StateManager` to create / access resources such as
lookup tables.
Returns:
`Tensor` of shape [batch_size] + `variable_shape`.
"""
pass
def is_feature_column_v2(feature_columns):
"""Returns True if all feature columns are V2."""
for feature_column in feature_columns:
if not isinstance(feature_column, FeatureColumn):
return False
if not feature_column._is_v2_column: # pylint: disable=protected-access
return False
return True
def _create_weighted_sum(column, transformation_cache, state_manager,
sparse_combiner, weight_var):
"""Creates a weighted sum for a dense/categorical column for linear_model."""
if isinstance(column, CategoricalColumn):
return _create_categorical_column_weighted_sum(
column=column,
transformation_cache=transformation_cache,
state_manager=state_manager,
sparse_combiner=sparse_combiner,
weight_var=weight_var)
else:
return _create_dense_column_weighted_sum(
column=column,
transformation_cache=transformation_cache,
state_manager=state_manager,
weight_var=weight_var)
def _create_dense_column_weighted_sum(column, transformation_cache,
state_manager, weight_var):
"""Create a weighted sum of a dense column for linear_model."""
tensor = column.get_dense_tensor(transformation_cache, state_manager)
num_elements = column.variable_shape.num_elements()
batch_size = array_ops.shape(tensor)[0]
tensor = array_ops.reshape(tensor, shape=(batch_size, num_elements))
return math_ops.matmul(tensor, weight_var, name='weighted_sum')
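# Illustrative sketch, not part of the public API: the dense weighted sum
# above is a reshape followed by a matmul. This reproduces it with a constant
# input tensor and a constant stand-in for the weight variable (both
# hypothetical values chosen for the example).
def _example_dense_weighted_sum():  # pragma: no cover
  from tensorflow.python.framework import constant_op  # local import for the sketch
  tensor = constant_op.constant([[1., 2.], [3., 4.]])  # [batch_size, num_elements]
  weight_var = constant_op.constant([[0.5], [0.25]])   # [num_elements, units]
  batch_size = array_ops.shape(tensor)[0]
  flattened = array_ops.reshape(tensor, shape=(batch_size, 2))
  # Result has shape [batch_size, 1] and value [[1.0], [2.5]].
  return math_ops.matmul(flattened, weight_var, name='weighted_sum')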
class CategoricalColumn(FeatureColumn):
"""Represents a categorical feature.
A categorical feature is typically handled with a `tf.sparse.SparseTensor`
of IDs.
"""
IdWeightPair = collections.namedtuple( # pylint: disable=invalid-name
'IdWeightPair', ('id_tensor', 'weight_tensor'))
@abc.abstractproperty
def num_buckets(self):
"""Returns number of buckets in this sparse feature."""
pass
@abc.abstractmethod
def get_sparse_tensors(self, transformation_cache, state_manager):
"""Returns an IdWeightPair.
`IdWeightPair` is a pair of `SparseTensor`s which represents ids and
weights.
`IdWeightPair.id_tensor` is typically a `batch_size` x `num_buckets`
`SparseTensor` of `int64`. `IdWeightPair.weight_tensor` is either a
`SparseTensor` of `float` or `None` to indicate all weights should be
taken to be 1. If specified, `weight_tensor` must have exactly the same
shape and indices as `id_tensor`. The expected `SparseTensor` is the same as
the parsing output of a `VarLenFeature`, which is a ragged matrix.
Args:
transformation_cache: A `FeatureTransformationCache` object to access
features.
state_manager: A `StateManager` to create / access resources such as
lookup tables.
"""
pass
def _create_categorical_column_weighted_sum(
column, transformation_cache, state_manager, sparse_combiner, weight_var):
# pylint: disable=g-doc-return-or-yield,g-doc-args
"""Create a weighted sum of a categorical column for linear_model.
Note to maintainer: As an implementation detail, the weighted sum is
implemented via embedding_lookup_sparse for efficiency. Mathematically,
the two are the same.
To be specific, a categorical column can conceptually be treated as a
multi-hot vector. Say:
```python
x = [0 0 1] # categorical column input
w = [a b c] # weights
```
The weighted sum is `c` in this case, which is the same as `w[2]`.
Another example is
```python
x = [0 1 1] # categorical column input
w = [a b c] # weights
```
The weighted sum is `b + c` in this case, which is the same as `w[1] + w[2]`.
For both cases, we can implement weighted sum via embedding_lookup with
sparse_combiner = "sum".
"""
sparse_tensors = column.get_sparse_tensors(transformation_cache,
state_manager)
id_tensor = sparse_ops.sparse_reshape(sparse_tensors.id_tensor, [
array_ops.shape(sparse_tensors.id_tensor)[0], -1
])
weight_tensor = sparse_tensors.weight_tensor
if weight_tensor is not None:
weight_tensor = sparse_ops.sparse_reshape(
weight_tensor, [array_ops.shape(weight_tensor)[0], -1])
return embedding_ops.safe_embedding_lookup_sparse(
weight_var,
id_tensor,
sparse_weights=weight_tensor,
combiner=sparse_combiner,
name='weighted_sum')
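# Illustrative sketch, not part of the public API: reproduces the multi-hot
# equivalence described above with concrete numbers. The sparse id tensor
# encodes x = [0 1 1] (ids 1 and 2 active) and the constant weight matrix
# stands in for w = [1. 2. 3.], so the sum-combined lookup yields 2. + 3.
# All values are hypothetical.
def _example_categorical_weighted_sum():  # pragma: no cover
  from tensorflow.python.framework import constant_op  # local import for the sketch
  weight_var = constant_op.constant([[1.], [2.], [3.]])  # [num_buckets, 1]
  id_tensor = sparse_tensor_lib.SparseTensor(
      indices=[[0, 0], [0, 1]],
      values=constant_op.constant([1, 2], dtype=dtypes.int64),
      dense_shape=[1, 2])
  return embedding_ops.safe_embedding_lookup_sparse(
      weight_var, id_tensor, sparse_weights=None, combiner='sum')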
class SequenceDenseColumn(FeatureColumn):
"""Represents dense sequence data."""
TensorSequenceLengthPair = collections.namedtuple( # pylint: disable=invalid-name
'TensorSequenceLengthPair', ('dense_tensor', 'sequence_length'))
@abc.abstractmethod
def get_sequence_dense_tensor(self, transformation_cache, state_manager):
"""Returns a `TensorSequenceLengthPair`.
Args:
transformation_cache: A `FeatureTransformationCache` object to access
features.
state_manager: A `StateManager` to create / access resources such as
lookup tables.
"""
pass
class FeatureTransformationCache(object):
"""Handles caching of transformations while building the model.
`FeatureColumn` specifies how to digest an input column to the network. Some
feature columns require data transformations. This class caches those
transformations.
Some features may be used in more than one place. For example, one can use a
bucketized feature by itself and in a cross with it. In that case we
should create only one bucketization op instead of creating ops for each
feature column separately. To handle re-use of transformed columns,
`FeatureTransformationCache` caches all previously transformed columns.
Example:
We're trying to use the following `FeatureColumn`s:
```python
bucketized_age = fc.bucketized_column(fc.numeric_column("age"), ...)
keywords = fc.categorical_column_with_hash_bucket("keywords", ...)
age_X_keywords = fc.crossed_column([bucketized_age, "keywords"], ...)
... = linear_model(features,
[bucketized_age, keywords, age_X_keywords])
```
If we transform each column independently, then we'll get duplication of
bucketization (one for cross, one for bucketization itself).
The `FeatureTransformationCache` eliminates this duplication.
"""
def __init__(self, features):
"""Creates a `FeatureTransformationCache`.
Args:
features: A mapping from feature column to objects that are `Tensor` or
`SparseTensor`, or can be converted to same via
`sparse_tensor.convert_to_tensor_or_sparse_tensor`. A `string` key
signifies a base feature (not-transformed). A `FeatureColumn` key
means that this `Tensor` is the output of an existing `FeatureColumn`
which can be reused.
"""
self._features = features.copy()
self._feature_tensors = {}
def get(self, key, state_manager, training=None):
"""Returns a `Tensor` for the given key.
A `str` key is used to access a base feature (not-transformed). When a
`FeatureColumn` is passed, the transformed feature is returned if it
already exists, otherwise the given `FeatureColumn` is asked to provide its
transformed output, which is then cached.
Args:
key: a `str` or a `FeatureColumn`.
state_manager: A StateManager object that holds the FeatureColumn state.
training: Boolean indicating whether the column is being used in
training mode. This argument is passed to the transform_feature method
of any `FeatureColumn` that takes a `training` argument. For example, if
a `FeatureColumn` performed dropout, it could expose a `training`
argument to control whether the dropout should be applied.
Returns:
The transformed `Tensor` corresponding to the `key`.
Raises:
ValueError: if key is not found or a transformed `Tensor` cannot be
computed.
"""
if key in self._feature_tensors:
# FeatureColumn is already transformed or converted.
return self._feature_tensors[key]
if key in self._features:
feature_tensor = self._get_raw_feature_as_tensor(key)
self._feature_tensors[key] = feature_tensor
return feature_tensor
if isinstance(key, six.string_types):
raise ValueError('Feature {} is not in features dictionary.'.format(key))
if not isinstance(key, FeatureColumn):
raise TypeError('"key" must be either a "str" or "FeatureColumn". '
'Provided: {}'.format(key))
column = key
logging.debug('Transforming feature_column %s.', column)
# Some columns may need information about whether the transformation is
# happening in training or prediction mode, but not all columns expose this
# argument.
try:
transformed = column.transform_feature(
self, state_manager, training=training)
except TypeError:
transformed = column.transform_feature(self, state_manager)
if transformed is None:
raise ValueError('Column {} is not supported.'.format(column.name))
self._feature_tensors[column] = transformed
return transformed
def _get_raw_feature_as_tensor(self, key):
"""Gets the raw_feature (keyed by `key`) as `tensor`.
The raw feature is converted to a (sparse) tensor and its rank may be
expanded. For both `Tensor` and `SparseTensor`, the rank is expanded (to 2)
if it is 1. Dynamic rank is supported as well. A rank-0 raw feature raises
an error, as it is not supported.
Args:
key: A `str` key to access the raw feature.
Returns:
A `Tensor` or `SparseTensor`.
Raises:
ValueError: if the raw feature has rank 0.
"""
raw_feature = self._features[key]
feature_tensor = sparse_tensor_lib.convert_to_tensor_or_sparse_tensor(
raw_feature)
def expand_dims(input_tensor):
# Input_tensor must have rank 1.
if isinstance(input_tensor, sparse_tensor_lib.SparseTensor):
return sparse_ops.sparse_reshape(
input_tensor, [array_ops.shape(input_tensor)[0], 1])
else:
return array_ops.expand_dims(input_tensor, -1)
rank = feature_tensor.get_shape().ndims
if rank is not None:
if rank == 0:
raise ValueError(
'Feature (key: {}) cannot have rank 0. Given: {}'.format(
key, feature_tensor))
return feature_tensor if rank != 1 else expand_dims(feature_tensor)
# Handle dynamic rank.
with ops.control_dependencies([
check_ops.assert_positive(
array_ops.rank(feature_tensor),
message='Feature (key: {}) cannot have rank 0. Given: {}'.format(
key, feature_tensor))]):
return control_flow_ops.cond(
math_ops.equal(1, array_ops.rank(feature_tensor)),
lambda: expand_dims(feature_tensor),
lambda: feature_tensor)
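# Illustrative sketch, not part of the public API: shows the cache resolving
# a raw string key and a FeatureColumn key. NumericColumn does not use the
# state manager, so passing None for it is sufficient here. The 'age' feature
# and its values are hypothetical.
def _example_transformation_cache():  # pragma: no cover
  from tensorflow.python.framework import constant_op  # local import for the sketch
  cache = FeatureTransformationCache({'age': constant_op.constant([25, 31])})
  age = numeric_column('age')
  raw = cache.get('age', None)        # rank-1 input expanded to shape [2, 1]
  transformed = cache.get(age, None)  # cast to float32 by NumericColumn
  return raw, transformed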
# TODO(ptucker): Move to third_party/tensorflow/python/ops/sparse_ops.py
def _to_sparse_input_and_drop_ignore_values(input_tensor, ignore_value=None):
"""Converts a `Tensor` to a `SparseTensor`, dropping ignore_value cells.
If `input_tensor` is already a `SparseTensor`, just return it.
Args:
input_tensor: A string or integer `Tensor`.
ignore_value: Entries in `input_tensor` equal to this value will be
absent from the resulting `SparseTensor`. If `None`, the default value of
`input_tensor`'s dtype will be used ('' for `str`, -1 for `int`).
Returns:
A `SparseTensor` with the same shape as `input_tensor`.
Raises:
ValueError: when `input_tensor`'s rank is `None`.
"""
input_tensor = sparse_tensor_lib.convert_to_tensor_or_sparse_tensor(
input_tensor)
if isinstance(input_tensor, sparse_tensor_lib.SparseTensor):
return input_tensor
with ops.name_scope(None, 'to_sparse_input', (input_tensor, ignore_value,)):
if ignore_value is None:
if input_tensor.dtype == dtypes.string:
# Handled explicitly because TF strings are converted to numpy objects by default.
ignore_value = ''
elif input_tensor.dtype.is_integer:
ignore_value = -1 # -1 has a special meaning of missing feature
else:
# NOTE: `as_numpy_dtype` is a property, so with the parentheses this is
# constructing a new numpy object of the given type, which yields the
# default value for that type.
ignore_value = input_tensor.dtype.as_numpy_dtype()
ignore_value = math_ops.cast(
ignore_value, input_tensor.dtype, name='ignore_value')
indices = array_ops.where_v2(
math_ops.not_equal(input_tensor, ignore_value), name='indices')
return sparse_tensor_lib.SparseTensor(
indices=indices,
values=array_ops.gather_nd(input_tensor, indices, name='values'),
dense_shape=array_ops.shape(
input_tensor, out_type=dtypes.int64, name='dense_shape'))
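# Illustrative sketch, not part of the public API: converts a dense string
# tensor to a SparseTensor, dropping the default ignore value '' for strings.
# The input values are hypothetical.
def _example_to_sparse_input():  # pragma: no cover
  from tensorflow.python.framework import constant_op  # local import for the sketch
  dense = constant_op.constant([['a', ''], ['b', 'c']])
  # Only the three non-empty cells survive in the resulting SparseTensor.
  return _to_sparse_input_and_drop_ignore_values(dense)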
def _normalize_feature_columns(feature_columns):
"""Normalizes the `feature_columns` input.
This method converts `feature_columns` to a list as best it can. In
addition, it verifies the type and other properties of `feature_columns`
required by downstream libraries.
Args:
feature_columns: The raw feature columns, usually passed by users.
Returns:
The normalized feature column list.
Raises:
ValueError: for any invalid inputs, such as empty, duplicated names, etc.
"""
if isinstance(feature_columns, FeatureColumn):
feature_columns = [feature_columns]
if isinstance(feature_columns, collections_abc.Iterator):
feature_columns = list(feature_columns)
if isinstance(feature_columns, dict):
raise ValueError('Expected feature_columns to be iterable, found dict.')
for column in feature_columns:
if not isinstance(column, FeatureColumn):
raise ValueError('Items of feature_columns must be a FeatureColumn. '
'Given (type {}): {}.'.format(type(column), column))
if not feature_columns:
raise ValueError('feature_columns must not be empty.')
name_to_column = {}
for column in feature_columns:
if column.name in name_to_column:
raise ValueError('Duplicate feature column name found for columns: {} '
'and {}. This usually means that these columns refer to '
'same base feature. Either one must be discarded or a '
'duplicated but renamed item must be inserted in '
'features dict.'.format(column,
name_to_column[column.name]))
name_to_column[column.name] = column
return sorted(feature_columns, key=lambda x: x.name)
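# Illustrative sketch, not part of the public API: normalization wraps the
# input in a list if needed and returns the columns sorted by name; duplicate
# names would raise a ValueError. Column names are hypothetical.
def _example_normalize_feature_columns():  # pragma: no cover
  columns = _normalize_feature_columns([numeric_column('b'), numeric_column('a')])
  # Returns the column names sorted: ['a', 'b'].
  return [column.name for column in columns]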
class NumericColumn(
DenseColumn,
fc_old._DenseColumn, # pylint: disable=protected-access
collections.namedtuple(
'NumericColumn',
('key', 'shape', 'default_value', 'dtype', 'normalizer_fn'))):
"""see `numeric_column`."""
@property
def _is_v2_column(self):
return True
@property
def name(self):
"""See `FeatureColumn` base class."""
return self.key
@property
def parse_example_spec(self):
"""See `FeatureColumn` base class."""
return {
self.key:
parsing_ops.FixedLenFeature(self.shape, self.dtype,
self.default_value)
}
@property
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _parse_example_spec(self):
return self.parse_example_spec
def _transform_input_tensor(self, input_tensor):
if isinstance(input_tensor, sparse_tensor_lib.SparseTensor):
raise ValueError(
'The corresponding Tensor of numerical column must be a Tensor. '
'SparseTensor is not supported. key: {}'.format(self.key))
if self.normalizer_fn is not None:
input_tensor = self.normalizer_fn(input_tensor)
return math_ops.cast(input_tensor, dtypes.float32)
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _transform_feature(self, inputs):
input_tensor = inputs.get(self.key)
return self._transform_input_tensor(input_tensor)
def transform_feature(self, transformation_cache, state_manager):
"""See `FeatureColumn` base class.
In this case, we apply the `normalizer_fn` to the input tensor.
Args:
transformation_cache: A `FeatureTransformationCache` object to access
features.
state_manager: A `StateManager` to create / access resources such as
lookup tables.
Returns:
Normalized input tensor.
Raises:
ValueError: If a SparseTensor is passed in.
"""
input_tensor = transformation_cache.get(self.key, state_manager)
return self._transform_input_tensor(input_tensor)
@property
def variable_shape(self):
"""See `DenseColumn` base class."""
return tensor_shape.TensorShape(self.shape)
@property
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _variable_shape(self):
return self.variable_shape
def get_dense_tensor(self, transformation_cache, state_manager):
"""Returns dense `Tensor` representing numeric feature.
Args:
transformation_cache: A `FeatureTransformationCache` object to access
features.
state_manager: A `StateManager` to create / access resources such as
lookup tables.
Returns:
Dense `Tensor` created within `transform_feature`.
"""
# Feature has been already transformed. Return the intermediate
# representation created by _transform_feature.
return transformation_cache.get(self, state_manager)
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _get_dense_tensor(self, inputs, weight_collections=None, trainable=None):
del weight_collections
del trainable
return inputs.get(self)
@property
def parents(self):
"""See 'FeatureColumn` base class."""
return [self.key]
def get_config(self):
"""See 'FeatureColumn` base class."""
config = dict(zip(self._fields, self))
config['normalizer_fn'] = generic_utils.serialize_keras_object(
self.normalizer_fn)
config['dtype'] = self.dtype.name
return config
@classmethod
def from_config(cls, config, custom_objects=None, columns_by_name=None):
"""See 'FeatureColumn` base class."""
_check_config_keys(config, cls._fields)
kwargs = _standardize_and_copy_config(config)
kwargs['normalizer_fn'] = generic_utils.deserialize_keras_object(
config['normalizer_fn'], custom_objects=custom_objects)
kwargs['dtype'] = dtypes.as_dtype(config['dtype'])
return cls(**kwargs)
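# Illustrative sketch, not part of the public API: a NumericColumn with a
# normalizer_fn, exercised through a FeatureTransformationCache. The 'price'
# feature, its values, and the normalizer are hypothetical.
def _example_numeric_column_normalizer():  # pragma: no cover
  from tensorflow.python.framework import constant_op  # local import for the sketch
  price = numeric_column('price', normalizer_fn=lambda x: (x - 2.0) / 2.0)
  cache = FeatureTransformationCache(
      {'price': constant_op.constant([[2.0], [4.0]])})
  # Yields [[0.0], [1.0]] as a float32 tensor.
  return price.get_dense_tensor(cache, None)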
class BucketizedColumn(
DenseColumn,
CategoricalColumn,
fc_old._DenseColumn, # pylint: disable=protected-access
fc_old._CategoricalColumn, # pylint: disable=protected-access
collections.namedtuple('BucketizedColumn',
('source_column', 'boundaries'))):
"""See `bucketized_column`."""
@property
def _is_v2_column(self):
return (isinstance(self.source_column, FeatureColumn) and
self.source_column._is_v2_column) # pylint: disable=protected-access
@property
def name(self):
"""See `FeatureColumn` base class."""
return '{}_bucketized'.format(self.source_column.name)
@property
def parse_example_spec(self):
"""See `FeatureColumn` base class."""
return self.source_column.parse_example_spec
@property
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _parse_example_spec(self):
return self.source_column._parse_example_spec # pylint: disable=protected-access
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _transform_feature(self, inputs):
"""Returns bucketized categorical `source_column` tensor."""
source_tensor = inputs.get(self.source_column)
return math_ops._bucketize( # pylint: disable=protected-access
source_tensor,
boundaries=self.boundaries)
def transform_feature(self, transformation_cache, state_manager):
"""Returns bucketized categorical `source_column` tensor."""
source_tensor = transformation_cache.get(self.source_column, state_manager)
return math_ops._bucketize( # pylint: disable=protected-access
source_tensor,
boundaries=self.boundaries)
@property
def variable_shape(self):
"""See `DenseColumn` base class."""
return tensor_shape.TensorShape(
tuple(self.source_column.shape) + (len(self.boundaries) + 1,))
@property
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _variable_shape(self):
return self.variable_shape
def _get_dense_tensor_for_input_tensor(self, input_tensor):
return array_ops.one_hot(
indices=math_ops.cast(input_tensor, dtypes.int64),
depth=len(self.boundaries) + 1,
on_value=1.,
off_value=0.)
def get_dense_tensor(self, transformation_cache, state_manager):
"""Returns one hot encoded dense `Tensor`."""
input_tensor = transformation_cache.get(self, state_manager)
return self._get_dense_tensor_for_input_tensor(input_tensor)
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _get_dense_tensor(self, inputs, weight_collections=None, trainable=None):
del weight_collections
del trainable
input_tensor = inputs.get(self)
return self._get_dense_tensor_for_input_tensor(input_tensor)
@property
def num_buckets(self):
"""See `CategoricalColumn` base class."""
# By construction, source_column is always one-dimensional.
return (len(self.boundaries) + 1) * self.source_column.shape[0]
@property
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _num_buckets(self):
return self.num_buckets
def _get_sparse_tensors_for_input_tensor(self, input_tensor):
batch_size = array_ops.shape(input_tensor)[0]
# By construction, source_column is always one-dimensional.
source_dimension = self.source_column.shape[0]
i1 = array_ops.reshape(
array_ops.tile(
array_ops.expand_dims(math_ops.range(0, batch_size), 1),
[1, source_dimension]),
(-1,))
i2 = array_ops.tile(math_ops.range(0, source_dimension), [batch_size])
# Flatten the bucket indices and unique them across dimensions
# E.g. 2nd dimension indices will range from k to 2*k-1 with k buckets
bucket_indices = (
array_ops.reshape(input_tensor, (-1,)) +
(len(self.boundaries) + 1) * i2)
indices = math_ops.cast(
array_ops.transpose(array_ops.stack((i1, i2))), dtypes.int64)
dense_shape = math_ops.cast(
array_ops.stack([batch_size, source_dimension]), dtypes.int64)
sparse_tensor = sparse_tensor_lib.SparseTensor(
indices=indices,
values=bucket_indices,
dense_shape=dense_shape)
return CategoricalColumn.IdWeightPair(sparse_tensor, None)
def get_sparse_tensors(self, transformation_cache, state_manager):
"""Converts dense inputs to SparseTensor so downstream code can use it."""
input_tensor = transformation_cache.get(self, state_manager)
return self._get_sparse_tensors_for_input_tensor(input_tensor)
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _get_sparse_tensors(self, inputs, weight_collections=None,
trainable=None):
"""Converts dense inputs to SparseTensor so downstream code can use it."""
del weight_collections
del trainable
input_tensor = inputs.get(self)
return self._get_sparse_tensors_for_input_tensor(input_tensor)
@property
def parents(self):
"""See 'FeatureColumn` base class."""
return [self.source_column]
def get_config(self):
"""See 'FeatureColumn` base class."""
from tensorflow.python.feature_column.serialization import serialize_feature_column # pylint: disable=g-import-not-at-top
config = dict(zip(self._fields, self))
config['source_column'] = serialize_feature_column(self.source_column)
return config
@classmethod
def from_config(cls, config, custom_objects=None, columns_by_name=None):
"""See 'FeatureColumn` base class."""
from tensorflow.python.feature_column.serialization import deserialize_feature_column # pylint: disable=g-import-not-at-top
_check_config_keys(config, cls._fields)
kwargs = _standardize_and_copy_config(config)
kwargs['source_column'] = deserialize_feature_column(
config['source_column'], custom_objects, columns_by_name)
return cls(**kwargs)
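# Illustrative sketch, not part of the public API: bucketizes a numeric
# feature and reads it both as a dense one-hot tensor and as a sparse
# categorical id tensor. The 'price' feature and boundaries are hypothetical.
def _example_bucketized_column():  # pragma: no cover
  from tensorflow.python.framework import constant_op  # local import for the sketch
  price = numeric_column('price')
  bucketized = bucketized_column(price, boundaries=[10., 100.])
  cache = FeatureTransformationCache(
      {'price': constant_op.constant([[5.], [50.]])})
  dense = bucketized.get_dense_tensor(cache, None)     # one-hot over 3 buckets
  sparse = bucketized.get_sparse_tensors(cache, None)  # bucket ids 0 and 1
  return dense, sparse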
class EmbeddingColumn(
DenseColumn,
SequenceDenseColumn,
fc_old._DenseColumn, # pylint: disable=protected-access
fc_old._SequenceDenseColumn, # pylint: disable=protected-access
collections.namedtuple(
'EmbeddingColumn',
('categorical_column', 'dimension', 'combiner', 'initializer',
'ckpt_to_load_from', 'tensor_name_in_ckpt', 'max_norm', 'trainable',
'use_safe_embedding_lookup'))):
"""See `embedding_column`."""
def __new__(cls,
categorical_column,
dimension,
combiner,
initializer,
ckpt_to_load_from,
tensor_name_in_ckpt,
max_norm,
trainable,
use_safe_embedding_lookup=True):
return super(EmbeddingColumn, cls).__new__(
cls,
categorical_column=categorical_column,
dimension=dimension,
combiner=combiner,
initializer=initializer,
ckpt_to_load_from=ckpt_to_load_from,
tensor_name_in_ckpt=tensor_name_in_ckpt,
max_norm=max_norm,
trainable=trainable,
use_safe_embedding_lookup=use_safe_embedding_lookup)
@property
def _is_v2_column(self):
return (isinstance(self.categorical_column, FeatureColumn) and
self.categorical_column._is_v2_column) # pylint: disable=protected-access
@property
def name(self):
"""See `FeatureColumn` base class."""
return '{}_embedding'.format(self.categorical_column.name)
@property
def parse_example_spec(self):
"""See `FeatureColumn` base class."""
return self.categorical_column.parse_example_spec
@property
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _parse_example_spec(self):
return self.categorical_column._parse_example_spec # pylint: disable=protected-access
def transform_feature(self, transformation_cache, state_manager):
"""Transforms underlying `categorical_column`."""
return transformation_cache.get(self.categorical_column, state_manager)
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _transform_feature(self, inputs):
return inputs.get(self.categorical_column)
@property
def variable_shape(self):
"""See `DenseColumn` base class."""
return tensor_shape.TensorShape([self.dimension])
@property
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _variable_shape(self):
return self.variable_shape
def create_state(self, state_manager):
"""Creates the embedding lookup variable."""
default_num_buckets = (self.categorical_column.num_buckets
if self._is_v2_column
else self.categorical_column._num_buckets) # pylint: disable=protected-access
num_buckets = getattr(self.categorical_column, 'num_buckets',
default_num_buckets)
embedding_shape = (num_buckets, self.dimension)
state_manager.create_variable(
self,
name='embedding_weights',
shape=embedding_shape,
dtype=dtypes.float32,
trainable=self.trainable,
use_resource=True,
initializer=self.initializer)
def _get_dense_tensor_internal_helper(self, sparse_tensors,
embedding_weights):
sparse_ids = sparse_tensors.id_tensor
sparse_weights = sparse_tensors.weight_tensor
if self.ckpt_to_load_from is not None:
to_restore = embedding_weights
if isinstance(to_restore, variables.PartitionedVariable):
to_restore = to_restore._get_variable_list() # pylint: disable=protected-access
checkpoint_utils.init_from_checkpoint(self.ckpt_to_load_from, {
self.tensor_name_in_ckpt: to_restore
})
sparse_id_rank = tensor_shape.dimension_value(
sparse_ids.dense_shape.get_shape()[0])
embedding_lookup_sparse = embedding_ops.safe_embedding_lookup_sparse
if (not self.use_safe_embedding_lookup and sparse_id_rank is not None and
sparse_id_rank <= 2):
embedding_lookup_sparse = embedding_ops.embedding_lookup_sparse
# Return embedding lookup result.
return embedding_lookup_sparse(
embedding_weights,
sparse_ids,
sparse_weights,
combiner=self.combiner,
name='%s_weights' % self.name,
max_norm=self.max_norm)
def _get_dense_tensor_internal(self, sparse_tensors, state_manager):
"""Private method that follows the signature of get_dense_tensor."""
embedding_weights = state_manager.get_variable(
self, name='embedding_weights')
return self._get_dense_tensor_internal_helper(sparse_tensors,
embedding_weights)
def _old_get_dense_tensor_internal(self, sparse_tensors, weight_collections,
trainable):
"""Private method that follows the signature of _get_dense_tensor."""
embedding_shape = (self.categorical_column._num_buckets, self.dimension) # pylint: disable=protected-access
if (weight_collections and
ops.GraphKeys.GLOBAL_VARIABLES not in weight_collections):
weight_collections.append(ops.GraphKeys.GLOBAL_VARIABLES)
embedding_weights = variable_scope.get_variable(
name='embedding_weights',
shape=embedding_shape,
dtype=dtypes.float32,
initializer=self.initializer,
trainable=self.trainable and trainable,
collections=weight_collections)
return self._get_dense_tensor_internal_helper(sparse_tensors,
embedding_weights)
def get_dense_tensor(self, transformation_cache, state_manager):
"""Returns tensor after doing the embedding lookup.
Args:
transformation_cache: A `FeatureTransformationCache` object to access
features.
state_manager: A `StateManager` to create / access resources such as
lookup tables.
Returns:
Embedding lookup tensor.
Raises:
ValueError: `categorical_column` is SequenceCategoricalColumn.
"""
if isinstance(self.categorical_column, SequenceCategoricalColumn):
raise ValueError(
'In embedding_column: {}. '
'categorical_column must not be of type SequenceCategoricalColumn. '
'Suggested fix A: If you wish to use DenseFeatures, use a '
'non-sequence categorical_column_with_*. '
'Suggested fix B: If you wish to create sequence input, use '
'SequenceFeatures instead of DenseFeatures. '
'Given (type {}): {}'.format(self.name, type(self.categorical_column),
self.categorical_column))
# Get sparse IDs and weights.
sparse_tensors = self.categorical_column.get_sparse_tensors(
transformation_cache, state_manager)
return self._get_dense_tensor_internal(sparse_tensors, state_manager)
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _get_dense_tensor(self, inputs, weight_collections=None, trainable=None):
if isinstance(
self.categorical_column,
(SequenceCategoricalColumn, fc_old._SequenceCategoricalColumn)): # pylint: disable=protected-access
raise ValueError(
'In embedding_column: {}. '
'categorical_column must not be of type _SequenceCategoricalColumn. '
'Suggested fix A: If you wish to use DenseFeatures, use a '
'non-sequence categorical_column_with_*. '
'Suggested fix B: If you wish to create sequence input, use '
'SequenceFeatures instead of DenseFeatures. '
'Given (type {}): {}'.format(self.name, type(self.categorical_column),
self.categorical_column))
sparse_tensors = self.categorical_column._get_sparse_tensors( # pylint: disable=protected-access
inputs, weight_collections, trainable)
return self._old_get_dense_tensor_internal(sparse_tensors,
weight_collections, trainable)
def get_sequence_dense_tensor(self, transformation_cache, state_manager):
"""See `SequenceDenseColumn` base class."""
if not isinstance(self.categorical_column, SequenceCategoricalColumn):
raise ValueError(
'In embedding_column: {}. '
'categorical_column must be of type SequenceCategoricalColumn '
'to use SequenceFeatures. '
'Suggested fix: Use one of sequence_categorical_column_with_*. '
'Given (type {}): {}'.format(self.name, type(self.categorical_column),
self.categorical_column))
sparse_tensors = self.categorical_column.get_sparse_tensors(
transformation_cache, state_manager)
dense_tensor = self._get_dense_tensor_internal(sparse_tensors,
state_manager)
sequence_length = fc_utils.sequence_length_from_sparse_tensor(
sparse_tensors.id_tensor)
return SequenceDenseColumn.TensorSequenceLengthPair(
dense_tensor=dense_tensor, sequence_length=sequence_length)
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _get_sequence_dense_tensor(self,
inputs,
weight_collections=None,
trainable=None):
if not isinstance(
self.categorical_column,
(SequenceCategoricalColumn, fc_old._SequenceCategoricalColumn)): # pylint: disable=protected-access
raise ValueError(
'In embedding_column: {}. '
'categorical_column must be of type SequenceCategoricalColumn '
'to use SequenceFeatures. '
'Suggested fix: Use one of sequence_categorical_column_with_*. '
'Given (type {}): {}'.format(self.name, type(self.categorical_column),
self.categorical_column))
sparse_tensors = self.categorical_column._get_sparse_tensors(inputs) # pylint: disable=protected-access
dense_tensor = self._old_get_dense_tensor_internal(
sparse_tensors,
weight_collections=weight_collections,
trainable=trainable)
sequence_length = fc_utils.sequence_length_from_sparse_tensor(
sparse_tensors.id_tensor)
return SequenceDenseColumn.TensorSequenceLengthPair(
dense_tensor=dense_tensor, sequence_length=sequence_length)
@property
def parents(self):
"""See 'FeatureColumn` base class."""
return [self.categorical_column]
def get_config(self):
"""See 'FeatureColumn` base class."""
from tensorflow.python.feature_column.serialization import serialize_feature_column # pylint: disable=g-import-not-at-top
config = dict(zip(self._fields, self))
config['categorical_column'] = serialize_feature_column(
self.categorical_column)
config['initializer'] = initializers.serialize(self.initializer)
return config
@classmethod
def from_config(cls, config, custom_objects=None, columns_by_name=None):
"""See 'FeatureColumn` base class."""
if 'use_safe_embedding_lookup' not in config:
config['use_safe_embedding_lookup'] = True
from tensorflow.python.feature_column.serialization import deserialize_feature_column # pylint: disable=g-import-not-at-top
_check_config_keys(config, cls._fields)
kwargs = _standardize_and_copy_config(config)
kwargs['categorical_column'] = deserialize_feature_column(
config['categorical_column'], custom_objects, columns_by_name)
kwargs['initializer'] = initializers.deserialize(
config['initializer'], custom_objects=custom_objects)
return cls(**kwargs)
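# Illustrative sketch, not part of the public API: wraps a vocabulary column
# in an embedding column and inspects its derived properties. Performing the
# actual lookup requires a StateManager that owns the embedding variable
# (e.g. via DenseFeatures), which is out of scope here. The 'colors' feature
# and its vocabulary are hypothetical.
def _example_embedding_column():  # pragma: no cover
  colors = categorical_column_with_vocabulary_list(
      'colors', vocabulary_list=('R', 'G', 'B'))
  embedded = embedding_column(colors, dimension=2)
  # name: 'colors_embedding'; variable_shape: (2,); parents: [colors].
  return embedded.name, embedded.variable_shape, embedded.parents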
def _raise_shared_embedding_column_error():
raise ValueError('SharedEmbeddingColumns are not supported in '
'`linear_model` or `input_layer`. Please use '
'`DenseFeatures` or `LinearModel` instead.')
class SharedEmbeddingColumnCreator(tracking.AutoTrackable):
def __init__(self,
dimension,
initializer,
ckpt_to_load_from,
tensor_name_in_ckpt,
num_buckets,
trainable,
name='shared_embedding_column_creator',
use_safe_embedding_lookup=True):
self._dimension = dimension
self._initializer = initializer
self._ckpt_to_load_from = ckpt_to_load_from
self._tensor_name_in_ckpt = tensor_name_in_ckpt
self._num_buckets = num_buckets
self._trainable = trainable
self._name = name
self._use_safe_embedding_lookup = use_safe_embedding_lookup
# Map from graph keys to embedding_weight variables.
self._embedding_weights = {}
def __call__(self, categorical_column, combiner, max_norm):
return SharedEmbeddingColumn(categorical_column, self, combiner, max_norm,
self._use_safe_embedding_lookup)
@property
def embedding_weights(self):
key = ops.get_default_graph()._graph_key # pylint: disable=protected-access
if key not in self._embedding_weights:
embedding_shape = (self._num_buckets, self._dimension)
var = variable_scope.get_variable(
name=self._name,
shape=embedding_shape,
dtype=dtypes.float32,
initializer=self._initializer,
trainable=self._trainable)
if self._ckpt_to_load_from is not None:
to_restore = var
if isinstance(to_restore, variables.PartitionedVariable):
to_restore = to_restore._get_variable_list() # pylint: disable=protected-access
checkpoint_utils.init_from_checkpoint(
self._ckpt_to_load_from, {self._tensor_name_in_ckpt: to_restore})
self._embedding_weights[key] = var
return self._embedding_weights[key]
@property
def dimension(self):
return self._dimension
class SharedEmbeddingColumn(
DenseColumn,
SequenceDenseColumn,
fc_old._DenseColumn, # pylint: disable=protected-access
fc_old._SequenceDenseColumn, # pylint: disable=protected-access
collections.namedtuple(
'SharedEmbeddingColumn',
('categorical_column', 'shared_embedding_column_creator', 'combiner',
'max_norm', 'use_safe_embedding_lookup'))):
"""See `embedding_column`."""
def __new__(cls,
categorical_column,
shared_embedding_column_creator,
combiner,
max_norm,
use_safe_embedding_lookup=True):
return super(SharedEmbeddingColumn, cls).__new__(
cls,
categorical_column=categorical_column,
shared_embedding_column_creator=shared_embedding_column_creator,
combiner=combiner,
max_norm=max_norm,
use_safe_embedding_lookup=use_safe_embedding_lookup)
@property
def _is_v2_column(self):
return True
@property
def name(self):
"""See `FeatureColumn` base class."""
return '{}_shared_embedding'.format(self.categorical_column.name)
@property
def parse_example_spec(self):
"""See `FeatureColumn` base class."""
return self.categorical_column.parse_example_spec
@property
def _parse_example_spec(self):
return _raise_shared_embedding_column_error()
def transform_feature(self, transformation_cache, state_manager):
"""See `FeatureColumn` base class."""
return transformation_cache.get(self.categorical_column, state_manager)
def _transform_feature(self, inputs):
return _raise_shared_embedding_column_error()
@property
def variable_shape(self):
"""See `DenseColumn` base class."""
return tensor_shape.TensorShape(
[self.shared_embedding_column_creator.dimension])
@property
def _variable_shape(self):
return _raise_shared_embedding_column_error()
def _get_dense_tensor_internal(self, transformation_cache, state_manager):
"""Private method that follows the signature of _get_dense_tensor."""
# This method is called from a variable_scope with name _var_scope_name,
# which is shared among all shared embeddings. Open a name_scope here, so
# that the ops for different columns have distinct names.
with ops.name_scope(None, default_name=self.name):
# Get sparse IDs and weights.
sparse_tensors = self.categorical_column.get_sparse_tensors(
transformation_cache, state_manager)
sparse_ids = sparse_tensors.id_tensor
sparse_weights = sparse_tensors.weight_tensor
embedding_weights = self.shared_embedding_column_creator.embedding_weights
sparse_id_rank = tensor_shape.dimension_value(
sparse_ids.dense_shape.get_shape()[0])
embedding_lookup_sparse = embedding_ops.safe_embedding_lookup_sparse
if (not self.use_safe_embedding_lookup and sparse_id_rank is not None and
sparse_id_rank <= 2):
embedding_lookup_sparse = (embedding_ops.embedding_lookup_sparse)
# Return embedding lookup result.
return embedding_lookup_sparse(
embedding_weights,
sparse_ids,
sparse_weights,
combiner=self.combiner,
name='%s_weights' % self.name,
max_norm=self.max_norm)
def get_dense_tensor(self, transformation_cache, state_manager):
"""Returns the embedding lookup result."""
if isinstance(self.categorical_column, SequenceCategoricalColumn):
raise ValueError(
'In embedding_column: {}. '
'categorical_column must not be of type SequenceCategoricalColumn. '
'Suggested fix A: If you wish to use DenseFeatures, use a '
'non-sequence categorical_column_with_*. '
'Suggested fix B: If you wish to create sequence input, use '
'SequenceFeatures instead of DenseFeatures. '
'Given (type {}): {}'.format(self.name, type(self.categorical_column),
self.categorical_column))
return self._get_dense_tensor_internal(transformation_cache, state_manager)
def _get_dense_tensor(self, inputs, weight_collections=None, trainable=None):
return _raise_shared_embedding_column_error()
def get_sequence_dense_tensor(self, transformation_cache, state_manager):
"""See `SequenceDenseColumn` base class."""
if not isinstance(self.categorical_column, SequenceCategoricalColumn):
raise ValueError(
'In embedding_column: {}. '
'categorical_column must be of type SequenceCategoricalColumn '
'to use SequenceFeatures. '
'Suggested fix: Use one of sequence_categorical_column_with_*. '
'Given (type {}): {}'.format(self.name, type(self.categorical_column),
self.categorical_column))
dense_tensor = self._get_dense_tensor_internal(transformation_cache,
state_manager)
sparse_tensors = self.categorical_column.get_sparse_tensors(
transformation_cache, state_manager)
sequence_length = fc_utils.sequence_length_from_sparse_tensor(
sparse_tensors.id_tensor)
return SequenceDenseColumn.TensorSequenceLengthPair(
dense_tensor=dense_tensor, sequence_length=sequence_length)
def _get_sequence_dense_tensor(self,
inputs,
weight_collections=None,
trainable=None):
return _raise_shared_embedding_column_error()
@property
def parents(self):
"""See 'FeatureColumn` base class."""
return [self.categorical_column]
def _check_shape(shape, key):
"""Returns shape if it's valid, raises error otherwise."""
assert shape is not None
if not nest.is_sequence(shape):
shape = [shape]
shape = tuple(shape)
for dimension in shape:
if not isinstance(dimension, int):
raise TypeError('shape dimensions must be integer. '
'shape: {}, key: {}'.format(shape, key))
if dimension < 1:
raise ValueError('shape dimensions must be greater than 0. '
'shape: {}, key: {}'.format(shape, key))
return shape
class HashedCategoricalColumn(
CategoricalColumn,
fc_old._CategoricalColumn, # pylint: disable=protected-access
collections.namedtuple('HashedCategoricalColumn',
('key', 'hash_bucket_size', 'dtype'))):
"""see `categorical_column_with_hash_bucket`."""
@property
def _is_v2_column(self):
return True
@property
def name(self):
"""See `FeatureColumn` base class."""
return self.key
@property
def parse_example_spec(self):
"""See `FeatureColumn` base class."""
return {self.key: parsing_ops.VarLenFeature(self.dtype)}
@property
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _parse_example_spec(self):
return self.parse_example_spec
def _transform_input_tensor(self, input_tensor):
"""Hashes the values in the feature_column."""
if not isinstance(input_tensor, sparse_tensor_lib.SparseTensor):
raise ValueError('SparseColumn input must be a SparseTensor.')
fc_utils.assert_string_or_int(
input_tensor.dtype,
prefix='column_name: {} input_tensor'.format(self.key))
if self.dtype.is_integer != input_tensor.dtype.is_integer:
raise ValueError(
'Column dtype and SparseTensors dtype must be compatible. '
'key: {}, column dtype: {}, tensor dtype: {}'.format(
self.key, self.dtype, input_tensor.dtype))
if self.dtype == dtypes.string:
sparse_values = input_tensor.values
else:
sparse_values = string_ops.as_string(input_tensor.values)
sparse_id_values = string_ops.string_to_hash_bucket_fast(
sparse_values, self.hash_bucket_size, name='lookup')
return sparse_tensor_lib.SparseTensor(
input_tensor.indices, sparse_id_values, input_tensor.dense_shape)
def transform_feature(self, transformation_cache, state_manager):
"""Hashes the values in the feature_column."""
input_tensor = _to_sparse_input_and_drop_ignore_values(
transformation_cache.get(self.key, state_manager))
return self._transform_input_tensor(input_tensor)
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _transform_feature(self, inputs):
input_tensor = _to_sparse_input_and_drop_ignore_values(inputs.get(self.key))
return self._transform_input_tensor(input_tensor)
@property
def num_buckets(self):
"""Returns number of buckets in this sparse feature."""
return self.hash_bucket_size
@property
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _num_buckets(self):
return self.num_buckets
def get_sparse_tensors(self, transformation_cache, state_manager):
"""See `CategoricalColumn` base class."""
return CategoricalColumn.IdWeightPair(
transformation_cache.get(self, state_manager), None)
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _get_sparse_tensors(self, inputs, weight_collections=None,
trainable=None):
del weight_collections
del trainable
return CategoricalColumn.IdWeightPair(inputs.get(self), None)
@property
def parents(self):
"""See 'FeatureColumn` base class."""
return [self.key]
def get_config(self):
"""See 'FeatureColumn` base class."""
config = dict(zip(self._fields, self))
config['dtype'] = self.dtype.name
return config
@classmethod
def from_config(cls, config, custom_objects=None, columns_by_name=None):
"""See 'FeatureColumn` base class."""
_check_config_keys(config, cls._fields)
kwargs = _standardize_and_copy_config(config)
kwargs['dtype'] = dtypes.as_dtype(config['dtype'])
return cls(**kwargs)
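# Illustrative sketch, not part of the public API: hashes a sparse string
# feature into a fixed number of buckets through the transformation cache.
# The 'terms' feature and its values are hypothetical.
def _example_hashed_categorical_column():  # pragma: no cover
  terms = categorical_column_with_hash_bucket('terms', hash_bucket_size=10)
  sparse_input = sparse_tensor_lib.SparseTensor(
      indices=[[0, 0], [1, 0]], values=['kitten', 'dog'], dense_shape=[2, 1])
  cache = FeatureTransformationCache({'terms': sparse_input})
  # id_tensor values are hash bucket ids in [0, 10); weight_tensor is None.
  return terms.get_sparse_tensors(cache, None)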
class VocabularyFileCategoricalColumn(
CategoricalColumn,
fc_old._CategoricalColumn, # pylint: disable=protected-access
collections.namedtuple('VocabularyFileCategoricalColumn',
('key', 'vocabulary_file', 'vocabulary_size',
'num_oov_buckets', 'dtype', 'default_value'))):
"""See `categorical_column_with_vocabulary_file`."""
@property
def _is_v2_column(self):
return True
@property
def name(self):
"""See `FeatureColumn` base class."""
return self.key
@property
def parse_example_spec(self):
"""See `FeatureColumn` base class."""
return {self.key: parsing_ops.VarLenFeature(self.dtype)}
@property
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _parse_example_spec(self):
return self.parse_example_spec
def _transform_input_tensor(self, input_tensor, state_manager=None):
"""Creates a lookup table for the vocabulary."""
if self.dtype.is_integer != input_tensor.dtype.is_integer:
raise ValueError(
'Column dtype and SparseTensors dtype must be compatible. '
'key: {}, column dtype: {}, tensor dtype: {}'.format(
self.key, self.dtype, input_tensor.dtype))
fc_utils.assert_string_or_int(
input_tensor.dtype,
prefix='column_name: {} input_tensor'.format(self.key))
key_dtype = self.dtype
if input_tensor.dtype.is_integer:
# `index_table_from_file` requires 64-bit integer keys.
key_dtype = dtypes.int64
input_tensor = math_ops.cast(input_tensor, dtypes.int64)
name = '{}_lookup'.format(self.key)
if state_manager is None or not state_manager.has_resource(self, name):
with ops.init_scope():
table = lookup_ops.index_table_from_file(
vocabulary_file=self.vocabulary_file,
num_oov_buckets=self.num_oov_buckets,
vocab_size=self.vocabulary_size,
default_value=self.default_value,
key_dtype=key_dtype,
name=name)
if state_manager is not None:
state_manager.add_resource(self, name, table)
else:
# Reuse the table from the previous run.
table = state_manager.get_resource(self, name)
return table.lookup(input_tensor)
def transform_feature(self, transformation_cache, state_manager):
"""Creates a lookup table for the vocabulary."""
input_tensor = _to_sparse_input_and_drop_ignore_values(
transformation_cache.get(self.key, state_manager))
return self._transform_input_tensor(input_tensor, state_manager)
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _transform_feature(self, inputs):
input_tensor = _to_sparse_input_and_drop_ignore_values(inputs.get(self.key))
return self._transform_input_tensor(input_tensor)
@property
def num_buckets(self):
"""Returns number of buckets in this sparse feature."""
return self.vocabulary_size + self.num_oov_buckets
@property
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _num_buckets(self):
return self.num_buckets
def get_sparse_tensors(self, transformation_cache, state_manager):
"""See `CategoricalColumn` base class."""
return CategoricalColumn.IdWeightPair(
transformation_cache.get(self, state_manager), None)
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _get_sparse_tensors(self, inputs, weight_collections=None,
trainable=None):
del weight_collections
del trainable
return CategoricalColumn.IdWeightPair(inputs.get(self), None)
@property
def parents(self):
"""See 'FeatureColumn` base class."""
return [self.key]
def get_config(self):
"""See 'FeatureColumn` base class."""
config = dict(zip(self._fields, self))
config['dtype'] = self.dtype.name
return config
@classmethod
def from_config(cls, config, custom_objects=None, columns_by_name=None):
"""See 'FeatureColumn` base class."""
_check_config_keys(config, cls._fields)
kwargs = _standardize_and_copy_config(config)
kwargs['dtype'] = dtypes.as_dtype(config['dtype'])
return cls(**kwargs)
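# Illustrative usage sketch: columns of this kind are normally obtained via
# the public `categorical_column_with_vocabulary_file` API referenced in the
# docstring above rather than by instantiating the class directly. The
# feature key and vocabulary path below are placeholder values:
#
#   states = tf.feature_column.categorical_column_with_vocabulary_file(
#       key='state', vocabulary_file='/tmp/states.txt',
#       vocabulary_size=50, num_oov_buckets=5)
#
# Out-of-vocabulary strings hash into one of the 5 OOV buckets, so
# `states.num_buckets` is 55 (vocabulary_size + num_oov_buckets).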
class VocabularyListCategoricalColumn(
CategoricalColumn,
fc_old._CategoricalColumn, # pylint: disable=protected-access
collections.namedtuple(
'VocabularyListCategoricalColumn',
('key', 'vocabulary_list', 'dtype', 'default_value', 'num_oov_buckets'))
):
"""See `categorical_column_with_vocabulary_list`."""
@property
def _is_v2_column(self):
return True
@property
def name(self):
"""See `FeatureColumn` base class."""
return self.key
@property
def parse_example_spec(self):
"""See `FeatureColumn` base class."""
return {self.key: parsing_ops.VarLenFeature(self.dtype)}
@property
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _parse_example_spec(self):
return self.parse_example_spec
def _transform_input_tensor(self, input_tensor, state_manager=None):
"""Creates a lookup table for the vocabulary list."""
if self.dtype.is_integer != input_tensor.dtype.is_integer:
raise ValueError(
'Column dtype and SparseTensors dtype must be compatible. '
'key: {}, column dtype: {}, tensor dtype: {}'.format(
self.key, self.dtype, input_tensor.dtype))
fc_utils.assert_string_or_int(
input_tensor.dtype,
prefix='column_name: {} input_tensor'.format(self.key))
key_dtype = self.dtype
if input_tensor.dtype.is_integer:
# `index_table_from_tensor` requires 64-bit integer keys.
key_dtype = dtypes.int64
input_tensor = math_ops.cast(input_tensor, dtypes.int64)
name = '{}_lookup'.format(self.key)
if state_manager is None or not state_manager.has_resource(self, name):
with ops.init_scope():
table = lookup_ops.index_table_from_tensor(
vocabulary_list=tuple(self.vocabulary_list),
default_value=self.default_value,
num_oov_buckets=self.num_oov_buckets,
dtype=key_dtype,
name=name)
if state_manager is not None:
state_manager.add_resource(self, name, table)
else:
# Reuse the table from the previous run.
table = state_manager.get_resource(self, name)
return table.lookup(input_tensor)
def transform_feature(self, transformation_cache, state_manager):
"""Creates a lookup table for the vocabulary list."""
input_tensor = _to_sparse_input_and_drop_ignore_values(
transformation_cache.get(self.key, state_manager))
return self._transform_input_tensor(input_tensor, state_manager)
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _transform_feature(self, inputs):
input_tensor = _to_sparse_input_and_drop_ignore_values(inputs.get(self.key))
return self._transform_input_tensor(input_tensor)
@property
def num_buckets(self):
"""Returns number of buckets in this sparse feature."""
return len(self.vocabulary_list) + self.num_oov_buckets
@property
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _num_buckets(self):
return self.num_buckets
def get_sparse_tensors(self, transformation_cache, state_manager):
"""See `CategoricalColumn` base class."""
return CategoricalColumn.IdWeightPair(
transformation_cache.get(self, state_manager), None)
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _get_sparse_tensors(self, inputs, weight_collections=None,
trainable=None):
del weight_collections
del trainable
return CategoricalColumn.IdWeightPair(inputs.get(self), None)
@property
def parents(self):
"""See 'FeatureColumn` base class."""
return [self.key]
def get_config(self):
"""See 'FeatureColumn` base class."""
config = dict(zip(self._fields, self))
config['dtype'] = self.dtype.name
return config
@classmethod
def from_config(cls, config, custom_objects=None, columns_by_name=None):
"""See 'FeatureColumn` base class."""
_check_config_keys(config, cls._fields)
kwargs = _standardize_and_copy_config(config)
kwargs['dtype'] = dtypes.as_dtype(config['dtype'])
return cls(**kwargs)
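# Illustrative usage sketch through the public API referenced in the
# docstring above; the feature key and vocabulary entries are placeholders:
#
#   colors = tf.feature_column.categorical_column_with_vocabulary_list(
#       key='color', vocabulary_list=('R', 'G', 'B'), num_oov_buckets=1)
#
# Here `colors.num_buckets` is len(vocabulary_list) + num_oov_buckets = 4,
# matching the `num_buckets` property above.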
class IdentityCategoricalColumn(
CategoricalColumn,
fc_old._CategoricalColumn, # pylint: disable=protected-access
collections.namedtuple('IdentityCategoricalColumn',
('key', 'number_buckets', 'default_value'))):
"""See `categorical_column_with_identity`."""
@property
def _is_v2_column(self):
return True
@property
def name(self):
"""See `FeatureColumn` base class."""
return self.key
@property
def parse_example_spec(self):
"""See `FeatureColumn` base class."""
return {self.key: parsing_ops.VarLenFeature(dtypes.int64)}
@property
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _parse_example_spec(self):
return self.parse_example_spec
def _transform_input_tensor(self, input_tensor):
"""Returns a SparseTensor with identity values."""
if not input_tensor.dtype.is_integer:
raise ValueError(
'Invalid input, not integer. key: {} dtype: {}'.format(
self.key, input_tensor.dtype))
values = input_tensor.values
if input_tensor.values.dtype != dtypes.int64:
values = math_ops.cast(values, dtypes.int64, name='values')
if self.default_value is not None:
values = math_ops.cast(input_tensor.values, dtypes.int64, name='values')
num_buckets = math_ops.cast(
self.num_buckets, dtypes.int64, name='num_buckets')
zero = math_ops.cast(0, dtypes.int64, name='zero')
# Assign default for out-of-range values.
values = array_ops.where_v2(
math_ops.logical_or(
values < zero, values >= num_buckets, name='out_of_range'),
array_ops.fill(
dims=array_ops.shape(values),
value=math_ops.cast(self.default_value, dtypes.int64),
name='default_values'), values)
return sparse_tensor_lib.SparseTensor(
indices=input_tensor.indices,
values=values,
dense_shape=input_tensor.dense_shape)
def transform_feature(self, transformation_cache, state_manager):
"""Returns a SparseTensor with identity values."""
input_tensor = _to_sparse_input_and_drop_ignore_values(
transformation_cache.get(self.key, state_manager))
return self._transform_input_tensor(input_tensor)
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _transform_feature(self, inputs):
input_tensor = _to_sparse_input_and_drop_ignore_values(inputs.get(self.key))
return self._transform_input_tensor(input_tensor)
@property
def num_buckets(self):
"""Returns number of buckets in this sparse feature."""
return self.number_buckets
@property
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _num_buckets(self):
return self.num_buckets
def get_sparse_tensors(self, transformation_cache, state_manager):
"""See `CategoricalColumn` base class."""
return CategoricalColumn.IdWeightPair(
transformation_cache.get(self, state_manager), None)
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _get_sparse_tensors(self, inputs, weight_collections=None,
trainable=None):
del weight_collections
del trainable
return CategoricalColumn.IdWeightPair(inputs.get(self), None)
@property
def parents(self):
"""See 'FeatureColumn` base class."""
return [self.key]
def get_config(self):
"""See 'FeatureColumn` base class."""
return dict(zip(self._fields, self))
@classmethod
def from_config(cls, config, custom_objects=None, columns_by_name=None):
"""See 'FeatureColumn` base class."""
_check_config_keys(config, cls._fields)
kwargs = _standardize_and_copy_config(config)
return cls(**kwargs)
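# Illustrative usage sketch through the public API referenced in the
# docstring above; the feature key and sizes are placeholders:
#
#   video_id = tf.feature_column.categorical_column_with_identity(
#       key='video_id', num_buckets=1000000, default_value=0)
#
# Input values are used directly as ids; anything outside [0, num_buckets)
# is mapped to `default_value` (see _transform_input_tensor above).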
class WeightedCategoricalColumn(
CategoricalColumn,
fc_old._CategoricalColumn, # pylint: disable=protected-access
collections.namedtuple(
'WeightedCategoricalColumn',
('categorical_column', 'weight_feature_key', 'dtype'))):
"""See `weighted_categorical_column`."""
@property
def _is_v2_column(self):
return (isinstance(self.categorical_column, FeatureColumn) and
self.categorical_column._is_v2_column) # pylint: disable=protected-access
@property
def name(self):
"""See `FeatureColumn` base class."""
return '{}_weighted_by_{}'.format(
self.categorical_column.name, self.weight_feature_key)
@property
def parse_example_spec(self):
"""See `FeatureColumn` base class."""
config = self.categorical_column.parse_example_spec
if self.weight_feature_key in config:
raise ValueError('Parse config {} already exists for {}.'.format(
config[self.weight_feature_key], self.weight_feature_key))
config[self.weight_feature_key] = parsing_ops.VarLenFeature(self.dtype)
return config
@property
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _parse_example_spec(self):
config = self.categorical_column._parse_example_spec # pylint: disable=protected-access
if self.weight_feature_key in config:
raise ValueError('Parse config {} already exists for {}.'.format(
config[self.weight_feature_key], self.weight_feature_key))
config[self.weight_feature_key] = parsing_ops.VarLenFeature(self.dtype)
return config
@property
def num_buckets(self):
"""See `DenseColumn` base class."""
return self.categorical_column.num_buckets
@property
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _num_buckets(self):
return self.categorical_column._num_buckets # pylint: disable=protected-access
def _transform_weight_tensor(self, weight_tensor):
if weight_tensor is None:
raise ValueError('Missing weights {}.'.format(self.weight_feature_key))
weight_tensor = sparse_tensor_lib.convert_to_tensor_or_sparse_tensor(
weight_tensor)
if self.dtype != weight_tensor.dtype.base_dtype:
raise ValueError('Bad dtype, expected {}, but got {}.'.format(
self.dtype, weight_tensor.dtype))
if not isinstance(weight_tensor, sparse_tensor_lib.SparseTensor):
# The weight tensor can be a regular Tensor. In this case, sparsify it.
weight_tensor = _to_sparse_input_and_drop_ignore_values(
weight_tensor, ignore_value=0.0)
if not weight_tensor.dtype.is_floating:
weight_tensor = math_ops.cast(weight_tensor, dtypes.float32)
return weight_tensor
def transform_feature(self, transformation_cache, state_manager):
"""Applies weights to tensor generated from `categorical_column`'."""
weight_tensor = transformation_cache.get(self.weight_feature_key,
state_manager)
sparse_weight_tensor = self._transform_weight_tensor(weight_tensor)
sparse_categorical_tensor = _to_sparse_input_and_drop_ignore_values(
transformation_cache.get(self.categorical_column, state_manager))
return (sparse_categorical_tensor, sparse_weight_tensor)
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _transform_feature(self, inputs):
"""Applies weights to tensor generated from `categorical_column`'."""
weight_tensor = inputs.get(self.weight_feature_key)
weight_tensor = self._transform_weight_tensor(weight_tensor)
return (inputs.get(self.categorical_column), weight_tensor)
def get_sparse_tensors(self, transformation_cache, state_manager):
"""See `CategoricalColumn` base class."""
tensors = transformation_cache.get(self, state_manager)
return CategoricalColumn.IdWeightPair(tensors[0], tensors[1])
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _get_sparse_tensors(self, inputs, weight_collections=None,
trainable=None):
del weight_collections
del trainable
tensors = inputs.get(self)
return CategoricalColumn.IdWeightPair(tensors[0], tensors[1])
@property
def parents(self):
"""See 'FeatureColumn` base class."""
return [self.categorical_column, self.weight_feature_key]
def get_config(self):
"""See 'FeatureColumn` base class."""
from tensorflow.python.feature_column.serialization import serialize_feature_column # pylint: disable=g-import-not-at-top
config = dict(zip(self._fields, self))
config['categorical_column'] = serialize_feature_column(
self.categorical_column)
config['dtype'] = self.dtype.name
return config
@classmethod
def from_config(cls, config, custom_objects=None, columns_by_name=None):
"""See 'FeatureColumn` base class."""
from tensorflow.python.feature_column.serialization import deserialize_feature_column # pylint: disable=g-import-not-at-top
_check_config_keys(config, cls._fields)
kwargs = _standardize_and_copy_config(config)
kwargs['categorical_column'] = deserialize_feature_column(
config['categorical_column'], custom_objects, columns_by_name)
kwargs['dtype'] = dtypes.as_dtype(config['dtype'])
return cls(**kwargs)
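# Illustrative usage sketch through the public `weighted_categorical_column`
# API; the feature keys and file path are placeholders. The wrapped column's
# ids are paired with a float feature of the same shape holding per-id
# weights (for example term frequencies):
#
#   terms = tf.feature_column.categorical_column_with_vocabulary_file(
#       key='terms', vocabulary_file='/tmp/terms.txt', vocabulary_size=10000)
#   weighted = tf.feature_column.weighted_categorical_column(
#       categorical_column=terms, weight_feature_key='term_frequencies')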
class CrossedColumn(
CategoricalColumn,
fc_old._CategoricalColumn, # pylint: disable=protected-access
collections.namedtuple('CrossedColumn',
('keys', 'hash_bucket_size', 'hash_key'))):
"""See `crossed_column`."""
@property
def _is_v2_column(self):
for key in _collect_leaf_level_keys(self):
if isinstance(key, six.string_types):
continue
if not isinstance(key, FeatureColumn):
return False
if not key._is_v2_column: # pylint: disable=protected-access
return False
return True
@property
def name(self):
"""See `FeatureColumn` base class."""
feature_names = []
for key in _collect_leaf_level_keys(self):
if isinstance(key, (FeatureColumn, fc_old._FeatureColumn)): # pylint: disable=protected-access
feature_names.append(key.name)
else: # key must be a string
feature_names.append(key)
return '_X_'.join(sorted(feature_names))
@property
def parse_example_spec(self):
"""See `FeatureColumn` base class."""
config = {}
for key in self.keys:
if isinstance(key, FeatureColumn):
config.update(key.parse_example_spec)
elif isinstance(key, fc_old._FeatureColumn): # pylint: disable=protected-access
config.update(key._parse_example_spec) # pylint: disable=protected-access
else: # key must be a string
config.update({key: parsing_ops.VarLenFeature(dtypes.string)})
return config
@property
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _parse_example_spec(self):
return self.parse_example_spec
def transform_feature(self, transformation_cache, state_manager):
"""Generates a hashed sparse cross from the input tensors."""
feature_tensors = []
for key in _collect_leaf_level_keys(self):
if isinstance(key, six.string_types):
feature_tensors.append(transformation_cache.get(key, state_manager))
elif isinstance(key, (fc_old._CategoricalColumn, CategoricalColumn)): # pylint: disable=protected-access
ids_and_weights = key.get_sparse_tensors(transformation_cache,
state_manager)
if ids_and_weights.weight_tensor is not None:
raise ValueError(
'crossed_column does not support weight_tensor, but the given '
'column populates weight_tensor. '
'Given column: {}'.format(key.name))
feature_tensors.append(ids_and_weights.id_tensor)
else:
raise ValueError('Unsupported column type. Given: {}'.format(key))
return sparse_ops.sparse_cross_hashed(
inputs=feature_tensors,
num_buckets=self.hash_bucket_size,
hash_key=self.hash_key)
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _transform_feature(self, inputs):
"""Generates a hashed sparse cross from the input tensors."""
feature_tensors = []
for key in _collect_leaf_level_keys(self):
if isinstance(key, six.string_types):
feature_tensors.append(inputs.get(key))
elif isinstance(key, (CategoricalColumn, fc_old._CategoricalColumn)): # pylint: disable=protected-access
ids_and_weights = key._get_sparse_tensors(inputs) # pylint: disable=protected-access
if ids_and_weights.weight_tensor is not None:
raise ValueError(
'crossed_column does not support weight_tensor, but the given '
'column populates weight_tensor. '
'Given column: {}'.format(key.name))
feature_tensors.append(ids_and_weights.id_tensor)
else:
raise ValueError('Unsupported column type. Given: {}'.format(key))
return sparse_ops.sparse_cross_hashed(
inputs=feature_tensors,
num_buckets=self.hash_bucket_size,
hash_key=self.hash_key)
@property
def num_buckets(self):
"""Returns number of buckets in this sparse feature."""
return self.hash_bucket_size
@property
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _num_buckets(self):
return self.num_buckets
def get_sparse_tensors(self, transformation_cache, state_manager):
"""See `CategoricalColumn` base class."""
return CategoricalColumn.IdWeightPair(
transformation_cache.get(self, state_manager), None)
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _get_sparse_tensors(self, inputs, weight_collections=None,
trainable=None):
"""See `CategoricalColumn` base class."""
del weight_collections
del trainable
return CategoricalColumn.IdWeightPair(inputs.get(self), None)
@property
def parents(self):
"""See 'FeatureColumn` base class."""
return list(self.keys)
def get_config(self):
"""See 'FeatureColumn` base class."""
from tensorflow.python.feature_column.serialization import serialize_feature_column # pylint: disable=g-import-not-at-top
config = dict(zip(self._fields, self))
config['keys'] = tuple([serialize_feature_column(fc) for fc in self.keys])
return config
@classmethod
def from_config(cls, config, custom_objects=None, columns_by_name=None):
"""See 'FeatureColumn` base class."""
from tensorflow.python.feature_column.serialization import deserialize_feature_column # pylint: disable=g-import-not-at-top
_check_config_keys(config, cls._fields)
kwargs = _standardize_and_copy_config(config)
kwargs['keys'] = tuple([
deserialize_feature_column(c, custom_objects, columns_by_name)
for c in config['keys']
])
return cls(**kwargs)
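# Illustrative usage sketch through the public `crossed_column` API; the
# feature keys are placeholders. Keys may be raw feature names or other
# categorical columns, but not weighted ones (see transform_feature above):
#
#   lat_x_lon = tf.feature_column.crossed_column(
#       keys=['latitude_bucket', 'longitude_bucket'], hash_bucket_size=10000)
#
# The cross is hashed, so `lat_x_lon.num_buckets` equals hash_bucket_size.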
def _collect_leaf_level_keys(cross):
"""Collects base keys by expanding all nested crosses.
Args:
cross: A `CrossedColumn`.
Returns:
A list of strings or `CategoricalColumn` instances.
"""
leaf_level_keys = []
for k in cross.keys:
if isinstance(k, CrossedColumn):
leaf_level_keys.extend(_collect_leaf_level_keys(k))
else:
leaf_level_keys.append(k)
return leaf_level_keys
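# Sketch of what _collect_leaf_level_keys returns for a nested cross; the
# column names are placeholders:
#
#   inner = crossed_column(['a', 'b'], hash_bucket_size=10)
#   outer = crossed_column([inner, 'c'], hash_bucket_size=10)
#   _collect_leaf_level_keys(outer)  # -> ['a', 'b', 'c']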
def _prune_invalid_ids(sparse_ids, sparse_weights):
"""Prune invalid IDs (< 0) from the input ids and weights."""
is_id_valid = math_ops.greater_equal(sparse_ids.values, 0)
if sparse_weights is not None:
is_id_valid = math_ops.logical_and(
is_id_valid,
array_ops.ones_like(sparse_weights.values, dtype=dtypes.bool))
sparse_ids = sparse_ops.sparse_retain(sparse_ids, is_id_valid)
if sparse_weights is not None:
sparse_weights = sparse_ops.sparse_retain(sparse_weights, is_id_valid)
return sparse_ids, sparse_weights
def _prune_invalid_weights(sparse_ids, sparse_weights):
"""Prune invalid weights (< 0) from the input ids and weights."""
if sparse_weights is not None:
is_weights_valid = math_ops.greater(sparse_weights.values, 0)
sparse_ids = sparse_ops.sparse_retain(sparse_ids, is_weights_valid)
sparse_weights = sparse_ops.sparse_retain(sparse_weights, is_weights_valid)
return sparse_ids, sparse_weights
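# Sketch of the two pruning helpers above on a hypothetical input with ids
# [3, -1, 7] and weights [0.5, 0.2, 0.0] (each applied to the original pair):
#   _prune_invalid_ids:     ids [3, 7],  weights [0.5, 0.0]
#   _prune_invalid_weights: ids [3, -1], weights [0.5, 0.2]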
class IndicatorColumn(
DenseColumn,
SequenceDenseColumn,
fc_old._DenseColumn, # pylint: disable=protected-access
fc_old._SequenceDenseColumn, # pylint: disable=protected-access
    collections.namedtuple('IndicatorColumn', ('categorical_column',))):
"""Represents a one-hot column for use in deep networks.
Args:
categorical_column: A `CategoricalColumn` which is created by
`categorical_column_with_*` function.
"""
@property
def _is_v2_column(self):
return (isinstance(self.categorical_column, FeatureColumn) and
self.categorical_column._is_v2_column) # pylint: disable=protected-access
@property
def name(self):
"""See `FeatureColumn` base class."""
return '{}_indicator'.format(self.categorical_column.name)
def _transform_id_weight_pair(self, id_weight_pair, size):
id_tensor = id_weight_pair.id_tensor
weight_tensor = id_weight_pair.weight_tensor
# If the underlying column is weighted, return the input as a dense tensor.
if weight_tensor is not None:
weighted_column = sparse_ops.sparse_merge(
sp_ids=id_tensor, sp_values=weight_tensor, vocab_size=int(size))
# Remove (?, -1) index.
weighted_column = sparse_ops.sparse_slice(weighted_column, [0, 0],
weighted_column.dense_shape)
# Use scatter_nd to merge duplicated indices if existed,
# instead of sparse_tensor_to_dense.
return array_ops.scatter_nd(weighted_column.indices,
weighted_column.values,
weighted_column.dense_shape)
dense_id_tensor = sparse_ops.sparse_tensor_to_dense(
id_tensor, default_value=-1)
# One hot must be float for tf.concat reasons since all other inputs to
# input_layer are float32.
one_hot_id_tensor = array_ops.one_hot(
dense_id_tensor, depth=size, on_value=1.0, off_value=0.0)
# Reduce to get a multi-hot per example.
return math_ops.reduce_sum(one_hot_id_tensor, axis=[-2])
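  # Sketch of the unweighted path above for one example whose sparse ids are
  # [1, 3, 3] with size=5: the densified ids are [1, 3, 3] (-1 where empty),
  # one_hot yields three length-5 rows, and reduce_sum collapses them into
  # the multi-hot count vector [0., 1., 0., 2., 0.].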
def transform_feature(self, transformation_cache, state_manager):
"""Returns dense `Tensor` representing feature.
Args:
transformation_cache: A `FeatureTransformationCache` object to access
features.
state_manager: A `StateManager` to create / access resources such as
lookup tables.
Returns:
Transformed feature `Tensor`.
Raises:
ValueError: if input rank is not known at graph building time.
"""
id_weight_pair = self.categorical_column.get_sparse_tensors(
transformation_cache, state_manager)
return self._transform_id_weight_pair(id_weight_pair,
self.variable_shape[-1])
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _transform_feature(self, inputs):
id_weight_pair = self.categorical_column._get_sparse_tensors(inputs) # pylint: disable=protected-access
return self._transform_id_weight_pair(id_weight_pair,
self._variable_shape[-1])
@property
def parse_example_spec(self):
"""See `FeatureColumn` base class."""
return self.categorical_column.parse_example_spec
@property
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _parse_example_spec(self):
return self.categorical_column._parse_example_spec # pylint: disable=protected-access
@property
def variable_shape(self):
"""Returns a `TensorShape` representing the shape of the dense `Tensor`."""
if isinstance(self.categorical_column, FeatureColumn):
return tensor_shape.TensorShape([1, self.categorical_column.num_buckets])
else:
return tensor_shape.TensorShape([1, self.categorical_column._num_buckets]) # pylint: disable=protected-access
@property
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _variable_shape(self):
return tensor_shape.TensorShape([1, self.categorical_column._num_buckets]) # pylint: disable=protected-access
def get_dense_tensor(self, transformation_cache, state_manager):
"""Returns dense `Tensor` representing feature.
Args:
transformation_cache: A `FeatureTransformationCache` object to access
features.
state_manager: A `StateManager` to create / access resources such as
lookup tables.
Returns:
Dense `Tensor` created within `transform_feature`.
Raises:
ValueError: If `categorical_column` is a `SequenceCategoricalColumn`.
"""
if isinstance(self.categorical_column, SequenceCategoricalColumn):
raise ValueError(
'In indicator_column: {}. '
'categorical_column must not be of type SequenceCategoricalColumn. '
'Suggested fix A: If you wish to use DenseFeatures, use a '
'non-sequence categorical_column_with_*. '
'Suggested fix B: If you wish to create sequence input, use '
'SequenceFeatures instead of DenseFeatures. '
'Given (type {}): {}'.format(self.name, type(self.categorical_column),
self.categorical_column))
# Feature has been already transformed. Return the intermediate
# representation created by transform_feature.
return transformation_cache.get(self, state_manager)
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _get_dense_tensor(self, inputs, weight_collections=None, trainable=None):
del weight_collections
del trainable
if isinstance(
self.categorical_column,
(SequenceCategoricalColumn, fc_old._SequenceCategoricalColumn)): # pylint: disable=protected-access
raise ValueError(
'In indicator_column: {}. '
'categorical_column must not be of type _SequenceCategoricalColumn. '
'Suggested fix A: If you wish to use DenseFeatures, use a '
'non-sequence categorical_column_with_*. '
'Suggested fix B: If you wish to create sequence input, use '
'SequenceFeatures instead of DenseFeatures. '
'Given (type {}): {}'.format(self.name, type(self.categorical_column),
self.categorical_column))
# Feature has been already transformed. Return the intermediate
# representation created by transform_feature.
return inputs.get(self)
def get_sequence_dense_tensor(self, transformation_cache, state_manager):
"""See `SequenceDenseColumn` base class."""
if not isinstance(self.categorical_column, SequenceCategoricalColumn):
raise ValueError(
'In indicator_column: {}. '
'categorical_column must be of type SequenceCategoricalColumn '
'to use SequenceFeatures. '
'Suggested fix: Use one of sequence_categorical_column_with_*. '
'Given (type {}): {}'.format(self.name, type(self.categorical_column),
self.categorical_column))
# Feature has been already transformed. Return the intermediate
# representation created by transform_feature.
dense_tensor = transformation_cache.get(self, state_manager)
sparse_tensors = self.categorical_column.get_sparse_tensors(
transformation_cache, state_manager)
sequence_length = fc_utils.sequence_length_from_sparse_tensor(
sparse_tensors.id_tensor)
return SequenceDenseColumn.TensorSequenceLengthPair(
dense_tensor=dense_tensor, sequence_length=sequence_length)
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _get_sequence_dense_tensor(self,
inputs,
weight_collections=None,
trainable=None):
# Do nothing with weight_collections and trainable since no variables are
# created in this function.
del weight_collections
del trainable
if not isinstance(
self.categorical_column,
(SequenceCategoricalColumn, fc_old._SequenceCategoricalColumn)): # pylint: disable=protected-access
raise ValueError(
'In indicator_column: {}. '
'categorical_column must be of type _SequenceCategoricalColumn '
'to use SequenceFeatures. '
'Suggested fix: Use one of sequence_categorical_column_with_*. '
'Given (type {}): {}'.format(self.name, type(self.categorical_column),
self.categorical_column))
# Feature has been already transformed. Return the intermediate
# representation created by _transform_feature.
dense_tensor = inputs.get(self)
sparse_tensors = self.categorical_column._get_sparse_tensors(inputs) # pylint: disable=protected-access
sequence_length = fc_utils.sequence_length_from_sparse_tensor(
sparse_tensors.id_tensor)
return SequenceDenseColumn.TensorSequenceLengthPair(
dense_tensor=dense_tensor, sequence_length=sequence_length)
@property
def parents(self):
"""See 'FeatureColumn` base class."""
return [self.categorical_column]
def get_config(self):
"""See 'FeatureColumn` base class."""
from tensorflow.python.feature_column.serialization import serialize_feature_column # pylint: disable=g-import-not-at-top
config = dict(zip(self._fields, self))
config['categorical_column'] = serialize_feature_column(
self.categorical_column)
return config
@classmethod
def from_config(cls, config, custom_objects=None, columns_by_name=None):
"""See 'FeatureColumn` base class."""
from tensorflow.python.feature_column.serialization import deserialize_feature_column # pylint: disable=g-import-not-at-top
_check_config_keys(config, cls._fields)
kwargs = _standardize_and_copy_config(config)
kwargs['categorical_column'] = deserialize_feature_column(
config['categorical_column'], custom_objects, columns_by_name)
return cls(**kwargs)
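# Illustrative usage sketch through the public `indicator_column` API; the
# feature key is a placeholder:
#
#   color = tf.feature_column.categorical_column_with_vocabulary_list(
#       key='color', vocabulary_list=('R', 'G', 'B'))
#   color_indicator = tf.feature_column.indicator_column(color)
#
# The resulting dense tensor has shape [batch_size, color.num_buckets] and is
# consumed by DenseFeatures; sequence categorical columns must instead go
# through SequenceFeatures, as enforced in get_dense_tensor above.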
def _verify_static_batch_size_equality(tensors, columns):
"""Verify equality between static batch sizes.
Args:
tensors: iterable of input tensors.
columns: Corresponding feature columns.
Raises:
ValueError: in case of mismatched batch sizes.
"""
  # batch_size is a Dimension object.
  expected_batch_size = None
  for i in range(0, len(tensors)):
    batch_size = tensor_shape.Dimension(tensor_shape.dimension_value(
        tensors[i].shape[0]))
    if batch_size.value is not None:
      if expected_batch_size is None:
        batch_size_column_index = i
        expected_batch_size = batch_size
      elif not expected_batch_size.is_compatible_with(batch_size):
        raise ValueError(
            'Batch size (first dimension) of each feature must be same. '
            'Batch size of columns ({}, {}): ({}, {})'.format(
                columns[batch_size_column_index].name, columns[i].name,
                expected_batch_size, batch_size))
class SequenceCategoricalColumn(
CategoricalColumn,
fc_old._SequenceCategoricalColumn, # pylint: disable=protected-access
collections.namedtuple('SequenceCategoricalColumn',
                           ('categorical_column',))):
"""Represents sequences of categorical data."""
@property
def _is_v2_column(self):
return (isinstance(self.categorical_column, FeatureColumn) and
self.categorical_column._is_v2_column) # pylint: disable=protected-access
@property
def name(self):
"""See `FeatureColumn` base class."""
return self.categorical_column.name
@property
def parse_example_spec(self):
"""See `FeatureColumn` base class."""
return self.categorical_column.parse_example_spec
@property
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _parse_example_spec(self):
return self.categorical_column._parse_example_spec # pylint: disable=protected-access
def transform_feature(self, transformation_cache, state_manager):
"""See `FeatureColumn` base class."""
return self.categorical_column.transform_feature(transformation_cache,
state_manager)
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _transform_feature(self, inputs):
return self.categorical_column._transform_feature(inputs) # pylint: disable=protected-access
@property
def num_buckets(self):
"""Returns number of buckets in this sparse feature."""
return self.categorical_column.num_buckets
@property
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _num_buckets(self):
return self.categorical_column._num_buckets # pylint: disable=protected-access
def _get_sparse_tensors_helper(self, sparse_tensors):
id_tensor = sparse_tensors.id_tensor
weight_tensor = sparse_tensors.weight_tensor
# Expands third dimension, if necessary so that embeddings are not
# combined during embedding lookup. If the tensor is already 3D, leave
# as-is.
shape = array_ops.shape(id_tensor)
# Compute the third dimension explicitly instead of setting it to -1, as
# that doesn't work for dynamically shaped tensors with 0-length at runtime.
# This happens for empty sequences.
target_shape = [shape[0], shape[1], math_ops.reduce_prod(shape[2:])]
id_tensor = sparse_ops.sparse_reshape(id_tensor, target_shape)
if weight_tensor is not None:
weight_tensor = sparse_ops.sparse_reshape(weight_tensor, target_shape)
return CategoricalColumn.IdWeightPair(id_tensor, weight_tensor)
def get_sparse_tensors(self, transformation_cache, state_manager):
"""Returns an IdWeightPair.
`IdWeightPair` is a pair of `SparseTensor`s which represents ids and
weights.
`IdWeightPair.id_tensor` is typically a `batch_size` x `num_buckets`
`SparseTensor` of `int64`. `IdWeightPair.weight_tensor` is either a
`SparseTensor` of `float` or `None` to indicate all weights should be
taken to be 1. If specified, `weight_tensor` must have exactly the same
shape and indices as `sp_ids`. Expected `SparseTensor` is same as parsing
output of a `VarLenFeature` which is a ragged matrix.
Args:
transformation_cache: A `FeatureTransformationCache` object to access
features.
state_manager: A `StateManager` to create / access resources such as
lookup tables.
"""
sparse_tensors = self.categorical_column.get_sparse_tensors(
transformation_cache, state_manager)
return self._get_sparse_tensors_helper(sparse_tensors)
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _get_sparse_tensors(self, inputs, weight_collections=None,
trainable=None):
sparse_tensors = self.categorical_column._get_sparse_tensors(inputs) # pylint: disable=protected-access
return self._get_sparse_tensors_helper(sparse_tensors)
@property
def parents(self):
"""See 'FeatureColumn` base class."""
return [self.categorical_column]
def get_config(self):
"""See 'FeatureColumn` base class."""
from tensorflow.python.feature_column.serialization import serialize_feature_column # pylint: disable=g-import-not-at-top
config = dict(zip(self._fields, self))
config['categorical_column'] = serialize_feature_column(
self.categorical_column)
return config
@classmethod
def from_config(cls, config, custom_objects=None, columns_by_name=None):
"""See 'FeatureColumn` base class."""
from tensorflow.python.feature_column.serialization import deserialize_feature_column # pylint: disable=g-import-not-at-top
_check_config_keys(config, cls._fields)
kwargs = _standardize_and_copy_config(config)
kwargs['categorical_column'] = deserialize_feature_column(
config['categorical_column'], custom_objects, columns_by_name)
return cls(**kwargs)
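# Illustrative usage sketch: sequence columns come from the public
# `sequence_categorical_column_with_*` functions (the feature key below is a
# placeholder) and are consumed through SequenceFeatures, typically wrapped
# in an indicator or embedding column:
#
#   tokens = tf.feature_column.sequence_categorical_column_with_identity(
#       key='token_ids', num_buckets=10000)
#   token_indicators = tf.feature_column.indicator_column(tokens)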
def _check_config_keys(config, expected_keys):
"""Checks that a config has all expected_keys."""
if set(config.keys()) != set(expected_keys):
raise ValueError('Invalid config: {}, expected keys: {}'.format(
config, expected_keys))
def _standardize_and_copy_config(config):
"""Returns a shallow copy of config with lists turned to tuples.
Keras serialization uses nest to listify everything.
This causes problems with the NumericColumn shape, which becomes
unhashable. We could try to solve this on the Keras side, but that
would require lots of tracking to avoid changing existing behavior.
Instead, we ensure here that we revive correctly.
Args:
config: dict that will be used to revive a Feature Column
Returns:
Shallow copy of config with lists turned to tuples.
"""
kwargs = config.copy()
for k, v in kwargs.items():
if isinstance(v, list):
kwargs[k] = tuple(v)
return kwargs
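# Sketch of the round trip this helper guards: Keras serialization turns
# tuples into lists, so a revived config such as {'shape': [1, 2]} would make
# a NumericColumn unhashable. After standardization the value is a tuple
# again:
#
#   _standardize_and_copy_config({'shape': [1, 2]})  # -> {'shape': (1, 2)}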
def _sanitize_column_name_for_variable_scope(name):
"""Sanitizes user-provided feature names for use as variable scopes."""
invalid_char = re.compile('[^A-Za-z0-9_.\\-]')
return invalid_char.sub('_', name)
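# Sketch of the sanitizer above: every character outside [A-Za-z0-9_.-] is
# replaced with an underscore, e.g.
#
#   _sanitize_column_name_for_variable_scope('user id/email@home')
#   # -> 'user_id_email_home'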
| apache-2.0 | -4,151,921,635,238,022,000 | 39.099542 | 128 | 0.674347 | false |
ClockworkOrigins/m2etis | configurator/quicktest/reporting/direct_null_1m_02-09-13.py | 1 | 2085 | __author__ = 'amw'
from quicktest.Reporting import plot3d, plot2d, create_dataset, create_plot_matrix
# Initialise database config
db_config = {"hostname": "localhost", "port": "27017", "db_name": "simulations", "collection_name": "02-09-13"}
# Create plot
tag = "direct_null_1m_020913"
x_param = "parameters.numSubs"
y_param = "parameters.numToSend_4"
z_param = "results.<Channel 4>: Latency - All nodes - Avgmean"
filter_list = [{"parameters.packetSize": "16B"},
{"parameters.packetSize": "32B"},
{"parameters.packetSize": "64B"},
{"parameters.packetSize": "128B"},
{"parameters.packetSize": "1024B"}
]
plot3d(db_config, tag, x_param, y_param, z_param, filter_list, 2, 3, "Direct Null: Average Latency to all nodes")
y_param = "results.<Channel 4>: Latency - All nodes - Avgmean"
filter_list = [{"parameters.packetSize": "16B", "parameters.numToSend_4": "1"},
{"parameters.packetSize": "16B", "parameters.numToSend_4": "3"},
{"parameters.packetSize": "16B", "parameters.numToSend_4": "5"},
{"parameters.packetSize": "16B", "parameters.numToSend_4": "7"},
{"parameters.packetSize": "16B", "parameters.numToSend_4": "10"},
{"parameters.packetSize": "16B", "parameters.numToSend_4": "15"},
{"parameters.packetSize": "16B", "parameters.numToSend_4": "20"},
{"parameters.packetSize": "16B", "parameters.numToSend_4": "30"}
]
# generate the 2d plots. the arguments "numCols" and "numRows" describe the layout of the resulting matrix of plots
plot2d(db_config, tag, x_param, y_param, filter_list, 3, 3, "Direct Null: Average Latency to all nodes")
y_param = "results.<Channel 4>: Latency - All nodes - Minmean"
plot2d(db_config, tag, x_param, y_param, filter_list, 3, 3, "Direct Null: Minimum Latency to all nodes")
y_param = "results.<Channel 4>: Latency - All nodes - Maxmean"
plot2d(db_config, tag, x_param, y_param, filter_list, 3, 3, "Direct Null: Maximum Latency to all nodes")
| apache-2.0 | -6,342,424,384,006,199,000 | 47.488372 | 115 | 0.631175 | false |
YaoQ/faceplusplus-demo | hello.py | 1 | 2906 | #!/usr/bin/env python2
# Import system libraries and define helper functions
import time
import sys
import os
import os.path
from pprint import pformat
# First import the API class from the SDK
from facepp import API
from facepp import File
def print_result(hint, result):
def encode(obj):
if type(obj) is unicode:
return obj.encode('utf-8')
if type(obj) is dict:
return {encode(k): encode(v) for (k, v) in obj.iteritems()}
if type(obj) is list:
return [encode(i) for i in obj]
return obj
print hint
result = encode(result)
print '\n'.join([' ' + i for i in pformat(result, width = 75).split('\n')])
def init():
fdir = os.path.dirname(__file__)
with open(os.path.join(fdir, 'apikey.cfg')) as f:
exec(f.read())
srv = locals().get('SERVER')
return API(API_KEY, API_SECRET, srv = srv)
# In this tutorial, you will learn how to call Face++ APIs and implement a
# simple app that recognizes a face image among 3 candidates.
api = init()
# Here are the person names and their face images
IMAGE_DIR = 'http://cn.faceplusplus.com/static/resources/python_demo/'
PERSONS = [
('Jim Parsons', IMAGE_DIR + '1.jpg'),
('Leonardo DiCaprio', IMAGE_DIR + '2.jpg'),
('Andy Liu', IMAGE_DIR + '3.jpg')
]
TARGET_IMAGE = IMAGE_DIR + '4.jpg'
# Step 1: Detect faces in the 3 pictures and find out their positions and
# attributes
FACES = {name: api.detection.detect(url = url)
for name, url in PERSONS}
for name, face in FACES.iteritems():
print_result(name, face)
# Step 2: create persons using the face_id
for name, face in FACES.iteritems():
rst = api.person.create(
person_name = name, face_id = face['face'][0]['face_id'])
print_result('create person {}'.format(name), rst)
# Step 3: create a new group and add those persons in it
rst = api.group.create(group_name = 'standard')
print_result('create group', rst)
rst = api.group.add_person(group_name = 'standard', person_name = FACES.iterkeys())
print_result('add these persons to group', rst)
# Step 4: train the model
rst = api.train.identify(group_name = 'standard')
print_result('train', rst)
# wait for training to complete
rst = api.wait_async(rst['session_id'])
print_result('wait async', rst)
# Step 5: recognize face in a new image
rst = api.recognition.identify(group_name = 'standard', url = TARGET_IMAGE)
print_result('recognition result', rst)
print '=' * 60
print 'The person with highest confidence:', \
rst['face'][0]['candidate'][0]['person_name']
# Finally, delete the persons and group because they are no longer needed
api.group.delete(group_name = 'standard')
api.person.delete(person_name = FACES.iterkeys())
# Congratulations! You have finished this tutorial, and you can continue
# reading our API document and start writing your own App using Face++ API!
# Enjoy :)
| gpl-2.0 | -6,944,040,991,084,908,000 | 31.288889 | 83 | 0.67309 | false |
noskill/virt-manager | virtManager/connect.py | 1 | 15892 | #
# Copyright (C) 2006, 2013 Red Hat, Inc.
# Copyright (C) 2006 Daniel P. Berrange <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301 USA.
#
import os
import logging
import socket
from gi.repository import Gio
from gi.repository import GObject
from gi.repository import Gtk
from . import uiutil
from .baseclass import vmmGObjectUI
(HV_QEMU,
HV_XEN,
HV_LXC,
HV_QEMU_SESSION,
HV_BHYVE) = range(5)
(CONN_SSH,
CONN_TCP,
CONN_TLS) = range(3)
def current_user():
try:
import getpass
return getpass.getuser()
except:
return ""
def default_conn_user(conn):
if conn == CONN_SSH:
return "root"
return current_user()
class vmmConnect(vmmGObjectUI):
__gsignals__ = {
"completed": (GObject.SignalFlags.RUN_FIRST, None, [str, bool]),
"cancelled": (GObject.SignalFlags.RUN_FIRST, None, []),
}
def __init__(self):
vmmGObjectUI.__init__(self, "connect.ui", "vmm-open-connection")
self.builder.connect_signals({
"on_hypervisor_changed": self.hypervisor_changed,
"on_transport_changed": self.transport_changed,
"on_hostname_combo_changed": self.hostname_combo_changed,
"on_connect_remote_toggled": self.connect_remote_toggled,
"on_username_entry_changed": self.username_changed,
"on_hostname_changed": self.hostname_changed,
"on_cancel_clicked": self.cancel,
"on_connect_clicked": self.open_conn,
"on_vmm_open_connection_delete_event": self.cancel,
})
self.browser = None
self.browser_sigs = []
# Set this if we can't resolve 'hostname.local': means avahi
# prob isn't configured correctly, and we should strip .local
self.can_resolve_local = None
# Plain hostname resolve failed, means we should just use IP addr
self.can_resolve_hostname = None
self.set_initial_state()
self.dbus = None
self.avahiserver = None
try:
self.dbus = Gio.bus_get_sync(Gio.BusType.SYSTEM, None)
self.avahiserver = Gio.DBusProxy.new_sync(self.dbus, 0, None,
"org.freedesktop.Avahi", "/",
"org.freedesktop.Avahi.Server", None)
except Exception, e:
logging.debug("Couldn't contact avahi: %s", str(e))
self.reset_state()
@staticmethod
def default_uri(always_system=False):
if os.path.exists('/var/lib/xen'):
if (os.path.exists('/dev/xen/evtchn') or
os.path.exists("/proc/xen")):
return 'xen:///'
if (os.path.exists("/usr/bin/qemu") or
os.path.exists("/usr/bin/qemu-kvm") or
os.path.exists("/usr/bin/kvm") or
os.path.exists("/usr/libexec/qemu-kvm")):
if always_system or os.geteuid() == 0:
return "qemu:///system"
else:
return "qemu:///session"
return None
def cancel(self, ignore1=None, ignore2=None):
logging.debug("Cancelling open connection")
self.close()
self.emit("cancelled")
return 1
def close(self, ignore1=None, ignore2=None):
logging.debug("Closing open connection")
self.topwin.hide()
if self.browser:
for obj, sig in self.browser_sigs:
obj.disconnect(sig)
self.browser_sigs = []
self.browser = None
def show(self, parent, reset_state=True):
logging.debug("Showing open connection")
if reset_state:
self.reset_state()
self.topwin.set_transient_for(parent)
self.topwin.present()
self.start_browse()
def _cleanup(self):
pass
def set_initial_state(self):
self.widget("connect").grab_default()
combo = self.widget("hypervisor")
model = Gtk.ListStore(str)
model.append(["QEMU/KVM"])
model.append(["Xen"])
model.append(["LXC (Linux Containers)"])
model.append(["QEMU/KVM user session"])
if self.config.with_bhyve:
model.append(["Bhyve"])
combo.set_model(model)
uiutil.set_combo_text_column(combo, 0)
combo = self.widget("transport")
model = Gtk.ListStore(str)
model.append(["SSH"])
model.append(["TCP (SASL, Kerberos)"])
model.append(["SSL/TLS with certificates"])
combo.set_model(model)
uiutil.set_combo_text_column(combo, 0)
# Hostname combo box entry
hostListModel = Gtk.ListStore(str, str, str)
host = self.widget("hostname")
host.set_model(hostListModel)
host.set_entry_text_column(2)
hostListModel.set_sort_column_id(2, Gtk.SortType.ASCENDING)
def reset_state(self):
self.set_default_hypervisor()
self.widget("transport").set_active(0)
self.widget("autoconnect").set_sensitive(True)
self.widget("autoconnect").set_active(True)
self.widget("hostname").get_model().clear()
self.widget("hostname").get_child().set_text("")
self.widget("connect-remote").set_active(False)
self.widget("username-entry").set_text("")
self.connect_remote_toggled(self.widget("connect-remote"))
self.populate_uri()
def is_remote(self):
# Whether user is requesting a remote connection
return self.widget("connect-remote").get_active()
def set_default_hypervisor(self):
default = self.default_uri(always_system=True)
if not default or default.startswith("qemu"):
self.widget("hypervisor").set_active(HV_QEMU)
elif default.startswith("xen"):
self.widget("hypervisor").set_active(HV_XEN)
def add_service(self, interface, protocol, name, typ, domain, flags):
ignore = flags
try:
# Async service resolving
res = self.avahiserver.ServiceResolverNew("(iisssiu)",
interface, protocol,
name, typ, domain, -1, 0)
resint = Gio.DBusProxy.new_sync(self.dbus, 0, None,
"org.freedesktop.Avahi", res,
"org.freedesktop.Avahi.ServiceResolver",
None)
def cb(proxy, sender, signal, args):
ignore = proxy
ignore = sender
if signal == "Found":
self.add_conn_to_list(*args)
sig = resint.connect("g-signal", cb)
self.browser_sigs.append((resint, sig))
except Exception, e:
logging.exception(e)
def remove_service(self, interface, protocol, name, typ, domain, flags):
ignore = domain
ignore = protocol
ignore = flags
ignore = interface
ignore = typ
try:
model = self.widget("hostname").get_model()
name = str(name)
for row in model:
if row[0] == name:
model.remove(row.iter)
except Exception, e:
logging.exception(e)
def add_conn_to_list(self, interface, protocol, name, typ, domain,
host, aprotocol, address, port, text, flags):
ignore = domain
ignore = protocol
ignore = flags
ignore = interface
ignore = typ
ignore = text
ignore = aprotocol
ignore = port
try:
model = self.widget("hostname").get_model()
for row in model:
if row[2] == str(name):
# Already present in list
return
host = self.sanitize_hostname(str(host))
model.append([str(address), str(host), str(name)])
except Exception, e:
logging.exception(e)
def start_browse(self):
if self.browser or not self.avahiserver:
return
# Call method to create new browser, and get back an object path for it.
interface = -1 # physical interface to use? -1 is unspec
protocol = 0 # 0 = IPv4, 1 = IPv6, -1 = Unspecified
service = '_libvirt._tcp' # Service name to poll for
flags = 0 # Extra option flags
domain = "" # Domain to browse in. NULL uses default
bpath = self.avahiserver.ServiceBrowserNew("(iissu)",
interface, protocol,
service, domain, flags)
# Create browser interface for the new object
self.browser = Gio.DBusProxy.new_sync(self.dbus, 0, None,
"org.freedesktop.Avahi", bpath,
"org.freedesktop.Avahi.ServiceBrowser",
None)
def cb(proxy, sender, signal, args):
ignore = proxy
ignore = sender
if signal == "ItemNew":
self.add_service(*args)
elif signal == "ItemRemove":
self.remove_service(*args)
self.browser_sigs.append((self.browser,
self.browser.connect("g-signal", cb)))
def hostname_combo_changed(self, src):
model = src.get_model()
txt = src.get_child().get_text()
row = None
for currow in model:
if currow[2] == txt:
row = currow
break
if not row:
return
ip = row[0]
host = row[1]
entry = host
if not entry:
entry = ip
self.widget("hostname").get_child().set_text(entry)
def hostname_changed(self, src_ignore):
self.populate_uri()
def hypervisor_changed(self, src):
is_session = (src.get_active() == HV_QEMU_SESSION)
uiutil.set_grid_row_visible(
self.widget("session-warning-box"), is_session)
uiutil.set_grid_row_visible(
self.widget("connect-remote"), not is_session)
uiutil.set_grid_row_visible(
self.widget("username-entry"), not is_session)
uiutil.set_grid_row_visible(
self.widget("hostname"), not is_session)
uiutil.set_grid_row_visible(
self.widget("transport"), not is_session)
if is_session:
self.widget("connect-remote").set_active(False)
self.populate_uri()
def username_changed(self, src_ignore):
self.populate_uri()
def connect_remote_toggled(self, src_ignore):
is_remote = self.is_remote()
self.widget("hostname").set_sensitive(is_remote)
self.widget("transport").set_sensitive(is_remote)
self.widget("autoconnect").set_active(not is_remote)
self.widget("username-entry").set_sensitive(is_remote)
self.populate_default_user()
self.populate_uri()
def transport_changed(self, src_ignore):
self.populate_default_user()
self.populate_uri()
def populate_uri(self):
uri = self.generate_uri()
self.widget("uri-entry").set_text(uri)
def populate_default_user(self):
conn = self.widget("transport").get_active()
default_user = default_conn_user(conn)
self.widget("username-entry").set_text(default_user)
def generate_uri(self):
hv = self.widget("hypervisor").get_active()
conn = self.widget("transport").get_active()
host = self.widget("hostname").get_child().get_text().strip()
user = self.widget("username-entry").get_text()
is_remote = self.is_remote()
hvstr = ""
if hv == HV_XEN:
hvstr = "xen"
elif hv == HV_QEMU or hv == HV_QEMU_SESSION:
hvstr = "qemu"
elif hv == HV_BHYVE:
hvstr = "bhyve"
else:
hvstr = "lxc"
addrstr = ""
if user:
addrstr += user + "@"
addrstr += host
hoststr = ""
if not is_remote:
hoststr = ":///"
else:
if conn == CONN_TLS:
hoststr = "+tls://"
if conn == CONN_SSH:
hoststr = "+ssh://"
if conn == CONN_TCP:
hoststr = "+tcp://"
hoststr += addrstr + "/"
uri = hvstr + hoststr
if hv in (HV_QEMU, HV_BHYVE):
uri += "system"
elif hv == HV_QEMU_SESSION:
uri += "session"
return uri
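    # Rough sketch of the URIs generate_uri() can produce; the hostname and
    # user below are placeholder values:
    #   local QEMU system instance:     qemu:///system
    #   QEMU user session:              qemu:///session
    #   remote QEMU over SSH as root:   qemu+ssh://root@host1/system
    #   remote Xen over TLS (no user):  xen+tls://host1/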
def validate(self):
is_remote = self.is_remote()
host = self.widget("hostname").get_child().get_text()
if is_remote and not host:
return self.err.val_err(_("A hostname is required for "
"remote connections."))
return True
def open_conn(self, ignore):
if not self.validate():
return
auto = False
if self.widget("autoconnect").get_sensitive():
auto = self.widget("autoconnect").get_active()
uri = self.generate_uri()
logging.debug("Generate URI=%s, auto=%s", uri, auto)
self.close()
self.emit("completed", uri, auto)
def sanitize_hostname(self, host):
if host == "linux" or host == "localhost":
host = ""
if host.startswith("linux-"):
tmphost = host[6:]
try:
long(tmphost)
host = ""
except ValueError:
pass
if host:
host = self.check_resolve_host(host)
return host
def check_resolve_host(self, host):
# Try to resolve hostname
#
# Avahi always uses 'hostname.local', but for some reason
# fedora 12 out of the box can't resolve '.local' names
# Attempt to resolve the name. If it fails, remove .local
# if present, and try again
if host.endswith(".local"):
if self.can_resolve_local is False:
host = host[:-6]
elif self.can_resolve_local is None:
try:
socket.getaddrinfo(host, None)
except:
logging.debug("Couldn't resolve host '%s'. Stripping "
"'.local' and retrying.", host)
self.can_resolve_local = False
host = self.check_resolve_host(host[:-6])
else:
self.can_resolve_local = True
else:
if self.can_resolve_hostname is False:
host = ""
elif self.can_resolve_hostname is None:
try:
socket.getaddrinfo(host, None)
except:
logging.debug("Couldn't resolve host '%s'. Disabling "
"host name resolution, only using IP addr",
host)
self.can_resolve_hostname = False
else:
self.can_resolve_hostname = True
return host
| gpl-2.0 | 1,945,841,404,944,853,200 | 32.527426 | 80 | 0.543418 | false |
sravangottapu/Ip_Scanner | ip_scanner.py | 1 | 1187 | import threading
import time
import os
import re
import shlex
import _thread
import sys
import subprocess
alive = True
f = open("list.txt","w")
class myThread(threading.Thread):
def __init__(self,var,ip):
threading.Thread.__init__(self)
self.var = var
self.ip = ip
def run(self):
if(alive):
ping_ip(self.var,self.ip)
#self._stop.set()
print("Thread Exited")
def ping_ip(cmd,ip):
try:
output = subprocess.check_output(cmd)
f.write(ip)
f.write("\n")
print(ip + "Reachable")
except:
print(ip + "Not Reachable")
first = input("Enter the first Ip")
second = input("Enter the second Ip")
first = int(first)
second = int(second)
ping = "ping "
c1 = "-c1 "
start = time.time()
cmd_no_ip = ping + c1
t_end = time.time() + 2
for i in range(first,second):
ip = "172.16.114."+str(i)
cmd = cmd_no_ip + ip
cmd = shlex.split(cmd)
try:
thread1 = myThread(cmd,ip)
thread1.start()
thread1.join(1)
except:
print("Not thread" + ip)
end = time.time()
end = end - start
alive = False
f.close()
print("Total time: " + str(end))
sys.exit()
| gpl-3.0 | 6,060,178,347,985,288,000 | 21.396226 | 45 | 0.581297 | false |
mbj4668/pyang | pyang/plugins/threegpp.py | 1 | 11115 | """3GPP usage guidelines plugin
See 3GPP TS 32.160 clause 6.2
Copyright Ericsson 2020
Author [email protected]
Revision 2020-11-25
Checks implemented
6.2.1.2 Module name starts with _3gpp-
6.2.1.3 namespace pattern urn:3gpp:sa5:<module-name>
6.2.1.4-a prefix ends with 3gpp
6.2.1.4-b prefix.length <= 10 char
6.2.1.5 yang 1.1 missing
6.2.1.5 yang 1.1 incorrect
6.2.1.6-a anydata
6.2.1.6-b anyxml
6.2.1.6-c rpc
6.2.1.6-d deviation
6.2.1.9 description not needed for enum, bit, choice, container,
leaf-list, leaf, typedef, grouping, augment, uses
6.2.1.b-a module-description-missing
6.2.1.b-b module-organization-missing
6.2.1.b-c module-organization includes 3gpp
6.2.1.b-d module-contact-missing
6.2.1.b-d module-contact-incorrect
6.2.1.c module-reference-missing
6.2.1.c module-reference-incorrect
6.2.1.d-a module-revision-missing
6.2.1.d-a module-revision-reference-missing
6.2.1.e default meaning
6.2.1.f-a linelength > 80
6.2.1.f-b no-tabs
6.2.1.f-c no-strange-chars
6.2.1.f-d no-CR-chars
6.2-a no-containers
"""
import optparse
import re
import io
import sys
from pyang import plugin
from pyang import statements
from pyang import error
from pyang.error import err_add
from pyang.plugins import lint
def pyang_plugin_init():
plugin.register_plugin(THREEGPPlugin())
class THREEGPPlugin(lint.LintPlugin):
def __init__(self):
lint.LintPlugin.__init__(self)
self.modulename_prefixes = ['_3gpp']
def add_opts(self, optparser):
optlist = [
optparse.make_option("--3gpp",
dest="threegpp",
action="store_true",
help="Validate the module(s) according to " \
"3GPP rules."),
]
optparser.add_options(optlist)
def setup_ctx(self, ctx):
if not ctx.opts.threegpp:
return
self._setup_ctx(ctx)
error.add_error_code(
'3GPP_BAD_NAMESPACE_VALUE', 3,
'3GPP: the namespace should be urn:3gpp:sa5:%s')
statements.add_validation_fun(
'grammar', ['namespace'],
lambda ctx, s: self.v_chk_namespace(ctx, s))
error.add_error_code(
'3GPP_BAD_PREFIX_VALUE', 3,
'3GPP: the prefix should end with 3gpp')
error.add_error_code(
'3GPP_TOO_LONG_PREFIX', 3,
'3GPP: the prefix should not be longer than 13 characters')
statements.add_validation_fun(
'grammar', ['prefix'],
lambda ctx, s: self.v_chk_prefix(ctx, s))
error.add_error_code(
'3GPP_BAD_YANG_VERSION', 3,
'3GPP: the yang-version should be 1.1')
statements.add_validation_fun(
'grammar', ['yang-version'],
lambda ctx, s: self.v_chk_yang_version(ctx, s))
# check that yang-version is present. If not,
# it defaults to 1. which is bad for 3GPP
statements.add_validation_fun(
'grammar', ['module'],
lambda ctx, s: self.v_chk_yang_version_present(ctx, s))
error.add_error_code(
'3GPP_STATEMENT_NOT_ALLOWED', 3,
('3GPP: YANG statements anydata, anyxml, deviation, rpc '
'should not be used'))
statements.add_validation_fun(
'grammar', ['anydata' , 'anyxml' , 'deviation' , 'rpc'],
lambda ctx, s: self.v_chk_not_allowed_statements(ctx, s))
error.add_error_code(
'3GPP_BAD_ORGANIZATION', 3,
'3GPP: organization statement must include 3GPP')
statements.add_validation_fun(
'grammar', ['organization'],
lambda ctx, s: self.v_chk_organization(ctx, s))
error.add_error_code(
'3GPP_BAD_CONTACT', 3,
'3GPP: incorrect contact statement')
statements.add_validation_fun(
'grammar', ['contact'],
lambda ctx, s: self.v_chk_contact(ctx, s))
error.add_error_code(
'3GPP_MISSING_MODULE_REFERENCE', 3,
'3GPP: the module should have a reference substatement')
statements.add_validation_fun(
'grammar', ['module'],
lambda ctx, s: self.v_chk_module_reference_present(ctx, s))
error.add_error_code(
'3GPP_BAD_MODULE_REFERENCE', 3,
'3GPP: the module\'s reference substatement is incorrect')
statements.add_validation_fun(
'grammar', ['reference'],
lambda ctx, s: self.v_chk_module_reference(ctx, s))
error.add_error_code(
'3GPP_TAB_IN_FILE', 3,
'3GPP: tab characters should not be used in YANG modules')
error.add_error_code(
'3GPP_WHITESPACE_AT_END_OF_LINE', 3,
'3GPP: extra whitespace should not be added at the end of the line')
error.add_error_code(
'3GPP_LONG_LINE', 3,
'3GPP: line longer than 80 characters')
error.add_error_code(
'3GPP_CR_IN_FILE', 3,
('3GPP: Carriage-return characters should not be used. '
'End-of-line should be just one LF character'))
error.add_error_code(
'3GPP_NON_ASCII', 4,
'3GPP: the module should only use ASCII characters')
statements.add_validation_fun(
'grammar', ['module'],
lambda ctx, s: self.v_chk_3gpp_format(ctx, s))
error.add_error_code(
'3GPP_LIMITED_CONTAINER_USE', 4,
('3GPP: containers should only be used to contain the attributes '
'of a class'))
statements.add_validation_fun(
'grammar', ['container'],
lambda ctx, s: self.v_chk_limited_container_use(ctx, s))
def pre_validate_ctx(self, ctx, modules):
if ctx.opts.threegpp:
ctx.canonical = False
return
def v_chk_namespace(self, ctx, stmt):
r = 'urn:3gpp:sa5:' + stmt.i_module.arg +'$'
if re.match(r, stmt.arg) is None:
err_add(ctx.errors, stmt.pos, '3GPP_BAD_NAMESPACE_VALUE',
stmt.i_module.arg)
def v_chk_prefix(self, ctx, stmt):
if stmt.parent.keyword != 'module' :
return
r = '.+3gpp$'
if re.match(r, stmt.arg) is None:
err_add(ctx.errors, stmt.pos, '3GPP_BAD_PREFIX_VALUE',())
if len(stmt.arg) > 13 :
err_add(ctx.errors, stmt.pos, '3GPP_TOO_LONG_PREFIX',())
    def v_chk_yang_version_present(self, ctx, stmt):
        yang_version_present = False
        for substmt in stmt.substmts:
            if substmt.keyword == 'yang-version' :
                yang_version_present = True
        if not(yang_version_present) :
            err_add(ctx.errors, stmt.pos, '3GPP_BAD_YANG_VERSION',())
def v_chk_yang_version(self, ctx, stmt):
r = '1.1'
if re.match(r, stmt.arg) is None:
err_add(ctx.errors, stmt.pos, '3GPP_BAD_YANG_VERSION',())
def v_chk_not_allowed_statements(self, ctx, stmt):
err_add(ctx.errors, stmt.pos, '3GPP_STATEMENT_NOT_ALLOWED',())
def v_chk_organization(self, ctx, stmt):
r = '3GPP'
if re.search(r, stmt.arg, re.IGNORECASE) is None:
err_add(ctx.errors, stmt.pos, '3GPP_BAD_ORGANIZATION',())
def v_chk_contact(self, ctx, stmt):
if stmt.arg != ('https://www.3gpp.org/DynaReport/'
'TSG-WG--S5--officials.htm?Itemid=464'):
err_add(ctx.errors, stmt.pos, '3GPP_BAD_CONTACT',())
    def v_chk_module_reference_present(self, ctx, stmt):
        module_reference_present = False
        for substmt in stmt.substmts:
            if substmt.keyword == 'reference' :
                module_reference_present = True
        if not(module_reference_present) :
            err_add(ctx.errors, stmt.pos, '3GPP_MISSING_MODULE_REFERENCE',())
def v_chk_module_reference(self, ctx, stmt):
if stmt.parent.keyword != 'module' :
return
if not(stmt.arg.startswith('3GPP TS ')) :
err_add(ctx.errors, stmt.pos, '3GPP_BAD_MODULE_REFERENCE',())
def v_chk_3gpp_format(self, ctx, stmt):
if (not(stmt.arg.startswith("_3gpp"))):
return
filename = stmt.pos.ref
try:
fd = io.open(filename, "r", encoding="utf-8", newline='')
pos = error.Position(stmt.pos.ref)
pos.top = stmt
lineno = 0
for line in fd:
lineno += 1
pos.line = lineno
# no tabs
if (line.find('\t') != -1 ):
err_add(ctx.errors, pos, '3GPP_TAB_IN_FILE',())
# no whitespace after the line
# removed for now as there are just too many of these
# errors
# if (re.search('.*\s+\n',line) != None ):
# err_add(ctx.errors, self.pos,
# '3GPP_WHITESPACE_AT_END_OF_LINE',())
                # lines no longer than 80 chars
if (len(line) > 82 ):
err_add(ctx.errors, pos, '3GPP_LONG_LINE',())
# EOL should be just NL no CR
if (line.find('\r') != -1 ):
err_add(ctx.errors, pos, '3GPP_CR_IN_FILE',())
# only us-ascii chars
try:
line.encode('ascii')
except UnicodeEncodeError:
err_add(ctx.errors, pos, '3GPP_NON_ASCII',())
except IOError as ex:
sys.stderr.write("error %s: %s\n" % (filename, ex))
sys.exit(1)
except UnicodeDecodeError as ex:
s = str(ex).replace('utf-8', 'utf8')
sys.stderr.write("%s: unicode error: %s\n" % (filename, s))
sys.exit(1)
def v_chk_limited_container_use(self, ctx, stmt):
if stmt.arg != 'attributes' or stmt.parent.keyword != 'list' :
err_add(ctx.errors, stmt.pos, '3GPP_LIMITED_CONTAINER_USE',())
    def post_validate_ctx(self, ctx, modules):
        """Remove some lint errors that 3GPP considers acceptable."""
        if not ctx.opts.threegpp:
            return
for ctx_error in ctx.errors[:]:
if ((ctx_error[1] == "LINT_MISSING_REQUIRED_SUBSTMT"
or ctx_error[1] == "LINT_MISSING_RECOMMENDED_SUBSTMT")
and ctx_error[2][2] == 'description'
and (ctx_error[2][1] == 'enum'
or ctx_error[2][1] == 'bit'
or ctx_error[2][1] == 'choice'
or ctx_error[2][1] == 'container'
or ctx_error[2][1] == 'leaf-list'
or ctx_error[2][1] == 'leaf'
or ctx_error[2][1] == 'typedef'
or ctx_error[2][1] == 'grouping'
or ctx_error[2][1] == 'augment'
or ctx_error[2][1] == 'uses')):
# remove error from ctx
ctx.errors.remove(ctx_error)
return
| isc | 1,029,402,971,902,015,200 | 34.970874 | 79 | 0.544939 | false |
silvio/elbe | elbepack/xmldefaults.py | 1 | 4019 |
import random
import string
import sys
armel_defaults = {
"arch": "armel",
"size": "20G",
"mem": "256",
"interpreter": "qemu-system-arm",
"userinterpr": "qemu-arm-static",
"console": "ttyAMA0,115200n1",
"machine": "versatilepb",
"nicmodel": "smc91c111"
}
armel_virtio_defaults = {
"arch": "armel",
"size": "20G",
"mem": "256",
"interpreter": "qemu-system-arm-virtio",
"userinterpr": "qemu-arm-static",
"console": "ttyAMA0,115200n1",
"machine": "versatilepb",
"nicmodel": "smc91c111"
}
armhf_defaults = {
"arch": "armhf",
"size": "20G",
"mem": "256",
"interpreter": "qemu-system-arm",
"userinterpr": "qemu-arm-static",
"console": "ttyAMA0,115200n1",
"machine": "versatilepb -cpu cortex-a9",
"nicmodel": "smc91c111"
}
armhf_virtio_defaults = {
"arch": "armhf",
"size": "20G",
"mem": "256",
"interpreter": "qemu-system-arm-virtio",
"userinterpr": "qemu-arm-static",
"console": "ttyAMA0,115200n1",
"machine": "versatilepb -cpu cortex-a9",
"nicmodel": "virtio"
}
ppc_defaults = {
"arch": "powerpc",
"size": "20G",
"mem": "256",
"interpreter": "qemu-system-ppc",
"userinterpr": "qemu-ppc-static",
"console": "ttyPZ0,115200n1",
"machine": "mac99",
"nicmodel": "rtl8139"
}
amd64_defaults = {
"arch": "amd64",
"size": "20G",
"mem": "1024",
"interpreter": "kvm",
"console": "ttyS0,115200n1",
"machine": "pc",
"nicmodel": "virtio"
}
i386_defaults = {
"arch": "i386",
"size": "20G",
"mem": "1024",
"interpreter": "kvm",
"console": "ttyS0,115200n1",
"machine": "pc",
"nicmodel": "virtio"
}
defaults = { "armel": armel_defaults,
"armel-virtio": armel_virtio_defaults,
"armhf": armhf_defaults,
"armhf-virtio": armhf_virtio_defaults,
"ppc": ppc_defaults,
"amd64": amd64_defaults,
"i386": i386_defaults,
"nodefaults": {} }
xml_field_path = {
"arch": "project/buildimage/arch",
"size": "project/buildimage/size",
"mem": "project/buildimage/mem",
"interpreter": "project/buildimage/interpreter",
"console": "project/buildimage/console",
"machine": "project/buildimage/machine",
"nicmodel": "project/buildimage/NIC/model"
}
def get_random_mac():
    binaddr = [random.randint(0, 255) for i in range(6)]
binaddr[0] &= 0xfe
binaddr[0] |= 0x02
s = map( lambda x: "%02x" % x, binaddr )
return string.join( s, ":" )
class ElbeDefaults(object):
def __init__(self, build_type):
if not defaults.has_key(build_type):
print "Please specify a valid buildtype."
print "Valid buildtypes:"
print defaults.keys()
sys.exit(20)
self.defaults = defaults[build_type]
self.defaults["nicmac"] = get_random_mac()
def __getitem__( self, key ):
if self.defaults.has_key( key ):
return self.defaults[key]
print "No Default value has been Provided"
print "Either use a valid buildtype, or provide the field in the xml File."
print "The location in the xml is here:"
print xml_field_path[key]
sys.exit(20)
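# Hedged usage sketch (not part of the original module): look up the QEMU
# defaults for one of the build types defined above.  The values noted in the
# comments come from the armel_defaults dict.
if __name__ == "__main__":
    d = ElbeDefaults("armel")
    print d["arch"]         # "armel"
    print d["interpreter"]  # "qemu-system-arm"
    print d["nicmac"]       # randomly generated MAC address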
| gpl-3.0 | 1,458,241,890,226,960,400 | 28.123188 | 91 | 0.464543 | false |
TeamLocker/Server | TeamLocker_Server/protobufs/Libsodium_pb2.py | 1 | 3197 | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: protobufs/Libsodium.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='protobufs/Libsodium.proto',
package='',
syntax='proto3',
serialized_pb=_b('\n\x19protobufs/Libsodium.proto\"R\n\rLibsodiumItem\x12\x0c\n\x04\x64\x61ta\x18\x01 \x01(\x0c\x12\r\n\x05nonce\x18\x02 \x01(\x0c\x12\x11\n\tops_limit\x18\x03 \x01(\x03\x12\x11\n\tmem_limit\x18\x04 \x01(\x03\x42,\n*me.camerongray.teamlocker.client.protobufsb\x06proto3')
)
_LIBSODIUMITEM = _descriptor.Descriptor(
name='LibsodiumItem',
full_name='LibsodiumItem',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='data', full_name='LibsodiumItem.data', index=0,
number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='nonce', full_name='LibsodiumItem.nonce', index=1,
number=2, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='ops_limit', full_name='LibsodiumItem.ops_limit', index=2,
number=3, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='mem_limit', full_name='LibsodiumItem.mem_limit', index=3,
number=4, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=29,
serialized_end=111,
)
DESCRIPTOR.message_types_by_name['LibsodiumItem'] = _LIBSODIUMITEM
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
LibsodiumItem = _reflection.GeneratedProtocolMessageType('LibsodiumItem', (_message.Message,), dict(
DESCRIPTOR = _LIBSODIUMITEM,
__module__ = 'protobufs.Libsodium_pb2'
# @@protoc_insertion_point(class_scope:LibsodiumItem)
))
_sym_db.RegisterMessage(LibsodiumItem)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n*me.camerongray.teamlocker.client.protobufs'))
# @@protoc_insertion_point(module_scope)
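# Hedged usage sketch (not generated code): build and round-trip a
# LibsodiumItem message.  The byte strings and limits below are placeholder
# values, not defaults taken from the .proto file.
if __name__ == '__main__':
    item = LibsodiumItem(data=b'ciphertext', nonce=b'nonce-bytes',
                         ops_limit=4, mem_limit=33554432)
    payload = item.SerializeToString()
    decoded = LibsodiumItem()
    decoded.ParseFromString(payload)
    assert decoded.mem_limit == 33554432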
| gpl-3.0 | -4,341,607,792,040,625,700 | 33.75 | 289 | 0.713794 | false |
shaarli/python-shaarli-client | setup.py | 1 | 2012 | #!/usr/bin/env python3
"""Setup script for shaarli-client"""
import codecs
import os
import re
from setuptools import find_packages, setup
def get_long_description():
"""Reads the main README.rst to get the program's long description"""
with codecs.open('README.rst', 'r', 'utf-8') as f_readme:
return f_readme.read()
def get_package_metadata(attribute):
"""Reads metadata from the main package's __init__"""
with open(os.path.join('shaarli_client', '__init__.py'), 'r') as f_init:
return re.search(
r'^__{attr}__\s*=\s*[\'"]([^\'"]*)[\'"]'.format(attr=attribute),
f_init.read(), re.MULTILINE
).group(1)
setup(
name=get_package_metadata('title'),
version=get_package_metadata('version'),
description=get_package_metadata('brief'),
long_description=get_long_description(),
author=get_package_metadata('author'),
maintainer='VirtualTam',
maintainer_email='[email protected]',
license='MIT',
url='https://github.com/shaarli/python-shaarli-client',
keywords='bookmark bookmarking shaarli social',
packages=find_packages(exclude=['tests.*', 'tests']),
entry_points={
'console_scripts': [
'shaarli = shaarli_client.main:main',
],
},
install_requires=[
'requests >= 2.25',
'pyjwt == 2.0.1'
],
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Console',
'Intended Audience :: Developers',
'Intended Audience :: End Users/Desktop',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Topic :: Utilities',
]
)
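# Hedged usage note (not part of the original setup script): installing the
# package, e.g. with `pip install .`, registers the `shaarli` console script
# declared in entry_points above, which dispatches to shaarli_client.main:main.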
| mit | -5,212,573,942,387,458,000 | 31.451613 | 76 | 0.598907 | false |
de-tour/detour | server/handling.py | 1 | 6094 | import cherrypy
from cherrypy.lib.static import serve_file
from cherrypy.process.plugins import SimplePlugin
from queue import Queue, Empty
from collections import namedtuple
from concurrent import Crawler
import parsing
import json
import traceback
import random
from urllib.parse import unquote
from ws4py.websocket import WebSocket
from ws4py.messaging import TextMessage
PoolItem = namedtuple('PoolItem', ['verb', 'args', 'output'])
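# Note (assumption, not stated in the original source): a PoolItem bundles the
# engine method name to call (`verb`, e.g. 'suggest' or 'search'), its
# positional arguments (`args`), and the Queue (`output`) on which the Crawler
# worker is expected to put parsed results, as used by Search.suggest/search below.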
class Search:
def __init__(self):
self.engines_suggest = []
self.engines_search = []
self.add_engines(parsing.sites)
self.pool_suggest = Crawler(cls_list=self.engines_suggest)
self.pool_search = Crawler(cls_list=self.engines_search)
def start(self):
self.pool_suggest.start()
self.pool_search.start()
def add_engines(self, engines):
for Engine in engines:
if parsing.is_balancer(Engine):
self.add_engines(Engine.balance())
else:
if parsing.can_suggest(Engine):
self.engines_suggest.append(Engine)
if parsing.can_search(Engine):
self.engines_search.append(Engine)
def stop(self):
self.pool_suggest.stop()
self.pool_search.stop()
def suggest(self, keyword):
if not keyword:
yield []
return
output = Queue()
k = len(self.engines_suggest) // 2
for engine in random.sample(self.engines_suggest, k):
self.pool_suggest.put(engine, PoolItem('suggest', (keyword,), output))
failure = 0
result_set = set()
while failure < 1:
try:
result_set.update(output.get(timeout=1))
except Empty:
failure += 1
ordered_results = parsing.rank_list(result_set, keyword)[0:10]
result_set = set(ordered_results)
yield ordered_results
def search(self, keyword, from_id):
if not keyword:
yield []
return
output = Queue()
for engine in self.engines_search:
if not parsing.is_meta(engine):
self.pool_search.put(engine, PoolItem('search', (keyword, from_id + 1, None), output))
else:
for site in parsing.domains:
filtered = engine.site_filter(site, keyword)
self.pool_search.put(engine, PoolItem('search', (filtered, from_id + 1, None), output))
failure = 0
result_set = set()
while failure < 5:
try:
new_results = set(output.get(timeout=1))
print('Search %s: %d unique results' % (repr(keyword), len(result_set)))
yield parsing.rank_list(new_results - result_set, keyword)
result_set.update(new_results)
except Empty:
failure += 1
class WSHandler(WebSocket):
def opened(self):
cherrypy.engine.log('WebSocket opened')
def received_message(self, msg):
cherrypy.engine.log('Received ' + str(msg))
try:
params = json.loads(str(msg))
verb = params['verb']
if verb == 'suggest':
self.ws_suggest(unquote(params['keyword']))
elif verb == 'search':
self.ws_search(unquote(params['keyword']), params['from_id'])
else:
                raise ValueError('Unknown verb. (suggest, search)')
except (KeyError, AttributeError, TypeError, ValueError) as e:
cherrypy.engine.log('Handler Exception - %s' % repr(e))
cherrypy.engine.log(traceback.format_exc())
def closed(self, code, reason):
cherrypy.engine.log('A client left')
def ws_suggest(self, keyword):
results = Queue()
cherrypy.engine.publish('detour_suggest', keyword, results)
generator = results.get()
for item in generator:
if item:
msg = json.dumps({'from': keyword, 'results': item})
cherrypy.engine.publish('websocket-broadcast', msg)
def ws_search(self, keyword, from_id):
results = Queue()
cherrypy.engine.publish('detour_search', keyword, from_id, results)
generator = results.get()
for r_list in generator:
if r_list:
d = {
'results': [r.items() for r in r_list],
'keyword': keyword,
'from_id': from_id,
}
cherrypy.engine.publish('websocket-broadcast', json.dumps(d))
class Daemon(SimplePlugin):
def __init__(self, bus):
SimplePlugin.__init__(self, bus)
def start(self):
self.bus.log('Daemon plugin starts')
self.priority = 70
self.search_daemon = Search()
self.search_daemon.start()
self.bus.subscribe('detour_suggest', self.suggest_handler)
self.bus.subscribe('detour_search', self.search_handler)
def stop(self):
self.bus.unsubscribe('detour_suggest', self.suggest_handler)
self.bus.unsubscribe('detour_search', self.search_handler)
self.search_daemon.stop()
self.bus.log('Daemon plugin stops')
def suggest_handler(self, keyword, bucket):
self.bus.log('Suggest ' + repr(keyword))
generator = self.search_daemon.suggest(keyword)
print("suggest_handler: got generator")
bucket.put(generator)
def search_handler(self, keyword, from_id, bucket):
self.bus.log('Search ' + repr(keyword) + ' from ID ' + repr(from_id))
generator = self.search_daemon.search(keyword, from_id)
print("search_handler: got generator")
bucket.put(generator)
class Detour:
def __init__(self, public):
self.public = public
@cherrypy.expose
def index(self, q=None):
return serve_file(self.public + '/index.html')
@cherrypy.expose
def ws(self):
handler = cherrypy.request.ws_handler
cherrypy.log("Handler created: %s" % repr(handler))
| gpl-3.0 | 3,814,767,568,741,064,000 | 32.119565 | 107 | 0.582212 | false |
niklasberglund/freesprints | source/freesprints/__init__.py | 1 | 9191 | import pygame, sys
import pygame.font
from pygame.locals import *
import logging
import fs_menu
import helpers as helpers
import plugins
import os.path
import race
import hardware
import defaults
import logging
from rainbow_logging_handler import RainbowLoggingHandler
DISPLAY_RESOLUTION = (1024, 768)
# platform-specific imports
if helpers.is_running_on_rpi():# running on Raspberry Pi
import RPi.GPIO
import os
print "ON RASPBERRY PI"
#os.environ['SDL_VIDEODRIVER']="fbcon"
#os.environ["SDL_FBDEV"] = "/dev/fb1"
print "SET DRIVER"
else: # running on computer
import FakeRPi.GPIO
class Application(object):
instance = None
state = None
# application state constants
STATE_MAINMENU = 0
STATE_INGAME = 1
# member variables
window_surface = None
menu_surface = None
menu = None
state = STATE_MAINMENU
plugin_loader = None
roller_controller = None
race_options = None
race_object = None
selected_plugin_index = 0 # 0 by default. this should ideally be restored from stored settings
def __init__(self):
print "Application.__init__"
pygame.font.init()
menu_options_dict = {
"font_path": "fonts/Cave-Story.ttf",
"font_size": 42,
"color_background": (0, 0, 0),
"color_text": (255, 255, 255),
"color_text_highlight": (100, 20, 45)
}
menu_structure = [
{
"title": "New race",
"callback": self.start_race,
"submenu": [
{
"title": "Start",
"callback": self.start_race
},
{
"title": "Race visualizer",
"callback": None,
"submenu_populator_callback": self.populate_visualizers,
"identifier": "race_visualizer_selection"
},
{
"title": "Number of rollers",
"input": {
"type": "int",
"verifier": None,
"value": "2"
},
"callback": self.start_race
},
{
"title": "Roller diameter(mm)",
"input": {
"type": "int",
"verifier": None,
"value": "200"
}
}
]
},
{
"title": "Options",
"callback": self.show_options
},
{
"title": "Exit",
"callback": self.exit
}
]
#self.window_surface = pygame.display.set_mode((500, 400), pygame.FULLSCREEN, 32)
pygame.display.init()
self.window_surface = pygame.display.set_mode(defaults.RESOLUTION, 0, 32)
menu_options = fs_menu.MenuOptions(menu_options_dict)
self.menu = fs_menu.Menu(self.window_surface, menu_structure, menu_options)
self.roller_controller = hardware.RollerController()
def load_plugins(self):
self.plugin_loader = plugins.PluginLoader()
def start_race(self):
print "start game"
self.state = self.STATE_INGAME
race_options = race.Options()
race_participants = ([
race.Participant("Niklas", 7, Color("red")),
race.Participant("Some loser", 11, Color("blue"))
])
self.race_object = race.Race(race_options, race_participants)
plugins = self.plugin_loader.getAvailablePlugins()
self.race_object.start()
plugins[self.selected_plugin_index].start(self.race_object)
def show_options(self):
print "show options"
def populate_visualizers(self):
print "populate_visualizers"
submenu = []
pluginIndex = 0
for plugin in self.plugin_loader.getAvailablePlugins():
submenu.append({
"title": plugin.name,
"callback": self.select_plugin,
"tag": pluginIndex
})
pluginIndex = pluginIndex + 1
return submenu
def select_plugin(self, plugin_index):
print "selected plugin with index " + str(plugin_index)
self.selected_plugin_index = plugin_index
def exit(self):
pygame.quit()
sys.exit()
def hide(self):
pass
def get_window_surface(self):
return self.window_surface
def game_loop(self):
# run the game loop
while True:
for event in pygame.event.get():
if event.type == pygame.locals.QUIT:
self.exit()
elif event.type == pygame.locals.KEYUP:
if self.state == self.STATE_MAINMENU:
self.menu.registerKeypress(event.key)
elif event.key == pygame.locals.K_ESCAPE:
self.exit()
def start(self):
# set up pygame
pygame.init()
pygame.font.init()
if helpers.is_running_on_rpi():
disp_no = os.getenv("DISPLAY")
if disp_no:
print "I'm running under X display = {0}".format(disp_no)
# Check which frame buffer drivers are available
# Start with fbcon since directfb hangs with composite output
drivers = ['fbcon', 'directfb', 'svgalib']
found = False
for driver in drivers:
# Make sure that SDL_VIDEODRIVER is set
if not os.getenv('SDL_VIDEODRIVER'):
os.putenv('SDL_VIDEODRIVER', driver)
try:
pygame.display.init()
except pygame.error:
print 'Driver: {0} failed.'.format(driver)
continue
found = True
break
if not found:
raise Exception('No suitable video driver found!')
size = (pygame.display.Info().current_w, pygame.display.Info().current_h)
print "Framebuffer size: %d x %d" % (size[0], size[1])
#self.window_surface = pygame.display.set_mode(size, pygame.FULLSCREEN)
# set up the window
pygame.display.set_caption('Freesprints')
# set up the colors
BLACK = (0, 0, 0)
WHITE = (255, 255, 255)
RED = (255, 0, 0)
GREEN = (0, 255, 0)
BLUE = (0, 0, 255)
# set up fonts
#availableFonts = pygame.font.get_fonts()
font_path = "./fonts/Cave-Story.ttf"
#basicFont = pygame.font.SysFont(None, 30)
basicFont = pygame.font.Font(font_path, 48)
# set up the text
#text = basicFont.render('asdasd', True, WHITE, BLUE)
#textRect = text.get_rect()
#textRect.centerx = self.window_surface.get_rect().centerx
#textRect.centery = self.window_surface.get_rect().centery
# draw the white background onto the surface
self.window_surface.fill(BLACK)
# draw a green polygon onto the surface
#pygame.draw.polygon(self.window_surface, GREEN, ((146, 0), (291, 106), (236, 277), (56, 277), (0, 106)))
# draw some blue lines onto the surface
#pygame.draw.line(self.window_surface, BLUE, (60, 60), (120, 60), 4)
#pygame.draw.line(self.window_surface, BLUE, (120, 60), (60, 120))
#pygame.draw.line(self.window_surface, BLUE, (60, 120), (120, 120), 4)
# draw a blue circle onto the surface
#pygame.draw.circle(self.window_surface, BLUE, (300, 50), 20, 0)
# draw a red ellipse onto the surface
#pygame.draw.ellipse(self.window_surface, RED, (450, 160, 40, 80), 1)
# menu background
background = pygame.image.load('images/menu_background.png').convert()
backgroundRect = background.get_rect()
backgroundRect.x = 0
backgroundRect.y = 0
self.window_surface.blit(background, backgroundRect)
# draw the window onto the screen
pygame.display.update()
self.menu.render()
self.game_loop()
app = None
logger = None
def get_app():
global app
if app == None:
app = Application()
return app
def get_logger():
global logger
if logger == None:
logger = logging.getLogger('freesprints')
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter("[%(asctime)s] %(name)s %(funcName)s():%(lineno)d\t%(message)s") # same as default
# setup colored logging
handler = RainbowLoggingHandler(sys.stderr, color_funcName=('black', 'yellow', True))
handler.setFormatter(formatter)
logger.addHandler(handler)
return logger
def init():
global app
print "start"
app = get_app()
app.load_plugins()
app.start()
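# Hedged sketch (assumption about the plugins.PluginLoader contract): the code
# above only requires that each visualizer plugin exposes a `name` attribute
# and a `start(race_object)` method, so a minimal plugin could look like this.
class ExampleVisualizer(object):
    name = "Example visualizer"

    def start(self, race_object):
        # race_object is the race.Race instance created in Application.start_race
        print "Example visualizer started"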
| mit | -8,094,526,841,021,716,000 | 29.842282 | 121 | 0.527581 | false |
pburdet/hyperspy | hyperspy/_signals/eds.py | 1 | 21939 | # -*- coding: utf-8 -*-
# Copyright 2007-2011 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
from __future__ import division
import numpy as np
from hyperspy import utils
from hyperspy._signals.spectrum import Spectrum
from hyperspy.misc.elements import elements as elements_db
from hyperspy.misc.eds import utils as utils_eds
from hyperspy.misc.utils import isiterable
class EDSSpectrum(Spectrum):
_signal_type = "EDS"
def __init__(self, *args, **kwards):
Spectrum.__init__(self, *args, **kwards)
if self.metadata.Signal.signal_type == 'EDS':
print('The microscope type is not set. Use '
'set_signal_type(\'EDS_TEM\') or set_signal_type(\'EDS_SEM\')')
self.metadata.Signal.binned = True
def _get_line_energy(self, Xray_line, FWHM_MnKa=None):
"""
        Get the line energy and the energy resolution of an X-ray line.
        The return values are in the same units as the signal axis.
        Parameters
        ----------
        Xray_line : strings
            Valid element X-ray lines e.g. Fe_Kb.
        FWHM_MnKa: {None, float, 'auto'}
            The energy resolution of the detector in eV.
            If 'auto', use the one in
            'self.metadata.Acquisition_instrument.SEM.Detector.EDS.energy_resolution_MnKa'
        Returns
        -------
        float: the line energy, if FWHM_MnKa is None
        (float, float): the line energy and the energy resolution, if FWHM_MnKa is not None
        """
units_name = self.axes_manager.signal_axes[0].units
if FWHM_MnKa == 'auto':
if self.metadata.Signal.signal_type == 'EDS_SEM':
FWHM_MnKa = self.metadata.Acquisition_instrument.SEM.Detector.EDS.energy_resolution_MnKa
elif self.metadata.Signal.signal_type == 'EDS_TEM':
FWHM_MnKa = self.metadata.Acquisition_instrument.TEM.Detector.EDS.energy_resolution_MnKa
else:
raise NotImplementedError(
"This method only works for EDS_TEM or EDS_SEM signals. "
"You can use `set_signal_type(\"EDS_TEM\")` or"
"`set_signal_type(\"EDS_SEM\")` to convert to one of these"
"signal types.")
line_energy = utils_eds._get_energy_xray_line(Xray_line)
if units_name == 'eV':
line_energy *= 1000
if FWHM_MnKa is not None:
line_FWHM = utils_eds.get_FWHM_at_Energy(FWHM_MnKa,
line_energy / 1000) * 1000
elif units_name == 'keV':
if FWHM_MnKa is not None:
line_FWHM = utils_eds.get_FWHM_at_Energy(FWHM_MnKa,
line_energy)
else:
raise ValueError(
"%s is not a valid units for the energy axis. "
"Only `eV` and `keV` are supported. "
"If `s` is the variable containing this EDS spectrum:\n "
">>> s.axes_manager.signal_axes[0].units = \'keV\' \n"
% (units_name))
if FWHM_MnKa is None:
return line_energy
else:
return line_energy, line_FWHM
def _get_beam_energy(self):
"""
Get the beam energy.
        The return value is in the same units as the signal axis.
"""
if "Acquisition_instrument.SEM.beam_energy" in self.metadata:
beam_energy = self.metadata.Acquisition_instrument.SEM.beam_energy
elif "Acquisition_instrument.TEM.beam_energy" in self.metadata:
beam_energy = self.metadata.Acquisition_instrument.TEM.beam_energy
else:
raise AttributeError(
"To use this method the beam energy `Acquisition_instrument.TEM.beam_energy` "
"or `Acquisition_instrument.SEM.beam_energy` must be defined in "
"`metadata`.")
units_name = self.axes_manager.signal_axes[0].units
if units_name == 'eV':
beam_energy = beam_energy * 1000
return beam_energy
def sum(self, axis):
"""Sum the data over the given axis.
Parameters
----------
axis : {int, string}
The axis can be specified using the index of the axis in
`axes_manager` or the axis name.
Returns
-------
s : Signal
See also
--------
sum_in_mask, mean
Examples
--------
>>> import numpy as np
>>> s = Signal(np.random.random((64,64,1024)))
>>> s.data.shape
(64,64,1024)
>>> s.sum(-1).data.shape
(64,64)
# If we just want to plot the result of the operation
s.sum(-1, True).plot()
"""
        # modify time spent per spectrum
if "Acquisition_instrument.SEM" in self.metadata:
mp = self.metadata.Acquisition_instrument.SEM
else:
mp = self.metadata.Acquisition_instrument.TEM
if mp.has_item('Detector.EDS.live_time'):
mp.Detector.EDS.live_time = mp.Detector.EDS.live_time * \
self.axes_manager.shape[axis]
return super(EDSSpectrum, self).sum(axis)
def rebin(self, new_shape):
"""Rebins the data to the new shape
Parameters
----------
new_shape: tuple of ints
The new shape must be a divisor of the original shape
"""
new_shape_in_array = []
for axis in self.axes_manager._axes:
new_shape_in_array.append(
new_shape[axis.index_in_axes_manager])
factors = (np.array(self.data.shape) /
np.array(new_shape_in_array))
s = super(EDSSpectrum, self).rebin(new_shape)
# modify time per spectrum
if "Acquisition_instrument.SEM.Detector.EDS.live_time" in s.metadata:
for factor in factors:
s.metadata.Acquisition_instrument.SEM.Detector.EDS.live_time *= factor
if "Acquisition_instrument.TEM.Detector.EDS.live_time" in s.metadata:
for factor in factors:
s.metadata.Acquisition_instrument.TEM.Detector.EDS.live_time *= factor
return s
def set_elements(self, elements):
"""Erase all elements and set them.
Parameters
----------
elements : list of strings
A list of chemical element symbols.
See also
--------
        add_elements, set_lines, add_lines.
Examples
--------
>>> s = signals.EDSSEMSpectrum(np.arange(1024))
>>> s.set_elements(['Ni', 'O'],['Ka','Ka'])
Adding Ni_Ka Line
Adding O_Ka Line
        >>> s.metadata.Acquisition_instrument.SEM.beam_energy = 10
>>> s.set_elements(['Ni', 'O'])
Adding Ni_La Line
Adding O_Ka Line
"""
# Erase previous elements and X-ray lines
if "Sample.elements" in self.metadata:
del self.metadata.Sample.elements
self.add_elements(elements)
def add_elements(self, elements):
"""Add elements and the corresponding X-ray lines.
The list of elements is stored in `metadata.Sample.elements`
Parameters
----------
elements : list of strings
The symbol of the elements.
See also
--------
set_elements, add_lines, set_lines.
"""
if not isiterable(elements) or isinstance(elements, basestring):
raise ValueError(
"Input must be in the form of a list. For example, "
"if `s` is the variable containing this EDS spectrum:\n "
">>> s.add_elements(('C',))\n"
"See the docstring for more information.")
if "Sample.elements" in self.metadata:
elements_ = set(self.metadata.Sample.elements)
else:
elements_ = set()
for element in elements:
if element in elements_db:
elements_.add(element)
else:
raise ValueError(
"%s is not a valid chemical element symbol." % element)
if not hasattr(self.metadata, 'Sample'):
self.metadata.add_node('Sample')
self.metadata.Sample.elements = sorted(list(elements_))
def set_lines(self,
lines,
only_one=True,
only_lines=("Ka", "La", "Ma")):
"""Erase all Xrays lines and set them.
See add_lines for details.
Parameters
----------
lines : list of strings
A list of valid element X-ray lines to add e.g. Fe_Kb.
Additionally, if `metadata.Sample.elements` is
            defined, add the lines of those elements that were not
            given in this list.
        only_one: bool
            If False, add all the lines of each element in
            `metadata.Sample.elements` that does not have a line
            defined in lines. If True (default),
only add the line at the highest energy
above an overvoltage of 2 (< beam energy / 2).
only_lines : {None, list of strings}
If not None, only the given lines will be added.
See also
--------
        add_lines, add_elements, set_elements.
"""
if "Sample.xray_lines" in self.metadata:
del self.metadata.Sample.xray_lines
self.add_lines(lines=lines,
only_one=only_one,
only_lines=only_lines)
def add_lines(self,
lines=(),
only_one=True,
only_lines=("Ka", "La", "Ma")):
"""Add X-rays lines to the internal list.
        Although most functions do not require an internal list of
        X-ray lines because they can be calculated from the internal
        list of elements, occasionally it might be useful to customize the
        X-ray lines to be used by all functions by default using this method.
The list of X-ray lines is stored in
`metadata.Sample.xray_lines`
Parameters
----------
lines : list of strings
A list of valid element X-ray lines to add e.g. Fe_Kb.
Additionally, if `metadata.Sample.elements` is
            defined, add the lines of those elements that were not
            given in this list. If the list is empty (default), and
            `metadata.Sample.elements` is
            defined, add the lines of all those elements.
        only_one: bool
            If False, add all the lines of each element in
            `metadata.Sample.elements` that does not have a line
            defined in lines. If True (default),
only add the line at the highest energy
above an overvoltage of 2 (< beam energy / 2).
only_lines : {None, list of strings}
If not None, only the given lines will be added.
See also
--------
set_lines, add_elements, set_elements.
"""
if "Sample.xray_lines" in self.metadata:
xray_lines = set(self.metadata.Sample.xray_lines)
else:
xray_lines = set()
# Define the elements which Xray lines has been customized
# So that we don't attempt to add new lines automatically
elements = set()
for line in xray_lines:
elements.add(line.split("_")[0])
end_energy = self.axes_manager.signal_axes[0].high_value
for line in lines:
try:
element, subshell = line.split("_")
except ValueError:
raise ValueError(
"Invalid line symbol. "
"Please provide a valid line symbol e.g. Fe_Ka")
if element in elements_db:
elements.add(element)
if subshell in elements_db[element]['Atomic_properties']['Xray_lines']:
lines_len = len(xray_lines)
xray_lines.add(line)
if lines_len != len(xray_lines):
print("%s line added," % line)
else:
print("%s line already in." % line)
if (self._get_line_energy(element + '_' + subshell) > end_energy):
print("Warning: %s %s is above the data energy range."
% (element, subshell))
else:
raise ValueError(
"%s is not a valid line of %s." % (line, element))
else:
raise ValueError(
"%s is not a valid symbol of an element." % element)
if "Sample.elements" in self.metadata:
extra_elements = (set(self.metadata.Sample.elements) -
elements)
if extra_elements:
new_lines = self._get_lines_from_elements(
extra_elements,
only_one=only_one,
only_lines=only_lines)
if new_lines:
self.add_lines(list(new_lines) + list(lines))
self.add_elements(elements)
if not hasattr(self.metadata, 'Sample'):
self.metadata.add_node('Sample')
if "Sample.xray_lines" in self.metadata:
xray_lines = xray_lines.union(
self.metadata.Sample.xray_lines)
self.metadata.Sample.xray_lines = sorted(list(xray_lines))
def _get_lines_from_elements(self,
elements,
only_one=False,
only_lines=("Ka", "La", "Ma")):
"""Returns the X-ray lines of the given elements in spectral range
of the data.
Parameters
----------
elements : list of strings
A list containing the symbol of the chemical elements.
only_one : bool
If False, add all the lines of each element in the data spectral
range. If True only add the line at the highest energy
above an overvoltage of 2 (< beam energy / 2).
only_lines : {None, list of strings}
If not None, only the given lines will be returned.
Returns
-------
"""
beam_energy = self._get_beam_energy()
end_energy = self.axes_manager.signal_axes[0].high_value
if beam_energy < end_energy:
end_energy = beam_energy
lines = []
for element in elements:
# Possible line (existing and excited by electron)
element_lines = []
for subshell in elements_db[element]['Atomic_properties']['Xray_lines'].keys():
if only_lines and subshell not in only_lines:
continue
if (self._get_line_energy(element + '_' + subshell) < end_energy):
element_lines.append(element + "_" + subshell)
if only_one and element_lines:
# Choose the best line
select_this = -1
for i, line in enumerate(element_lines):
if (self._get_line_energy(line) < beam_energy / 2):
select_this = i
break
element_lines = [element_lines[select_this], ]
if not element_lines:
print(("There is not X-ray line for element %s " % element) +
"in the data spectral range")
else:
lines.extend(element_lines)
return lines
def get_lines_intensity(self,
xray_lines=None,
plot_result=False,
integration_window_factor=2.,
only_one=True,
only_lines=("Ka", "La", "Ma"),
**kwargs):
"""Return the intensity map of selected Xray lines.
        The intensities, the number of X-ray counts, are computed by
        summing the spectrum over the
        different X-ray lines. The sum window width
        is calculated from the energy resolution of the detector
        as defined in
        `self.metadata.Acquisition_instrument.SEM.Detector.EDS.energy_resolution_MnKa` or
        `self.metadata.Acquisition_instrument.TEM.Detector.EDS.energy_resolution_MnKa`.
Parameters
----------
xray_lines: {None, "best", list of string}
If None,
if `mapped.parameters.Sample.elements.xray_lines` contains a
list of lines use those.
If `mapped.parameters.Sample.elements.xray_lines` is undefined
or empty but `mapped.parameters.Sample.elements` is defined,
use the same syntax as `add_line` to select a subset of lines
for the operation.
Alternatively, provide an iterable containing
a list of valid X-ray lines symbols.
plot_result : bool
If True, plot the calculated line intensities. If the current
object is a single spectrum it prints the result instead.
integration_window_factor: Float
The integration window is centered at the center of the X-ray
line and its width is defined by this factor (2 by default)
times the calculated FWHM of the line.
only_one : bool
If False, use all the lines of each element in the data spectral
range. If True use only the line at the highest energy
above an overvoltage of 2 (< beam energy / 2).
only_lines : {None, list of strings}
If not None, use only the given lines.
kwargs
The extra keyword arguments for plotting. See
`utils.plot.plot_signals`
Returns
-------
intensities : list
A list containing the intensities as Signal subclasses.
Examples
--------
>>> specImg.get_lines_intensity(["C_Ka", "Ta_Ma"])
See also
--------
set_elements, add_elements.
"""
if xray_lines is None:
if 'Sample.xray_lines' in self.metadata:
xray_lines = self.metadata.Sample.xray_lines
elif 'Sample.elements' in self.metadata:
xray_lines = self._get_lines_from_elements(
self.metadata.Sample.elements,
only_one=only_one,
only_lines=only_lines)
else:
raise ValueError(
"Not X-ray line, set them with `add_elements`")
intensities = []
# test 1D Spectrum (0D problem)
#signal_to_index = self.axes_manager.navigation_dimension - 2
for Xray_line in xray_lines:
line_energy, line_FWHM = self._get_line_energy(Xray_line,
FWHM_MnKa='auto')
det = integration_window_factor * line_FWHM / 2.
img = self[..., line_energy - det:line_energy + det
].integrate1D(-1)
img.metadata.General.title = (
'Intensity of %s at %.2f %s from %s' %
(Xray_line,
line_energy,
self.axes_manager.signal_axes[0].units,
self.metadata.General.title))
if img.axes_manager.navigation_dimension >= 2:
img = img.as_image([0, 1])
elif img.axes_manager.navigation_dimension == 1:
img.axes_manager.set_signal_dimension(1)
if plot_result and img.axes_manager.signal_dimension == 0:
print("%s at %s %s : Intensity = %.2f"
% (Xray_line,
line_energy,
self.axes_manager.signal_axes[0].units,
img.data))
intensities.append(img)
if plot_result and img.axes_manager.signal_dimension != 0:
utils.plot.plot_signals(intensities, **kwargs)
return intensities
def get_take_off_angle(self):
"""Calculate the take-off-angle (TOA).
TOA is the angle with which the X-rays leave the surface towards
the detector. Parameters are read in 'SEM.tilt_stage',
'Acquisition_instrument.SEM.Detector.EDS.azimuth_angle' and 'SEM.Detector.EDS.elevation_angle'
in 'metadata'.
Returns
-------
take_off_angle: float (Degree)
See also
--------
utils.eds.take_off_angle
Notes
-----
Defined by M. Schaffer et al., Ultramicroscopy 107(8), pp 587-597 (2007)
"""
if self.metadata.Signal.signal_type == 'EDS_SEM':
mp = self.metadata.Acquisition_instrument.SEM
elif self.metadata.Signal.signal_type == 'EDS_TEM':
mp = self.metadata.Acquisition_instrument.TEM
tilt_stage = mp.tilt_stage
azimuth_angle = mp.Detector.EDS.azimuth_angle
elevation_angle = mp.Detector.EDS.elevation_angle
TOA = utils.eds.take_off_angle(tilt_stage, azimuth_angle,
elevation_angle)
return TOA
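# Hedged usage sketch, mirroring the docstring examples above (assumptions:
# the EDSSEMSpectrum subclass exported by hyperspy.signals exists and its
# default metadata already contains the Acquisition_instrument.SEM node).
if __name__ == '__main__':
    import numpy as np
    from hyperspy import signals

    s = signals.EDSSEMSpectrum(np.arange(1024))
    s.axes_manager.signal_axes[0].units = 'keV'
    s.metadata.Acquisition_instrument.SEM.beam_energy = 10
    s.set_elements(['Ni', 'O'])
    s.add_lines()
    print(s.metadata.Sample.xray_lines)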
| gpl-3.0 | 1,777,743,600,678,881,300 | 37.489474 | 104 | 0.548749 | false |
stevedh/queryutils | queryutils/user.py | 1 | 2181 |
from json import JSONEncoder
class User(object):
def __init__(self, name):
self.name = name
self.sessions = {}
self.queries = []
class VerboseUserEncoder(JSONEncoder):
def encode(self, obj):
user_dict = {}
user_dict['name'] = obj.name
session_dict = {}
for (session_id, session) in obj.sessions.iteritems():
session_dict[session_id] = SessionEncoder().default(session)
query_list = []
for query in obj.queries:
query_list.append(QueryEncoder().default(query))
user_dict['queries'] = query_list
return user_dict
def default(self, obj):
if isinstance(obj, User):
return self.encode(obj)
return JSONEncoder.default(self, obj)
class UserEncoder(JSONEncoder):
def encode(self, obj):
user_dict = {}
user_dict['name'] = obj.name
session_dict = {}
for (session_id, session) in obj.sessions.iteritems():
session_dict['id'] = session_id
query_list = []
for query in session.queries:
query_dict = {}
query_dict['delta'] = query.delta
query_dict['time'] = query.time
query_dict['text'] = query.text
query_list.append(query_dict)
session_dict['queries'] = query_list
session_dict['user'] = obj.name
try:
autorecurring_query_list = []
for query in obj.autorecurring_queries:
query_dict = {}
query_dict['repeat_delta'] = query.repeat_delta
query_dict['time'] = query.time
query_dict['text'] = query.text
autorecurring_query_list.append(query_dict)
user_dict['autorecurring_queries'] = autorecurring_query_list
except AttributeError:
print "Not encoding autorecurring queries. No such attribute."
user_dict['sessions'] = session_dict
return user_dict
def default(self, obj):
if isinstance(obj, User):
return self.encode(obj)
return JSONEncoder.default(self, obj)
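# Hedged usage sketch (assumptions: SessionEncoder and QueryEncoder referenced
# by VerboseUserEncoder above are defined in sibling modules; only UserEncoder,
# whose encode() returns a plain dict, is exercised here).
if __name__ == "__main__":
    import json
    user = User("alice")
    print json.dumps(UserEncoder().default(user))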
| bsd-3-clause | 8,245,894,538,224,595,000 | 33.078125 | 74 | 0.558459 | false |
carolFrohlich/nipype | nipype/pipeline/plugins/tests/test_debug.py | 2 | 1618 | # -*- coding: utf-8 -*-
import os
import nipype.interfaces.base as nib
from tempfile import mkdtemp
from shutil import rmtree
from nipype.testing import assert_raises, assert_false
import nipype.pipeline.engine as pe
class InputSpec(nib.TraitedSpec):
input1 = nib.traits.Int(desc='a random int')
input2 = nib.traits.Int(desc='a random int')
class OutputSpec(nib.TraitedSpec):
output1 = nib.traits.List(nib.traits.Int, desc='outputs')
class TestInterface(nib.BaseInterface):
input_spec = InputSpec
output_spec = OutputSpec
def _run_interface(self, runtime):
runtime.returncode = 0
return runtime
def _list_outputs(self):
outputs = self._outputs().get()
outputs['output1'] = [1, self.inputs.input1]
return outputs
def callme(node, graph):
pass
def test_debug():
cur_dir = os.getcwd()
temp_dir = mkdtemp(prefix='test_engine_')
os.chdir(temp_dir)
pipe = pe.Workflow(name='pipe')
mod1 = pe.Node(interface=TestInterface(), name='mod1')
mod2 = pe.MapNode(interface=TestInterface(),
iterfield=['input1'],
name='mod2')
pipe.connect([(mod1, mod2, [('output1', 'input1')])])
pipe.base_dir = os.getcwd()
mod1.inputs.input1 = 1
run_wf = lambda: pipe.run(plugin="Debug")
yield assert_raises, ValueError, run_wf
try:
pipe.run(plugin="Debug", plugin_args={'callable': callme})
exception_raised = False
except Exception:
exception_raised = True
yield assert_false, exception_raised
os.chdir(cur_dir)
rmtree(temp_dir)
| bsd-3-clause | 4,694,182,697,043,114,000 | 25.966667 | 66 | 0.645859 | false |
ajhager/copycat | copycat/workspace/string.py | 1 | 12784 | # Copyright (c) 2007-2017 Joseph Hager.
#
# Copycat is free software; you can redistribute it and/or modify
# it under the terms of version 2 of the GNU General Public License,
# as published by the Free Software Foundation.
#
# Copycat is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Copycat; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
"""String"""
import random
import copycat.toolbox as toolbox
class String(object):
"""String is a letter string in the workspace.
This could be the initial string, modified string or target string.
    Each object in a string has a unique string number that distinguishes
    it from other objects in the string."""
def __init__(self, workspace, string):
self.workspace = workspace
self.slipnet = self.workspace.slipnet
self.name = string
self.highest_string_number = -1
self.length = len(string)
self.letters = {}
self.groups = {}
self.proposed_groups = {}
self.object_positions = {}
self.left_right_bonds = {}
self.from_to_bonds = {}
self.proposed_bonds = {}
self.intra_string_unhappiness = 0
self.bonds_to_scan_distribution = range(self.length)
def add_to_object_positions(self, obj, position):
"""Add an object to the object positions."""
if position in self.object_positions:
self.object_positions[position].append(obj)
else:
self.object_positions[position] = [obj]
def remove_from_object_positions(self, obj, position):
"""Remove an object from the object positions."""
if obj in self.object_positions[position]:
self.object_positions[position].remove(obj)
def add_letter(self, letter):
"""Add a letter to the string."""
self.highest_string_number += 1
letter.string_number = self.highest_string_number
position = letter.left_string_position
self.letters[position] = letter
self.add_to_object_positions(letter, position)
def get_letters(self):
"""Return a list of letters in the string."""
return [self.letters[index] for index in sorted(self.letters.keys())]
def get_letter(self, position):
"""Return the letter at the given position in the string."""
return self.letters.get(position)
def get_random_letter(self):
"""Return a random letter from the string."""
return random.choice(self.get_letters())
def get_leftmost_letter(self):
"""Return the leftmost letter in the string."""
return self.letters.get(0)
def get_rightmost_letter(self):
"""Return the rightmost letter in the string."""
return self.letters.get(len(self.letters) - 1)
def add_group(self, group):
"""Add a group to the string."""
self.highest_string_number += 1
group.string_number = self.highest_string_number
self.groups[group.left_object.string_number] = group
self.add_to_object_positions(group, group.left_string_position)
self.add_to_object_positions(group, group.right_string_position)
def remove_group(self, group):
"""Remove a group from the string."""
if group.left_object.string_number in self.groups:
del self.groups[group.left_object.string_number]
self.remove_from_object_positions(group, group.left_string_position)
self.remove_from_object_positions(group, group.right_string_position)
def get_groups(self):
"""Return a list of groups in the string."""
return list(self.groups.values())
def get_group(self, position):
"""Return the group at the given position in letters.
Positions start at 0 and refer to the position of the leftmost object
in the group."""
return self.get_letter(position).group
def get_existing_group(self, group):
"""Return the group in the string if it has the same properties as
the given group."""
existing_group = self.groups.get(group.left_object.string_number)
if existing_group:
if existing_group.length == group.length and \
existing_group.group_category == group.group_category and \
existing_group.direction_category == group.direction_category:
return existing_group
def add_proposed_group(self, group):
"""Add a proposed group to the string."""
position = (group.left_object.string_number,
group.right_object.string_number)
if position in self.proposed_groups:
self.proposed_groups[position].append(group)
else:
self.proposed_groups[position] = [group]
def remove_proposed_group(self, group):
"""Remove a proposed group from the string."""
position = (group.left_object.string_number,
group.right_object.string_number)
items = self.proposed_groups.get(position, [])
if group in items:
self.proposed_groups[position].remove(group)
def get_proposed_groups(self):
"""Return a list of the proposed groups in the string."""
return list(set(toolbox.flatten(self.proposed_groups.values())))
def get_proposed_group(self, first, second):
"""Return the proposed group at first, second position."""
return self.proposed_groups.get((first, second))
def add_bond(self, bond):
"""Add a bond to the string, sameness bonds in both directions."""
left_number = bond.left_object.string_number
right_number = bond.right_object.string_number
self.left_right_bonds[(left_number, right_number)] = bond
from_number = bond.from_object.string_number
to_number = bond.to_object.string_number
self.from_to_bonds[(from_number, to_number)] = bond
if bond.bond_category == self.slipnet.plato_sameness:
self.left_right_bonds[(right_number, left_number)] = bond
self.from_to_bonds[(to_number, from_number)] = bond
def remove_bond(self, bond):
"""Remove a built bond from the string."""
left_number = bond.left_object.string_number
right_number = bond.right_object.string_number
if (left_number, right_number) in self.left_right_bonds:
del self.left_right_bonds[(left_number, right_number)]
from_number = bond.from_object.string_number
to_number = bond.to_object.string_number
if (from_number, to_number) in self.from_to_bonds:
del self.from_to_bonds[(from_number, to_number)]
if bond.bond_category == self.slipnet.plato_sameness:
if (right_number, left_number) in self.left_right_bonds:
del self.left_right_bonds[(right_number, left_number)]
if (to_number, from_number) in self.from_to_bonds:
del self.from_to_bonds[(to_number, from_number)]
def get_bonds(self):
"""Return a list of the built bonds in the string."""
return list(set(self.from_to_bonds.values()))
def get_bond(self, from_object, to_object):
"""Return the bond between the two objects, if any."""
return self.from_to_bonds.get((from_object.string_number,
to_object.string_number))
def get_existing_bond(self, bond):
"""Return the bond in the string if it has the same properties as
the given bond."""
existing_bond = self.get_bond(bond.from_object, bond.to_object)
if existing_bond:
if existing_bond.bond_category == bond.bond_category and \
existing_bond.direction_category == bond.direction_category:
return existing_bond
def add_proposed_bond(self, bond):
"""Add the proposed bond to the string."""
position = (bond.from_object.string_number,
bond.to_object.string_number)
if position in self.proposed_bonds:
self.proposed_bonds[position].append(bond)
else:
self.proposed_bonds[position] = [bond]
def remove_proposed_bond(self, bond):
"""Add the proposed bond to the string."""
position = (bond.from_object.string_number,
bond.to_object.string_number)
if position in self.proposed_bonds:
items = self.proposed_bonds[position]
if bond in items:
self.proposed_bonds[position].remove(bond)
def get_proposed_bonds(self):
"""Return a list of proposed bonds in the string."""
return list(set(toolbox.flatten(self.proposed_bonds.values())))
def get_proposed_bond(self, first, second):
"""Return a proposed bonds at first, second in the string."""
return self.proposed_bonds.get((first, second))
def get_objects(self, category=None):
"""Return the list of objects of the given object category.
If no category is given, return all objects."""
if category == self.slipnet.plato_letter:
return self.get_letters()
elif category == self.slipnet.plato_group:
return self.get_groups()
return self.get_letters() + self.get_groups()
def get_non_string_spanning_objects(self):
"""Return all objects that do not span the entire string."""
return [o for o in self.get_objects() if not o.spans_whole_string()]
def get_random_object(self, method=None):
"""Return a random object from the string."""
if method:
objects = self.get_objects()
values = [getattr(obj, method) for obj in objects]
values = self.workspace.temperature_adjusted_values(values)
return objects[toolbox.weighted_index(values)]
return random.choice(self.get_objects())
def get_random_leftmost_object(self):
"""Return a random leftmost object from the string."""
leftmost_objects = []
category = self.slipnet.plato_string_position_category
for obj in self.get_objects():
if obj.get_descriptor(category) == self.slipnet.plato_leftmost:
leftmost_objects.append(obj)
if leftmost_objects:
values = [obj.relative_importance for obj in leftmost_objects]
return toolbox.weighted_select(values, leftmost_objects)
def update_relative_importances(self):
"""Update the relative, normalized importances of all the objects in
the string."""
raw_importance = sum([o.raw_importance for o in self.get_objects()])
for obj in self.get_objects():
if raw_importance == 0:
importance = 0
else:
quot = obj.raw_importance / float(raw_importance)
importance = round(100 * quot)
obj.relative_importance = importance
def update_intra_string_unhappiness(self):
"""Calculate the average of the intra-string unhappiness of all the
objects in the string."""
unhappiness = [o.intra_string_unhappiness for o in self.get_objects()]
self.intra_string_unhappiness = round(toolbox.average(*unhappiness))
def local_bond_category_relevance(self, bond_category):
"""A function of how many bonds in the string have the given bond
category. This function is not perfect; it gives just a rough
estimate of the relevance of this bond category."""
objects = self.get_non_string_spanning_objects()
if len(objects) == 1:
return 0
bond_count = 0
for obj in objects:
if obj.right_bond:
if obj.right_bond.bond_category == bond_category:
bond_count += 1
return 100 * (float(bond_count) / (len(objects) - 1))
def local_direction_category_relevance(self, direction_category):
"""A function of how many bonds in the string have the given direction
category. This function is not perfect; it gives just a rough estimate
of the relevance of this direction category."""
objects = self.get_non_string_spanning_objects()
if len(objects) == 1:
return 0
bond_count = 0
for obj in objects:
if obj.right_bond:
if obj.right_bond.direction_category == direction_category:
bond_count += 1
return 100 * (float(bond_count) / (len(objects) - 1))
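# Worked example (illustrative): with four non-spanning objects whose right
# bonds include two of the given category, local_bond_category_relevance
# returns 100 * 2 / (4 - 1) ~= 66.7.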
| gpl-2.0 | -2,942,944,997,608,735,000 | 41.471761 | 80 | 0.632431 | false |
noahlittle/noahlittle.github.io | iCTRL/var/mobile/pentest/exploits/iCTRL/cupp/cupp.py | 1 | 55986 | #!/usr/bin/python
#
# [Program]
#
# CUPP 3.1
# Common User Passwords Profiler
#
#
#
# [Author]
#
# Muris Kurgas aka j0rgan
# j0rgan [at] remote-exploit [dot] org
# http://www.remote-exploit.org
# http://www.azuzi.me
#
#
#
# [License]
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# See 'docs/LICENSE' for more information.
import sys
import os
import ftplib
import ConfigParser
import urllib
import gzip
import csv
# Reading configuration file...
config = ConfigParser.ConfigParser()
config.read('cupp.cfg')
years = config.get('years', 'years').split(',')
chars = config.get('specialchars', 'chars').split(',')
numfrom = config.getint('nums','from')
numto = config.getint('nums','to')
wcfrom = config.getint('nums','wcfrom')
wcto = config.getint('nums','wcto')
threshold = config.getint('nums','threshold')
# 1337 mode configs, well you can add more lines if you add it to config file too.
# You will need to add more lines in two places in cupp.py code as well...
a = config.get('leet','a')
i = config.get('leet','i')
e = config.get('leet','e')
t = config.get('leet','t')
o = config.get('leet','o')
s = config.get('leet','s')
g = config.get('leet','g')
z = config.get('leet','z')
# for concatenations...
def concats(seq, start, stop):
for mystr in seq:
for num in xrange(start, stop):
yield mystr + str(num)
# for sorting and making combinations...
def komb(seq, start):
for mystr in seq:
for mystr1 in start:
yield mystr + mystr1
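# Example (illustrative): list(concats(['ana'], 1, 3)) -> ['ana1', 'ana2'] and
# list(komb(['ana'], ['2024'])) -> ['ana2024'].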
if len(sys.argv) < 2 or sys.argv[1] == '-h':
print " ___________ "
print " \033[07m cupp.py! \033[27m # Common"
print " \ # User"
print " \ \033[1;31m,__,\033[1;m # Passwords"
print " \ \033[1;31m(\033[1;moo\033[1;31m)____\033[1;m # Profiler"
print " \033[1;31m(__) )\ \033[1;m "
print " \033[1;31m ||--|| \033[1;m\033[05m*\033[25m\033[1;m [ Muris Kurgas | [email protected] ]\r\n\r\n"
print " [ Options ]\r\n"
print " -h You are looking at it baby! :)"
print " For more help take a look in docs/README"
print " Global configuration file is cupp.cfg\n"
print " -i Interactive questions for user password profiling\r\n"
print " -w Use this option to improve existing dictionary,"
print " or WyD.pl output to make some pwnsauce\r\n"
print " -l Download huge wordlists from repository\r\n"
print " -a Parse default usernames and passwords directly from Alecto DB."
print " Project Alecto uses purified databases of Phenoelit and CIRT"
print " which where merged and enhanced.\r\n"
print " -v Version of the program\r\n"
exit()
elif sys.argv[1] == '-v':
print "\r\n \033[1;31m[ cupp.py ] v3.1\033[1;m\r\n"
print " * Hacked up by j0rgan - [email protected]"
print " * http://www.remote-exploit.org\r\n"
print " Take a look docs/README file for more info about the program\r\n"
exit()
elif sys.argv[1] == '-w':
if len(sys.argv) < 3:
print "\r\n[Usage]: "+sys.argv[0]+" -w [FILENAME]\r\n"
exit()
fajl = open(sys.argv[2], "r")
listic = fajl.readlines()
linije = 0
for line in listic:
linije += 1
listica = []
for x in listic:
listica += x.split()
print "\r\n *************************************************"
print " * \033[1;31mWARNING!!!\033[1;m *"
print " * Using large wordlists in some *"
print " * options bellow is NOT recommended! *"
print " *************************************************\r\n"
conts = raw_input("[>] Do you want to concatenate all words from wordlist? Y/[N]: ").lower()
if conts == "y" and linije > threshold:
print "\r\n[-] Maximum number of words for concatenation is "+str(threshold)
print "[-] Check configuration file for increasing this number.\r\n"
conts = raw_input("[>] Do you want to concatenate all words from wordlist? Y/[N]: ").lower()
cont = ['']
if conts == "y":
for cont1 in listica:
for cont2 in listica:
if listica.index(cont1) != listica.index(cont2):
cont.append(cont1+cont2)
spechars = ['']
spechars1 = raw_input("[>] Do you want to add special chars at the end of words? Y/[N]: ").lower()
if spechars1 == "y":
for spec1 in chars:
spechars.append(spec1)
for spec2 in chars:
spechars.append(spec1+spec2)
for spec3 in chars:
spechars.append(spec1+spec2+spec3)
    randnum = raw_input("[>] Do you want to add some random numbers at the end of words? Y/[N]: ").lower()
    leetmode = raw_input("[>] Leet mode? (i.e. leet = 1337) Y/[N]: ").lower()
kombinacija1 = list(komb(listica, years))
kombinacija2 = ['']
if conts == "y":
kombinacija2 = list(komb(cont, years))
kombinacija3 = ['']
kombinacija4 = ['']
if spechars1 == "y":
kombinacija3 = list(komb(listica, spechars))
if conts == "y":
kombinacija4 = list(komb(cont, spechars))
kombinacija5 = ['']
kombinacija6 = ['']
if randnum == "y":
kombinacija5 = list(concats(listica, numfrom, numto))
if conts == "y":
kombinacija6 = list(concats(cont, numfrom, numto))
print "\r\n[+] Now making a dictionary..."
print "[+] Sorting list and removing duplicates..."
komb_unique1 = dict.fromkeys(kombinacija1).keys()
komb_unique2 = dict.fromkeys(kombinacija2).keys()
komb_unique3 = dict.fromkeys(kombinacija3).keys()
komb_unique4 = dict.fromkeys(kombinacija4).keys()
komb_unique5 = dict.fromkeys(kombinacija5).keys()
komb_unique6 = dict.fromkeys(kombinacija6).keys()
komb_unique7 = dict.fromkeys(listica).keys()
komb_unique8 = dict.fromkeys(cont).keys()
uniqlist = komb_unique1+komb_unique2+komb_unique3+komb_unique4+komb_unique5+komb_unique6+komb_unique7+komb_unique8
unique_lista = dict.fromkeys(uniqlist).keys()
unique_leet = []
if leetmode == "y":
for x in unique_lista: # if you want to add more leet chars, you will need to add more lines in cupp.cfg too...
x = x.replace('a',a)
x = x.replace('i',i)
x = x.replace('e',e)
x = x.replace('t',t)
x = x.replace('o',o)
x = x.replace('s',s)
x = x.replace('g',g)
x = x.replace('z',z)
unique_leet.append(x)
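    # Illustrative example: depending on the substitutions configured in
    # cupp.cfg (e.g. a->4, e->3, o->0, s->5), 'password' would become 'p455w0rd'.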
unique_list = unique_lista + unique_leet
unique_list_finished = []
for x in unique_list:
if len(x) > wcfrom and len(x) < wcto:
unique_list_finished.append(x)
f = open ( sys.argv[2]+'.cupp.txt', 'w' )
unique_list_finished.sort()
    f.write (os.linesep.join(unique_list_finished))
    f.close()
    f = open ( sys.argv[2]+'.cupp.txt', 'r' )
lines = 0
for line in f:
lines += 1
f.close()
print "[+] Saving dictionary to \033[1;31m"+sys.argv[2]+".cupp.txt\033[1;m, counting \033[1;31m"+str(lines)+" words.\033[1;m"
print "[+] Now load your pistolero with \033[1;31m"+sys.argv[2]+".cupp.txt\033[1;m and shoot! Good luck!"
fajl.close()
exit()
elif sys.argv[1] == '-i':
print "\r\n[+] Insert the informations about the victim to make a dictionary"
print "[+] If you don't know all the info, just hit enter when asked! ;)\r\n"
# We need some informations first!
name = raw_input("[>] Name: ").lower()
    while len(name.strip()) == 0:
print "\r\n[-] You must enter a name at least!"
name = raw_input("[>] Name: ").lower()
name = str(name)
surname = raw_input("[>] Surname: ").lower()
nick = raw_input("[>] Nickname: ").lower()
birthdate = raw_input("[>] Birthdate (DDMMYYYY): ")
while len(birthdate) != 0 and len(birthdate) != 8:
print "\r\n[-] You must enter 8 digits for birthday!"
birthdate = raw_input("[>] Birthdate (DDMMYYYY): ")
birthdate = str(birthdate)
print "\r\n"
wife = raw_input("[>] Wife's(husband's) name: ").lower()
wifen = raw_input("[>] Wife's(husband's) nickname: ").lower()
wifeb = raw_input("[>] Wife's(husband's) birthdate (DDMMYYYY): ")
while len(wifeb) != 0 and len(wifeb) != 8:
print "\r\n[-] You must enter 8 digits for birthday!"
wifeb = raw_input("[>] Wife's(husband's) birthdate (DDMMYYYY): ")
wifeb = str(wifeb)
print "\r\n"
kid = raw_input("[>] Child's name: ").lower()
kidn = raw_input("[>] Child's nickname: ").lower()
kidb = raw_input("[>] Child's birthdate (DDMMYYYY): ")
while len(kidb) != 0 and len(kidb) != 8:
print "\r\n[-] You must enter 8 digits for birthday!"
kidb = raw_input("[>] Child's birthdate (DDMMYYYY): ")
kidb = str(kidb)
print "\r\n"
pet = raw_input("[>] Pet's name: ").lower()
company = raw_input("[>] Company name: ").lower()
print "\r\n"
words = ['']
oth = raw_input("[>] Do you want to add some key words about the victim? Y/[N]: ").lower()
if oth == "y":
words = raw_input("[>] Please enter the words, separated by comma. [i.e. hacker, juice, black]: ").lower().split(", ")
spechars = ['']
spechars1 = raw_input("[>] Do you want to add special chars at the end of words? Y/[N]: ").lower()
if spechars1 == "y":
for spec1 in chars:
spechars.append(spec1)
for spec2 in chars:
spechars.append(spec1+spec2)
for spec3 in chars:
spechars.append(spec1+spec2+spec3)
    randnum = raw_input("[>] Do you want to add some random numbers at the end of words? Y/[N]: ").lower()
leetmode = raw_input("[>] Leet mode? (i.e. leet = 1337) Y/[N]: ").lower()
print "\r\n[+] Now making a dictionary..."
    # Now we must do some string modifications...
# Birthdays first
birthdate_yy = birthdate[-2:]
birthdate_yyy = birthdate[-3:]
birthdate_yyyy = birthdate[-4:]
birthdate_xd = birthdate[1:2]
birthdate_xm = birthdate[3:4]
birthdate_dd = birthdate[:2]
birthdate_mm = birthdate[2:4]
wifeb_yy = wifeb[-2:]
wifeb_yyy = wifeb[-3:]
wifeb_yyyy = wifeb[-4:]
wifeb_xd = wifeb[1:2]
wifeb_xm = wifeb[3:4]
wifeb_dd = wifeb[:2]
wifeb_mm = wifeb[2:4]
kidb_yy = kidb[-2:]
kidb_yyy = kidb[-3:]
kidb_yyyy = kidb[-4:]
kidb_xd = kidb[1:2]
kidb_xm = kidb[3:4]
kidb_dd = kidb[:2]
kidb_mm = kidb[2:4]
# Convert first letters to uppercase...
nameup = name.title()
surnameup = surname.title()
nickup = nick.title()
wifeup = wife.title()
wifenup = wifen.title()
kidup = kid.title()
kidnup = kidn.title()
petup = pet.title()
companyup = company.title()
wordsup = []
for words1 in words:
wordsup.append(words1.title())
word = words+wordsup
# reverse a name
rev_name = name[::-1]
rev_nameup = nameup[::-1]
rev_nick = nick[::-1]
rev_nickup = nickup[::-1]
rev_wife = wife[::-1]
rev_wifeup = wifeup[::-1]
rev_kid = kid[::-1]
rev_kidup = kidup[::-1]
reverse = [rev_name, rev_nameup, rev_nick, rev_nickup, rev_wife, rev_wifeup, rev_kid, rev_kidup]
rev_n = [rev_name, rev_nameup, rev_nick, rev_nickup]
rev_w = [rev_wife, rev_wifeup]
rev_k = [rev_kid, rev_kidup]
# Let's do some serious work! This will be a mess of code, but... who cares? :)
# Birthdays combinations
bds = [birthdate_yy, birthdate_yyy, birthdate_yyyy, birthdate_xd, birthdate_xm, birthdate_dd, birthdate_mm]
bdss = []
for bds1 in bds:
bdss.append(bds1)
for bds2 in bds:
if bds.index(bds1) != bds.index(bds2):
bdss.append(bds1+bds2)
for bds3 in bds:
if bds.index(bds1) != bds.index(bds2) and bds.index(bds2) != bds.index(bds3) and bds.index(bds1) != bds.index(bds3):
bdss.append(bds1+bds2+bds3)
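    # Illustrative example (hypothetical date 03051984): bds holds
    # ['84', '984', '1984', '3', '5', '03', '05'], and bdss additionally holds every
    # ordered concatenation of two and three distinct entries, e.g. '0384' and '840305'.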
# For a woman...
wbds = [wifeb_yy, wifeb_yyy, wifeb_yyyy, wifeb_xd, wifeb_xm, wifeb_dd, wifeb_mm]
wbdss = []
for wbds1 in wbds:
wbdss.append(wbds1)
for wbds2 in wbds:
if wbds.index(wbds1) != wbds.index(wbds2):
wbdss.append(wbds1+wbds2)
for wbds3 in wbds:
if wbds.index(wbds1) != wbds.index(wbds2) and wbds.index(wbds2) != wbds.index(wbds3) and wbds.index(wbds1) != wbds.index(wbds3):
wbdss.append(wbds1+wbds2+wbds3)
# and a child...
kbds = [kidb_yy, kidb_yyy, kidb_yyyy, kidb_xd, kidb_xm, kidb_dd, kidb_mm]
kbdss = []
for kbds1 in kbds:
kbdss.append(kbds1)
for kbds2 in kbds:
if kbds.index(kbds1) != kbds.index(kbds2):
kbdss.append(kbds1+kbds2)
for kbds3 in kbds:
if kbds.index(kbds1) != kbds.index(kbds2) and kbds.index(kbds2) != kbds.index(kbds3) and kbds.index(kbds1) != kbds.index(kbds3):
kbdss.append(kbds1+kbds2+kbds3)
# string combinations....
kombinaac = [pet, petup, company, companyup]
kombina = [name, surname, nick, nameup, surnameup, nickup]
kombinaw = [wife, wifen, wifeup, wifenup, surname, surnameup]
kombinak = [kid, kidn, kidup, kidnup, surname, surnameup]
kombinaa = []
for kombina1 in kombina:
kombinaa.append(kombina1)
for kombina2 in kombina:
if kombina.index(kombina1) != kombina.index(kombina2) and kombina.index(kombina1.title()) != kombina.index(kombina2.title()):
kombinaa.append(kombina1+kombina2)
kombinaaw = []
for kombina1 in kombinaw:
kombinaaw.append(kombina1)
for kombina2 in kombinaw:
if kombinaw.index(kombina1) != kombinaw.index(kombina2) and kombinaw.index(kombina1.title()) != kombinaw.index(kombina2.title()):
kombinaaw.append(kombina1+kombina2)
kombinaak = []
for kombina1 in kombinak:
kombinaak.append(kombina1)
for kombina2 in kombinak:
if kombinak.index(kombina1) != kombinak.index(kombina2) and kombinak.index(kombina1.title()) != kombinak.index(kombina2.title()):
kombinaak.append(kombina1+kombina2)
komb1 = list(komb(kombinaa, bdss))
komb2 = list(komb(kombinaaw, wbdss))
komb3 = list(komb(kombinaak, kbdss))
komb4 = list(komb(kombinaa, years))
komb5 = list(komb(kombinaac, years))
komb6 = list(komb(kombinaaw, years))
komb7 = list(komb(kombinaak, years))
komb8 = list(komb(word, bdss))
komb9 = list(komb(word, wbdss))
komb10 = list(komb(word, kbdss))
komb11 = list(komb(word, years))
komb12 = ['']
komb13 = ['']
komb14 = ['']
komb15 = ['']
komb16 = ['']
komb21 = ['']
if randnum == "y":
komb12 = list(concats(word, numfrom, numto))
komb13 = list(concats(kombinaa, numfrom, numto))
komb14 = list(concats(kombinaac, numfrom, numto))
komb15 = list(concats(kombinaaw, numfrom, numto))
komb16 = list(concats(kombinaak, numfrom, numto))
komb21 = list(concats(reverse, numfrom, numto))
komb17 = list(komb(reverse, years))
komb18 = list(komb(rev_w, wbdss))
komb19 = list(komb(rev_k, kbdss))
komb20 = list(komb(rev_n, bdss))
komb001 = ['']
komb002 = ['']
komb003 = ['']
komb004 = ['']
komb005 = ['']
komb006 = ['']
if spechars1 == "y":
komb001 = list(komb(kombinaa, spechars))
komb002 = list(komb(kombinaac, spechars))
komb003 = list(komb(kombinaaw , spechars))
komb004 = list(komb(kombinaak , spechars))
komb005 = list(komb(word, spechars))
komb006 = list(komb(reverse, spechars))
print "[+] Sorting list and removing duplicates..."
komb_unique1 = dict.fromkeys(komb1).keys()
komb_unique2 = dict.fromkeys(komb2).keys()
komb_unique3 = dict.fromkeys(komb3).keys()
komb_unique4 = dict.fromkeys(komb4).keys()
komb_unique5 = dict.fromkeys(komb5).keys()
komb_unique6 = dict.fromkeys(komb6).keys()
komb_unique7 = dict.fromkeys(komb7).keys()
komb_unique8 = dict.fromkeys(komb8).keys()
komb_unique9 = dict.fromkeys(komb9).keys()
komb_unique10 = dict.fromkeys(komb10).keys()
komb_unique11 = dict.fromkeys(komb11).keys()
komb_unique12 = dict.fromkeys(komb12).keys()
komb_unique13 = dict.fromkeys(komb13).keys()
komb_unique14 = dict.fromkeys(komb14).keys()
komb_unique15 = dict.fromkeys(komb15).keys()
komb_unique16 = dict.fromkeys(komb16).keys()
komb_unique17 = dict.fromkeys(komb17).keys()
komb_unique18 = dict.fromkeys(komb18).keys()
komb_unique19 = dict.fromkeys(komb19).keys()
komb_unique20 = dict.fromkeys(komb20).keys()
komb_unique21 = dict.fromkeys(komb21).keys()
komb_unique01 = dict.fromkeys(kombinaa).keys()
komb_unique02 = dict.fromkeys(kombinaac).keys()
komb_unique03 = dict.fromkeys(kombinaaw).keys()
komb_unique04 = dict.fromkeys(kombinaak).keys()
komb_unique05 = dict.fromkeys(word).keys()
komb_unique07 = dict.fromkeys(komb001).keys()
komb_unique08 = dict.fromkeys(komb002).keys()
komb_unique09 = dict.fromkeys(komb003).keys()
komb_unique010 = dict.fromkeys(komb004).keys()
komb_unique011 = dict.fromkeys(komb005).keys()
komb_unique012 = dict.fromkeys(komb006).keys()
    uniqlist = (bdss + wbdss + kbdss + reverse +
                komb_unique01 + komb_unique02 + komb_unique03 + komb_unique04 + komb_unique05 +
                komb_unique1 + komb_unique2 + komb_unique3 + komb_unique4 + komb_unique5 +
                komb_unique6 + komb_unique7 + komb_unique8 + komb_unique9 + komb_unique10 +
                komb_unique11 + komb_unique12 + komb_unique13 + komb_unique14 + komb_unique15 +
                komb_unique16 + komb_unique17 + komb_unique18 + komb_unique19 + komb_unique20 +
                komb_unique21 + komb_unique07 + komb_unique08 + komb_unique09 + komb_unique010 +
                komb_unique011 + komb_unique012)
unique_lista = dict.fromkeys(uniqlist).keys()
unique_leet = []
if leetmode == "y":
for x in unique_lista: # if you want to add more leet chars, you will need to add more lines in cupp.cfg too...
x = x.replace('a',a)
x = x.replace('i',i)
x = x.replace('e',e)
x = x.replace('t',t)
x = x.replace('o',o)
x = x.replace('s',s)
x = x.replace('g',g)
x = x.replace('z',z)
unique_leet.append(x)
unique_list = unique_lista + unique_leet
unique_list_finished = []
for x in unique_list:
if len(x) > wcfrom and len(x) < wcto:
unique_list_finished.append(x)
unique_list_finished.sort()
f = open ( name+'.txt', 'w' )
    f.write (os.linesep.join(unique_list_finished))
    f.close()
    f = open ( name+'.txt', 'r' )
lines = 0
for line in f:
lines += 1
f.close()
print "[+] Saving dictionary to \033[1;31m"+name+".txt\033[1;m, counting \033[1;31m"+str(lines)+"\033[1;m words."
print "[+] Now load your pistolero with \033[1;31m"+name+".txt\033[1;m and shoot! Good luck!"
exit()
elif sys.argv[1] == '-a':
url = config.get('alecto','alectourl')
print "\r\n[+] Checking if alectodb is not present..."
if os.path.isfile('alectodb.csv.gz') == 0:
print "[+] Downloading alectodb.csv.gz..."
webFile = urllib.urlopen(url)
        localFile = open(url.split('/')[-1], 'wb')
localFile.write(webFile.read())
webFile.close()
localFile.close()
f = gzip.open('alectodb.csv.gz', 'rb')
data = csv.reader(f)
usernames = []
passwords = []
for row in data:
usernames.append(row[5])
passwords.append(row[6])
gus = list(set(usernames))
gpa = list(set(passwords))
gus.sort()
gpa.sort()
print "\r\n[+] Exporting to alectodb-usernames.txt and alectodb-passwords.txt\r\n[+] Done."
f = open ( 'alectodb-usernames.txt', 'w' )
f.write (os.linesep.join(gus))
f.close()
f = open ( 'alectodb-passwords.txt', 'w' )
f.write (os.linesep.join(gpa))
f.close()
sys.exit()
elif sys.argv[1] == '-l':
ftpname = config.get('downloader','ftpname')
ftpurl = config.get('downloader','ftpurl')
ftppath = config.get('downloader','ftppath')
ftpuser = config.get('downloader','ftpuser')
ftppass = config.get('downloader','ftppass')
if os.path.isdir('dictionaries') == 0:
os.mkdir('dictionaries')
print " \r\n Choose the section you want to download:\r\n"
print " 1 Moby 14 french 27 places"
print " 2 afrikaans 15 german 28 polish"
print " 3 american 16 hindi 39 random"
print " 4 aussie 17 hungarian 30 religion"
print " 5 chinese 18 italian 31 russian"
print " 6 computer 19 japanese 32 science"
print " 7 croatian 20 latin 33 spanish"
print " 8 czech 21 literature 34 swahili"
print " 9 danish 22 movieTV 35 swedish"
print " 10 databases 23 music 36 turkish"
print " 11 dictionaries 24 names 37 yiddish"
print " 12 dutch 25 net 38 exit program"
print " 13 finnish 26 norwegian \r\n"
print " \r\n Files will be downloaded from "+ftpname+" repository"
print " \r\n Tip: After downloading wordlist, you can improve it with -w option\r\n"
filedown = raw_input("[>] Enter number: ")
while filedown.isdigit() == 0:
print "\r\n[-] Wrong choice. "
filedown = raw_input("[>] Enter number: ")
filedown = str(filedown)
while int(filedown) > 38:
print "\r\n[-] Wrong choice. "
filedown = raw_input("[>] Enter number: ")
filedown = str(filedown)
def handleDownload(block):
file.write(block)
print ".",
def downloader():
ftp.login(ftpuser, ftppass)
ftp.cwd(ftppath)
def filequitter():
file.close()
print ' done.'
if filedown == "1":
print "\r\n[+] connecting...\r\n"
ftp = ftplib.FTP(ftpurl)
downloader()
ftp.cwd('Moby')
if os.path.isdir('dictionaries/Moby/') == 0:
os.mkdir('dictionaries/Moby/')
dire = 'dictionaries/Moby/'
file = open(dire+'mhyph.tar.gz', 'wb')
print "\r\n[+] downloading mhyph.tar.gz..."
ftp.retrbinary('RETR ' + 'mhyph.tar.gz', handleDownload)
filequitter()
file = open(dire+'mlang.tar.gz', 'wb')
print "\r\n[+] downloading mlang.tar.gz..."
ftp.retrbinary('RETR ' + 'mlang.tar.gz', handleDownload)
filequitter()
file = open(dire+'moby.tar.gz', 'wb')
print "\r\n[+] downloading moby.tar.gz..."
ftp.retrbinary('RETR ' + 'moby.tar.gz', handleDownload)
filequitter()
file = open(dire+'mpos.tar.gz', 'wb')
print "\r\n[+] downloading mpos.tar.gz..."
ftp.retrbinary('RETR ' + 'mpos.tar.gz', handleDownload)
filequitter()
file = open(dire+'mpron.tar.gz', 'wb')
print "\r\n[+] downloading mpron.tar.gz..."
ftp.retrbinary('RETR ' + 'mpron.tar.gz', handleDownload)
filequitter()
file = open(dire+'mthes.tar.gz', 'wb')
print "\r\n[+] downloading mthes.tar.gz..."
ftp.retrbinary('RETR ' + 'mthes.tar.gz', handleDownload)
filequitter()
file = open(dire+'mwords.tar.gz', 'wb')
print "\r\n[+] downloading mwords.tar.gz..."
ftp.retrbinary('RETR ' + 'mwords.tar.gz', handleDownload)
filequitter()
print '[+] files saved to '+ dire
ftp.quit()
exit()
if filedown == "2":
print "[+] connecting..."
ftp = ftplib.FTP(ftpurl)
downloader()
ftp.cwd('afrikaans')
if os.path.isdir('dictionaries/afrikaans/') == 0:
os.mkdir('dictionaries/afrikaans/')
dire = 'dictionaries/afrikaans/'
file = open(dire+'afr_dbf.zip', 'wb')
print "\r\n[+] downloading afr_dbf.zip..."
ftp.retrbinary('RETR ' + 'afr_dbf.zip', handleDownload)
filequitter()
print '[+] file saved to '+ dire
ftp.quit()
exit()
if filedown == "3":
print "[+] connecting..."
ftp = ftplib.FTP(ftpurl)
downloader()
ftp.cwd('american')
if os.path.isdir('dictionaries/american/') == 0:
os.mkdir('dictionaries/american/')
dire = 'dictionaries/american/'
file = open(dire+'dic-0294.tar.gz', 'wb')
print "\r\n[+] downloading dic-0294.tar.gz..."
ftp.retrbinary('RETR ' + 'dic-0294.tar.gz', handleDownload)
filequitter()
print '[+] file saved to '+ dire
ftp.quit()
exit()
if filedown == "4":
print "[+] connecting..."
ftp = ftplib.FTP(ftpurl)
downloader()
ftp.cwd('aussie')
if os.path.isdir('dictionaries/aussie/') == 0:
os.mkdir('dictionaries/aussie/')
dire = 'dictionaries/aussie/'
file = open(dire+'oz.Z', 'wb')
print "\r\n[+] downloading oz.Z..."
ftp.retrbinary('RETR ' + 'oz.Z', handleDownload)
filequitter()
print '[+] file saved to '+ dire
ftp.quit()
exit()
if filedown == "5":
print "[+] connecting..."
ftp = ftplib.FTP(ftpurl)
downloader()
ftp.cwd('chinese')
if os.path.isdir('dictionaries/chinese/') == 0:
os.mkdir('dictionaries/chinese/')
dire = 'dictionaries/chinese/'
file = open(dire+'chinese.Z', 'wb')
print "\r\n[+] downloading chinese.Z..."
ftp.retrbinary('RETR ' + 'chinese.Z', handleDownload)
filequitter()
print '[+] file saved to '+ dire
ftp.quit()
exit()
if filedown == "6":
print "[+] connecting..."
ftp = ftplib.FTP(ftpurl)
downloader()
ftp.cwd('computer')
if os.path.isdir('dictionaries/computer/') == 0:
os.mkdir('dictionaries/computer/')
dire = 'dictionaries/computer/'
file = open(dire+'Domains.Z', 'wb')
print "\r\n[+] downloading Domains.Z..."
ftp.retrbinary('RETR ' + 'Domains.Z', handleDownload)
filequitter()
file = open(dire+'Dosref.Z', 'wb')
print "\r\n[+] downloading Dosref.Z..."
ftp.retrbinary('RETR ' + 'Dosref.Z', handleDownload)
filequitter()
file = open(dire+'Ftpsites.Z', 'wb')
print "\r\n[+] downloading Ftpsites.Z..."
ftp.retrbinary('RETR ' + 'Ftpsites.Z', handleDownload)
filequitter()
file = open(dire+'Jargon.Z', 'wb')
print "\r\n[+] downloading Jargon.Z..."
ftp.retrbinary('RETR ' + 'Jargon.Z', handleDownload)
filequitter()
file = open(dire+'common-passwords.txt.Z', 'wb')
print "\r\n[+] downloading common-passwords.txt.Z..."
ftp.retrbinary('RETR ' + 'common-passwords.txt.Z', handleDownload)
filequitter()
file = open(dire+'etc-hosts.Z', 'wb')
print "\r\n[+] downloading etc-hosts.Z..."
ftp.retrbinary('RETR ' + 'etc-hosts.Z', handleDownload)
filequitter()
file = open(dire+'foldoc.gz', 'wb')
print "\r\n[+] downloading foldoc.gz..."
ftp.retrbinary('RETR ' + 'foldoc.gz', handleDownload)
filequitter()
file = open(dire+'language-list.Z', 'wb')
print "\r\n[+] downloading language-list.Z..."
ftp.retrbinary('RETR ' + 'language-list.Z', handleDownload)
filequitter()
file = open(dire+'unix.Z', 'wb')
print "\r\n[+] downloading unix.Z..."
ftp.retrbinary('RETR ' + 'unix.Z', handleDownload)
filequitter()
print '[+] files saved to '+ dire
ftp.quit()
exit()
if filedown == "7":
print "[+] connecting..."
ftp = ftplib.FTP(ftpurl)
downloader()
ftp.cwd('croatian')
if os.path.isdir('dictionaries/croatian/') == 0:
os.mkdir('dictionaries/croatian/')
dire = 'dictionaries/croatian/'
file = open(dire+'croatian.gz', 'wb')
print "\r\n[+] downloading croatian.gz..."
ftp.retrbinary('RETR ' + 'croatian.gz', handleDownload)
filequitter()
print '[+] file saved to '+ dire
ftp.quit()
exit()
if filedown == "8":
print "[+] connecting..."
ftp = ftplib.FTP(ftpurl)
downloader()
ftp.cwd('czech')
if os.path.isdir('dictionaries/czech/') == 0:
os.mkdir('dictionaries/czech/')
dire = 'dictionaries/czech/'
file = open(dire+'czech-wordlist-ascii-cstug-novak.Z', 'wb')
print "\r\n[+] downloading czech-wordlist-ascii-cstug-novak.Z..."
ftp.retrbinary('RETR ' + 'czech-wordlist-ascii-cstug-novak.Z', handleDownload)
filequitter()
print '[+] file saved to '+ dire
ftp.quit()
exit()
if filedown == "9":
print "[+] connecting..."
ftp = ftplib.FTP(ftpurl)
downloader()
ftp.cwd('danish')
if os.path.isdir('dictionaries/danish/') == 0:
os.mkdir('dictionaries/danish/')
dire = 'dictionaries/danish/'
file = open(dire+'danish.words.Z', 'wb')
print "\r\n[+] downloading danish.words.Z..."
ftp.retrbinary('RETR ' + 'danish.words.Z', handleDownload)
filequitter()
file = open(dire+'dansk.zip', 'wb')
print "\r\n[+] downloading dansk.zip..."
ftp.retrbinary('RETR ' + 'dansk.zip', handleDownload)
filequitter()
print '[+] files saved to '+ dire
ftp.quit()
exit()
if filedown == "10":
print "[+] connecting..."
ftp = ftplib.FTP(ftpurl)
downloader()
ftp.cwd('databases')
if os.path.isdir('dictionaries/databases/') == 0:
os.mkdir('dictionaries/databases/')
dire = 'dictionaries/databases/'
file = open(dire+'acronyms.Z', 'wb')
print "\r\n[+] downloading acronyms.Z..."
ftp.retrbinary('RETR ' + 'acronyms.Z', handleDownload)
filequitter()
file = open(dire+'att800.Z', 'wb')
print "\r\n[+] downloading att800.Z..."
ftp.retrbinary('RETR ' + 'att800.Z', handleDownload)
filequitter()
file = open(dire+'computer-companies.Z', 'wb')
print "\r\n[+] downloading computer-companies.Z..."
ftp.retrbinary('RETR ' + 'computer-companies.Z', handleDownload)
filequitter()
file = open(dire+'world_heritage.Z', 'wb')
print "\r\n[+] downloading world_heritage.Z..."
ftp.retrbinary('RETR ' + 'world_heritage.Z', handleDownload)
filequitter()
print '[+] files saved to '+ dire
ftp.quit()
exit()
if filedown == "11":
print "[+] connecting..."
ftp = ftplib.FTP(ftpurl)
downloader()
ftp.cwd('dictionaries')
if os.path.isdir('dictionaries/dictionaries/') == 0:
os.mkdir('dictionaries/dictionaries/')
dire = 'dictionaries/dictionaries/'
file = open(dire+'Antworth.gz', 'wb')
print "\r\n[+] downloading Antworth.gz..."
ftp.retrbinary('RETR ' + 'Antworth.gz', handleDownload)
filequitter()
file = open(dire+'CRL.words.gz', 'wb')
print "\r\n[+] downloading CRL.words.gz..."
ftp.retrbinary('RETR ' + 'CRL.words.gz', handleDownload)
filequitter()
file = open(dire+'Roget.words.gz', 'wb')
print "\r\n[+] downloading Roget.words.gz..."
ftp.retrbinary('RETR ' + 'Roget.words.gz', handleDownload)
filequitter()
file = open(dire+'Unabr.dict.gz', 'wb')
print "\r\n[+] downloading Unabr.dict.gz..."
ftp.retrbinary('RETR ' + 'Unabr.dict.gz', handleDownload)
filequitter()
file = open(dire+'Unix.dict.gz', 'wb')
print "\r\n[+] downloading Unix.dict.gz..."
ftp.retrbinary('RETR ' + 'Unix.dict.gz', handleDownload)
filequitter()
file = open(dire+'englex-dict.gz', 'wb')
print "\r\n[+] downloading englex-dict.gz..."
ftp.retrbinary('RETR ' + 'englex-dict.gz', handleDownload)
filequitter()
file = open(dire+'knuth_britsh.gz', 'wb')
print "\r\n[+] downloading knuth_britsh.gz..."
ftp.retrbinary('RETR ' + 'knuth_britsh.gz', handleDownload)
filequitter()
file = open(dire+'knuth_words.gz', 'wb')
print "\r\n[+] downloading knuth_words.gz..."
ftp.retrbinary('RETR ' + 'knuth_words.gz', handleDownload)
filequitter()
file = open(dire+'pocket-dic.gz', 'wb')
print "\r\n[+] downloading pocket-dic.gz..."
ftp.retrbinary('RETR ' + 'pocket-dic.gz', handleDownload)
filequitter()
file = open(dire+'shakesp-glossary.gz', 'wb')
print "\r\n[+] downloading shakesp-glossary.gz..."
ftp.retrbinary('RETR ' + 'shakesp-glossary.gz', handleDownload)
filequitter()
file = open(dire+'special.eng.gz', 'wb')
print "\r\n[+] downloading special.eng.gz..."
ftp.retrbinary('RETR ' + 'special.eng.gz', handleDownload)
filequitter()
file = open(dire+'words-english.gz', 'wb')
print "\r\n[+] downloading words-english.gz..."
ftp.retrbinary('RETR ' + 'words-english.gz', handleDownload)
filequitter()
print '[+] files saved to '+ dire
ftp.quit()
exit()
if filedown == "12":
print "[+] connecting..."
ftp = ftplib.FTP(ftpurl)
downloader()
ftp.cwd('dutch')
if os.path.isdir('dictionaries/dutch/') == 0:
os.mkdir('dictionaries/dutch/')
dire = 'dictionaries/dutch/'
file = open(dire+'words.dutch.Z', 'wb')
print "\r\n[+] downloading words.dutch.Z..."
ftp.retrbinary('RETR ' + 'words.dutch.Z', handleDownload)
filequitter()
print '[+] file saved to '+ dire
ftp.quit()
exit()
if filedown == "13":
print "[+] connecting..."
ftp = ftplib.FTP(ftpurl)
downloader()
ftp.cwd('finnish')
if os.path.isdir('dictionaries/finnish/') == 0:
os.mkdir('dictionaries/finnish/')
dire = 'dictionaries/finnish/'
file = open(dire+'finnish.gz', 'wb')
print "\r\n[+] downloading finnish.gz..."
ftp.retrbinary('RETR ' + 'finnish.gz', handleDownload)
filequitter()
file = open(dire+'firstnames.finnish.gz', 'wb')
print "\r\n[+] downloading firstnames.finnish.gz..."
ftp.retrbinary('RETR ' + 'firstnames.finnish.gz', handleDownload)
filequitter()
file = open(dire+'words.finnish.FAQ.gz', 'wb')
print "\r\n[+] downloading words.finnish.FAQ.gz..."
ftp.retrbinary('RETR ' + 'words.finnish.FAQ.gz', handleDownload)
filequitter()
print '[+] files saved to '+ dire
ftp.quit()
exit()
if filedown == "14":
print "[+] connecting..."
ftp = ftplib.FTP(ftpurl)
downloader()
ftp.cwd('french')
if os.path.isdir('dictionaries/french/') == 0:
os.mkdir('dictionaries/french/')
dire = 'dictionaries/french/'
file = open(dire+'dico.Z', 'wb')
print "\r\n[+] downloading dico.Z..."
ftp.retrbinary('RETR ' + 'dico.Z', handleDownload)
filequitter()
print '[+] file saved to '+ dire
ftp.quit()
exit()
if filedown == "15":
print "[+] connecting..."
ftp = ftplib.FTP(ftpurl)
downloader()
ftp.cwd('german')
if os.path.isdir('dictionaries/german/') == 0:
os.mkdir('dictionaries/german/')
dire = 'dictionaries/german/'
file = open(dire+'deutsch.dic.Z', 'wb')
print "\r\n[+] downloading deutsch.dic.Z..."
ftp.retrbinary('RETR ' + 'deutsch.dic.Z', handleDownload)
filequitter()
file = open(dire+'germanl.Z', 'wb')
print "\r\n[+] downloading germanl.Z..."
ftp.retrbinary('RETR ' + 'germanl.Z', handleDownload)
filequitter()
file = open(dire+'words.german.Z', 'wb')
print "\r\n[+] downloading words.german.Z..."
ftp.retrbinary('RETR ' + 'words.german.Z', handleDownload)
filequitter()
print '[+] files saved to '+ dire
ftp.quit()
exit()
if filedown == "16":
print "[+] connecting..."
ftp = ftplib.FTP(ftpurl)
downloader()
ftp.cwd('hindi')
if os.path.isdir('dictionaries/hindi/') == 0:
os.mkdir('dictionaries/hindi/')
dire = 'dictionaries/hindi/'
file = open(dire+'hindu-names.Z', 'wb')
print "\r\n[+] downloading hindu-names.Z..."
ftp.retrbinary('RETR ' + 'hindu-names.Z', handleDownload)
filequitter()
print '[+] file saved to '+ dire
ftp.quit()
exit()
if filedown == "17":
print "[+] connecting..."
ftp = ftplib.FTP(ftpurl)
downloader()
ftp.cwd('hungarian')
if os.path.isdir('dictionaries/hungarian/') == 0:
os.mkdir('dictionaries/hungarian/')
dire = 'dictionaries/hungarian/'
file = open(dire+'hungarian.gz', 'wb')
print "\r\n[+] downloading hungarian.gz..."
ftp.retrbinary('RETR ' + 'hungarian.gz', handleDownload)
filequitter()
print '[+] file saved to '+ dire
ftp.quit()
exit()
if filedown == "18":
print "[+] connecting..."
ftp = ftplib.FTP(ftpurl)
downloader()
ftp.cwd('italian')
if os.path.isdir('dictionaries/italian/') == 0:
os.mkdir('dictionaries/italian/')
dire = 'dictionaries/italian/'
file = open(dire+'words.italian.Z', 'wb')
print "\r\n[+] downloading words.italian.Z..."
ftp.retrbinary('RETR ' + 'words.italian.Z', handleDownload)
filequitter()
print '[+] file saved to '+ dire
ftp.quit()
exit()
if filedown == "19":
print "[+] connecting..."
ftp = ftplib.FTP(ftpurl)
downloader()
ftp.cwd('japanese')
if os.path.isdir('dictionaries/japanese/') == 0:
os.mkdir('dictionaries/japanese/')
dire = 'dictionaries/japanese/'
file = open(dire+'words.japanese.Z', 'wb')
print "\r\n[+] downloading words.japanese.Z..."
ftp.retrbinary('RETR ' + 'words.japanese.Z', handleDownload)
filequitter()
print '[+] file saved to '+ dire
ftp.quit()
exit()
if filedown == "20":
print "[+] connecting..."
ftp = ftplib.FTP(ftpurl)
downloader()
ftp.cwd('latin')
if os.path.isdir('dictionaries/latin/') == 0:
os.mkdir('dictionaries/latin/')
dire = 'dictionaries/latin/'
file = open(dire+'wordlist.aug.Z', 'wb')
print "\r\n[+] downloading wordlist.aug.Z..."
ftp.retrbinary('RETR ' + 'wordlist.aug.Z', handleDownload)
filequitter()
print '[+] file saved to '+ dire
ftp.quit()
exit()
if filedown == "21":
print "[+] connecting..."
ftp = ftplib.FTP(ftpurl)
downloader()
ftp.cwd('literature')
if os.path.isdir('dictionaries/literature/') == 0:
os.mkdir('dictionaries/literature/')
dire = 'dictionaries/literature/'
file = open(dire+'LCarrol.gz', 'wb')
print "\r\n[+] downloading LCarrol.gz..."
ftp.retrbinary('RETR ' + 'LCarrol.gz', handleDownload)
filequitter()
file = open(dire+'Paradise.Lost.gz', 'wb')
print "\r\n[+] downloading Paradise.Lost.gz..."
ftp.retrbinary('RETR ' + 'Paradise.Lost.gz', handleDownload)
filequitter()
file = open(dire+'aeneid.gz', 'wb')
print "\r\n[+] downloading aeneid.gz..."
ftp.retrbinary('RETR ' + 'aeneid.gz', handleDownload)
filequitter()
file = open(dire+'arthur.gz', 'wb')
print "\r\n[+] downloading arthur.gz..."
ftp.retrbinary('RETR ' + 'arthur.gz', handleDownload)
filequitter()
file = open(dire+'cartoon.gz', 'wb')
print "\r\n[+] downloading cartoon.gz..."
ftp.retrbinary('RETR ' + 'cartoon.gz', handleDownload)
filequitter()
file = open(dire+'cartoons-olivier.gz', 'wb')
print "\r\n[+] downloading cartoons-olivier.gz..."
ftp.retrbinary('RETR ' + 'cartoons-olivier.gz', handleDownload)
filequitter()
file = open(dire+'charlemagne.gz', 'wb')
print "\r\n[+] downloading charlemagne.gz..."
ftp.retrbinary('RETR ' + 'charlemagne.gz', handleDownload)
filequitter()
file = open(dire+'fable.gz', 'wb')
print "\r\n[+] downloading fable.gz..."
ftp.retrbinary('RETR ' + 'fable.gz', handleDownload)
filequitter()
file = open(dire+'iliad.gz', 'wb')
print "\r\n[+] downloading iliad.gz..."
ftp.retrbinary('RETR ' + 'iliad.gz', handleDownload)
filequitter()
file = open(dire+'myths-legends.gz', 'wb')
print "\r\n[+] downloading myths-legends.gz..."
ftp.retrbinary('RETR ' + 'myths-legends.gz', handleDownload)
filequitter()
file = open(dire+'odyssey.gz', 'wb')
print "\r\n[+] downloading odyssey.gz..."
ftp.retrbinary('RETR ' + 'odyssey.gz', handleDownload)
filequitter()
file = open(dire+'sf.gz', 'wb')
print "\r\n[+] downloading sf.gz..."
ftp.retrbinary('RETR ' + 'sf.gz', handleDownload)
filequitter()
file = open(dire+'shakespeare.gz', 'wb')
print "\r\n[+] downloading shakespeare.gz..."
ftp.retrbinary('RETR ' + 'shakespeare.gz', handleDownload)
filequitter()
file = open(dire+'tolkien.words.gz', 'wb')
print "\r\n[+] downloading tolkien.words.gz..."
ftp.retrbinary('RETR ' + 'tolkien.words.gz', handleDownload)
filequitter()
print '[+] files saved to '+ dire
ftp.quit()
exit()
if filedown == "22":
print "[+] connecting..."
ftp = ftplib.FTP(ftpurl)
downloader()
ftp.cwd('movieTV')
if os.path.isdir('dictionaries/movieTV/') == 0:
os.mkdir('dictionaries/movieTV/')
dire = 'dictionaries/movieTV/'
file = open(dire+'Movies.Z', 'wb')
print "\r\n[+] downloading Movies.Z..."
ftp.retrbinary('RETR ' + 'Movies.Z', handleDownload)
filequitter()
file = open(dire+'Python.Z', 'wb')
print "\r\n[+] downloading Python.Z..."
ftp.retrbinary('RETR ' + 'Python.Z', handleDownload)
filequitter()
file = open(dire+'Trek.Z', 'wb')
print "\r\n[+] downloading Trek.Z..."
ftp.retrbinary('RETR ' + 'Trek.Z', handleDownload)
filequitter()
print '[+] files saved to '+ dire
ftp.quit()
exit()
if filedown == "23":
print "[+] connecting..."
ftp = ftplib.FTP(ftpurl)
downloader()
ftp.cwd('music')
if os.path.isdir('dictionaries/music/') == 0:
os.mkdir('dictionaries/music/')
dire = 'dictionaries/music/'
file = open(dire+'music-classical.gz', 'wb')
print "\r\n[+] downloading music-classical.gz..."
ftp.retrbinary('RETR ' + 'music-classical.gz', handleDownload)
filequitter()
file = open(dire+'music-country.gz', 'wb')
print "\r\n[+] downloading music-country.gz..."
ftp.retrbinary('RETR ' + 'music-country.gz', handleDownload)
filequitter()
file = open(dire+'music-jazz.gz', 'wb')
print "\r\n[+] downloading music-jazz.gz..."
ftp.retrbinary('RETR ' + 'music-jazz.gz', handleDownload)
filequitter()
file = open(dire+'music-other.gz', 'wb')
print "\r\n[+] downloading music-other.gz..."
ftp.retrbinary('RETR ' + 'music-other.gz', handleDownload)
filequitter()
file = open(dire+'music-rock.gz', 'wb')
print "\r\n[+] downloading music-rock.gz..."
ftp.retrbinary('RETR ' + 'music-rock.gz', handleDownload)
filequitter()
file = open(dire+'music-shows.gz', 'wb')
print "\r\n[+] downloading music-shows.gz..."
ftp.retrbinary('RETR ' + 'music-shows.gz', handleDownload)
filequitter()
file = open(dire+'rock-groups.gz', 'wb')
print "\r\n[+] downloading rock-groups.gz..."
ftp.retrbinary('RETR ' + 'rock-groups.gz', handleDownload)
filequitter()
print '[+] files saved to '+ dire
ftp.quit()
exit()
if filedown == "24":
print "[+] connecting..."
ftp = ftplib.FTP(ftpurl)
downloader()
ftp.cwd('names')
if os.path.isdir('dictionaries/names/') == 0:
os.mkdir('dictionaries/names/')
dire = 'dictionaries/names/'
file = open(dire+'ASSurnames.gz', 'wb')
print "\r\n[+] downloading ASSurnames.gz..."
ftp.retrbinary('RETR ' + 'ASSurnames.gz', handleDownload)
filequitter()
file = open(dire+'Congress.gz', 'wb')
print "\r\n[+] downloading Congress.gz..."
ftp.retrbinary('RETR ' + 'Congress.gz', handleDownload)
filequitter()
file = open(dire+'Family-Names.gz', 'wb')
print "\r\n[+] downloading Family-Names.gz..."
ftp.retrbinary('RETR ' + 'Family-Names.gz', handleDownload)
filequitter()
file = open(dire+'Given-Names.gz', 'wb')
print "\r\n[+] downloading Given-Names.gz..."
ftp.retrbinary('RETR ' + 'Given-Names.gz', handleDownload)
filequitter()
file = open(dire+'actor-givenname.gz', 'wb')
print "\r\n[+] downloading actor-givenname.gz..."
ftp.retrbinary('RETR ' + 'actor-givenname.gz', handleDownload)
filequitter()
file = open(dire+'actor-surname.gz', 'wb')
print "\r\n[+] downloading actor-surname.gz..."
ftp.retrbinary('RETR ' + 'actor-surname.gz', handleDownload)
filequitter()
file = open(dire+'cis-givenname.gz', 'wb')
print "\r\n[+] downloading cis-givenname.gz..."
ftp.retrbinary('RETR ' + 'cis-givenname.gz', handleDownload)
filequitter()
file = open(dire+'cis-surname.gz', 'wb')
print "\r\n[+] downloading cis-surname.gz..."
ftp.retrbinary('RETR ' + 'cis-surname.gz', handleDownload)
filequitter()
file = open(dire+'crl-names.gz', 'wb')
print "\r\n[+] downloading crl-names.gz..."
ftp.retrbinary('RETR ' + 'crl-names.gz', handleDownload)
filequitter()
file = open(dire+'famous.gz', 'wb')
print "\r\n[+] downloading famous.gz..."
ftp.retrbinary('RETR ' + 'famous.gz', handleDownload)
filequitter()
file = open(dire+'fast-names.gz', 'wb')
print "\r\n[+] downloading fast-names.gz..."
ftp.retrbinary('RETR ' + 'fast-names.gz', handleDownload)
filequitter()
file = open(dire+'female-names-kantr.gz', 'wb')
print "\r\n[+] downloading female-names-kantr.gz..."
ftp.retrbinary('RETR ' + 'female-names-kantr.gz', handleDownload)
filequitter()
file = open(dire+'female-names.gz', 'wb')
print "\r\n[+] downloading female-names.gz..."
ftp.retrbinary('RETR ' + 'female-names.gz', handleDownload)
filequitter()
file = open(dire+'givennames-ol.gz', 'wb')
print "\r\n[+] downloading givennames-ol.gz..."
ftp.retrbinary('RETR ' + 'givennames-ol.gz', handleDownload)
filequitter()
file = open(dire+'male-names-kantr.gz', 'wb')
print "\r\n[+] downloading male-names-kantr.gz..."
ftp.retrbinary('RETR ' + 'male-names-kantr.gz', handleDownload)
filequitter()
file = open(dire+'male-names.gz', 'wb')
print "\r\n[+] downloading male-names.gz..."
ftp.retrbinary('RETR ' + 'male-names.gz', handleDownload)
filequitter()
file = open(dire+'movie-characters.gz', 'wb')
print "\r\n[+] downloading movie-characters.gz..."
ftp.retrbinary('RETR ' + 'movie-characters.gz', handleDownload)
filequitter()
file = open(dire+'names.french.gz', 'wb')
print "\r\n[+] downloading names.french.gz..."
ftp.retrbinary('RETR ' + 'names.french.gz', handleDownload)
filequitter()
file = open(dire+'names.hp.gz', 'wb')
print "\r\n[+] downloading names.hp.gz..."
ftp.retrbinary('RETR ' + 'names.hp.gz', handleDownload)
filequitter()
file = open(dire+'other-names.gz', 'wb')
print "\r\n[+] downloading other-names.gz..."
ftp.retrbinary('RETR ' + 'other-names.gz', handleDownload)
filequitter()
file = open(dire+'shakesp-names.gz', 'wb')
print "\r\n[+] downloading shakesp-names.gz..."
ftp.retrbinary('RETR ' + 'shakesp-names.gz', handleDownload)
filequitter()
file = open(dire+'surnames-ol.gz', 'wb')
print "\r\n[+] downloading surnames-ol.gz..."
ftp.retrbinary('RETR ' + 'surnames-ol.gz', handleDownload)
filequitter()
file = open(dire+'surnames.finnish.gz', 'wb')
print "\r\n[+] downloading surnames.finnish.gz..."
ftp.retrbinary('RETR ' + 'surnames.finnish.gz', handleDownload)
filequitter()
file = open(dire+'usenet-names.gz', 'wb')
print "\r\n[+] downloading usenet-names.gz..."
ftp.retrbinary('RETR ' + 'usenet-names.gz', handleDownload)
filequitter()
print '[+] files saved to '+ dire
ftp.quit()
exit()
if filedown == "25":
print "[+] connecting..."
ftp = ftplib.FTP(ftpurl)
downloader()
ftp.cwd('net')
if os.path.isdir('dictionaries/net/') == 0:
os.mkdir('dictionaries/net/')
dire = 'dictionaries/net/'
file = open(dire+'hosts-txt.Z', 'wb')
print "\r\n[+] downloading hosts-txt.Z..."
ftp.retrbinary('RETR ' + 'hosts-txt.Z', handleDownload)
filequitter()
file = open(dire+'inet-machines.Z', 'wb')
print "\r\n[+] downloading inet-machines.Z..."
ftp.retrbinary('RETR ' + 'inet-machines.Z', handleDownload)
filequitter()
file = open(dire+'usenet-loginids.Z', 'wb')
print "\r\n[+] downloading usenet-loginids.Z..."
ftp.retrbinary('RETR ' + 'usenet-loginids.Z', handleDownload)
filequitter()
file = open(dire+'usenet-machines.Z', 'wb')
print "\r\n[+] downloading usenet-machines.Z..."
ftp.retrbinary('RETR ' + 'usenet-machines.Z', handleDownload)
filequitter()
file = open(dire+'uunet-sites.Z', 'wb')
print "\r\n[+] downloading uunet-sites.Z..."
ftp.retrbinary('RETR ' + 'uunet-sites.Z', handleDownload)
filequitter()
print '[+] files saved to '+ dire
ftp.quit()
exit()
if filedown == "26":
print "[+] connecting..."
ftp = ftplib.FTP(ftpurl)
downloader()
ftp.cwd('norwegian')
if os.path.isdir('dictionaries/norwegian/') == 0:
os.mkdir('dictionaries/norwegian/')
dire = 'dictionaries/norwegian/'
file = open(dire+'words.norwegian.Z', 'wb')
print "\r\n[+] downloading words.norwegian.Z..."
ftp.retrbinary('RETR ' + 'words.norwegian.Z', handleDownload)
filequitter()
print '[+] file saved to '+ dire
ftp.quit()
exit()
if filedown == "27":
print "[+] connecting..."
ftp = ftplib.FTP(ftpurl)
downloader()
ftp.cwd('places')
if os.path.isdir('dictionaries/places/') == 0:
os.mkdir('dictionaries/places/')
dire = 'dictionaries/places/'
file = open(dire+'Colleges.Z', 'wb')
print "\r\n[+] downloading Colleges.Z..."
ftp.retrbinary('RETR ' + 'Colleges.Z', handleDownload)
filequitter()
file = open(dire+'US-counties.Z', 'wb')
print "\r\n[+] downloading US-counties.Z..."
ftp.retrbinary('RETR ' + 'US-counties.Z', handleDownload)
filequitter()
file = open(dire+'World.factbook.Z', 'wb')
print "\r\n[+] downloading World.factbook.Z..."
ftp.retrbinary('RETR ' + 'World.factbook.Z', handleDownload)
filequitter()
file = open(dire+'Zipcodes.Z', 'wb')
print "\r\n[+] downloading Zipcodes.Z..."
ftp.retrbinary('RETR ' + 'Zipcodes.Z', handleDownload)
filequitter()
file = open(dire+'places.Z', 'wb')
print "\r\n[+] downloading places.Z..."
ftp.retrbinary('RETR ' + 'places.Z', handleDownload)
filequitter()
print '[+] files saved to '+ dire
ftp.quit()
exit()
if filedown == "28":
print "[+] connecting..."
ftp = ftplib.FTP(ftpurl)
downloader()
ftp.cwd('polish')
if os.path.isdir('dictionaries/polish/') == 0:
os.mkdir('dictionaries/polish/')
dire = 'dictionaries/polish/'
file = open(dire+'words.polish.Z', 'wb')
print "\r\n[+] downloading words.polish.Z..."
ftp.retrbinary('RETR ' + 'words.polish.Z', handleDownload)
filequitter()
print '[+] file saved to '+ dire
ftp.quit()
exit()
if filedown == "29":
print "[+] connecting..."
ftp = ftplib.FTP(ftpurl)
downloader()
ftp.cwd('random')
if os.path.isdir('dictionaries/random/') == 0:
os.mkdir('dictionaries/random/')
dire = 'dictionaries/random/'
file = open(dire+'Ethnologue.gz', 'wb')
print "\r\n[+] downloading Ethnologue.gz..."
ftp.retrbinary('RETR ' + 'Ethnologue.gz', handleDownload)
filequitter()
file = open(dire+'abbr.gz', 'wb')
print "\r\n[+] downloading abbr.gz..."
ftp.retrbinary('RETR ' + 'abbr.gz', handleDownload)
filequitter()
file = open(dire+'chars.gz', 'wb')
print "\r\n[+] downloading chars.gz..."
ftp.retrbinary('RETR ' + 'chars.gz', handleDownload)
filequitter()
file = open(dire+'dogs.gz', 'wb')
print "\r\n[+] downloading dogs.gz..."
ftp.retrbinary('RETR ' + 'dogs.gz', handleDownload)
filequitter()
file = open(dire+'drugs.gz', 'wb')
print "\r\n[+] downloading drugs.gz..."
ftp.retrbinary('RETR ' + 'drugs.gz', handleDownload)
filequitter()
file = open(dire+'junk.gz', 'wb')
print "\r\n[+] downloading junk.gz..."
ftp.retrbinary('RETR ' + 'junk.gz', handleDownload)
filequitter()
file = open(dire+'numbers.gz', 'wb')
print "\r\n[+] downloading numbers.gz..."
ftp.retrbinary('RETR ' + 'numbers.gz', handleDownload)
filequitter()
file = open(dire+'phrases.gz', 'wb')
print "\r\n[+] downloading phrases.gz..."
ftp.retrbinary('RETR ' + 'phrases.gz', handleDownload)
filequitter()
file = open(dire+'sports.gz', 'wb')
print "\r\n[+] downloading sports.gz..."
ftp.retrbinary('RETR ' + 'sports.gz', handleDownload)
filequitter()
file = open(dire+'statistics.gz', 'wb')
print "\r\n[+] downloading statistics.gz..."
ftp.retrbinary('RETR ' + 'statistics.gz', handleDownload)
filequitter()
print '[+] files saved to '+ dire
ftp.quit()
exit()
if filedown == "30":
print "[+] connecting..."
ftp = ftplib.FTP(ftpurl)
downloader()
ftp.cwd('religion')
if os.path.isdir('dictionaries/religion/') == 0:
os.mkdir('dictionaries/religion/')
dire = 'dictionaries/religion/'
file = open(dire+'Koran.Z', 'wb')
print "\r\n[+] downloading Koran.Z..."
ftp.retrbinary('RETR ' + 'Koran.Z', handleDownload)
filequitter()
file = open(dire+'kjbible.Z', 'wb')
print "\r\n[+] downloading kjbible.Z..."
ftp.retrbinary('RETR ' + 'kjbible.Z', handleDownload)
filequitter()
file = open(dire+'norse.Z', 'wb')
print "\r\n[+] downloading norse.Z..."
ftp.retrbinary('RETR ' + 'norse.Z', handleDownload)
filequitter()
print '[+] files saved to '+ dire
ftp.quit()
exit()
if filedown == "31":
print "[+] connecting..."
ftp = ftplib.FTP(ftpurl)
downloader()
ftp.cwd('russian')
if os.path.isdir('dictionaries/russian/') == 0:
os.mkdir('dictionaries/russian/')
dire = 'dictionaries/russian/'
file = open(dire+'russian.lst.Z', 'wb')
print "\r\n[+] downloading russian.lst.Z..."
ftp.retrbinary('RETR ' + 'russian.lst.Z', handleDownload)
filequitter()
file = open(dire+'russian_words.koi8.Z', 'wb')
print "\r\n[+] downloading russian_words.koi8.Z..."
ftp.retrbinary('RETR ' + 'russian_words.koi8.Z', handleDownload)
filequitter()
print '[+] files saved to '+ dire
ftp.quit()
exit()
if filedown == "32":
print "[+] connecting..."
ftp = ftplib.FTP(ftpurl)
downloader()
ftp.cwd('science')
if os.path.isdir('dictionaries/science/') == 0:
os.mkdir('dictionaries/science/')
dire = 'dictionaries/science/'
file = open(dire+'Acr-diagnosis.gz', 'wb')
print "\r\n[+] downloading Acr-diagnosis.gz..."
ftp.retrbinary('RETR ' + 'Acr-diagnosis.gz', handleDownload)
filequitter()
file = open(dire+'Algae.gz', 'wb')
print "\r\n[+] downloading Algae.gz..."
ftp.retrbinary('RETR ' + 'Algae.gz', handleDownload)
filequitter()
file = open(dire+'Bacteria.gz', 'wb')
print "\r\n[+] downloading Bacteria.gz..."
ftp.retrbinary('RETR ' + 'Bacteria.gz', handleDownload)
filequitter()
file = open(dire+'Fungi.gz', 'wb')
print "\r\n[+] downloading Fungi.gz..."
ftp.retrbinary('RETR ' + 'Fungi.gz', handleDownload)
filequitter()
file = open(dire+'Microalgae.gz', 'wb')
print "\r\n[+] downloading Microalgae.gz..."
ftp.retrbinary('RETR ' + 'Microalgae.gz', handleDownload)
filequitter()
file = open(dire+'Viruses.gz', 'wb')
print "\r\n[+] downloading Viruses.gz..."
ftp.retrbinary('RETR ' + 'Viruses.gz', handleDownload)
filequitter()
file = open(dire+'asteroids.Z', 'wb')
print "\r\n[+] downloading asteroids.Z..."
ftp.retrbinary('RETR ' + 'asteroids.Z', handleDownload)
filequitter()
file = open(dire+'biology.Z', 'wb')
print "\r\n[+] downloading biology.Z..."
ftp.retrbinary('RETR ' + 'biology.Z', handleDownload)
filequitter()
file = open(dire+'tech.gz', 'wb')
print "\r\n[+] downloading tech.gz..."
ftp.retrbinary('RETR ' + 'tech.gz', handleDownload)
filequitter()
print '[+] files saved to '+ dire
ftp.quit()
exit()
if filedown == "33":
print "[+] connecting..."
ftp = ftplib.FTP(ftpurl)
downloader()
ftp.cwd('spanish')
if os.path.isdir('dictionaries/spanish/') == 0:
os.mkdir('dictionaries/spanish/')
dire = 'dictionaries/spanish/'
file = open(dire+'words.spanish.Z', 'wb')
print "\r\n[+] downloading words.spanish.Z..."
ftp.retrbinary('RETR ' + 'words.spanish.Z', handleDownload)
filequitter()
print '[+] file saved to '+ dire
ftp.quit()
exit()
if filedown == "34":
print "[+] connecting..."
ftp = ftplib.FTP(ftpurl)
downloader()
ftp.cwd('swahili')
if os.path.isdir('dictionaries/swahili/') == 0:
os.mkdir('dictionaries/swahili/')
dire = 'dictionaries/swahili/'
file = open(dire+'swahili.gz', 'wb')
print "\r\n[+] downloading swahili.gz..."
ftp.retrbinary('RETR ' + 'swahili.gz', handleDownload)
filequitter()
print '[+] file saved to '+ dire
ftp.quit()
exit()
if filedown == "35":
print "[+] connecting..."
ftp = ftplib.FTP(ftpurl)
downloader()
ftp.cwd('swedish')
if os.path.isdir('dictionaries/swedish/') == 0:
os.mkdir('dictionaries/swedish/')
dire = 'dictionaries/swedish/'
file = open(dire+'words.swedish.Z', 'wb')
print "\r\n[+] downloading words.swedish.Z..."
ftp.retrbinary('RETR ' + 'words.swedish.Z', handleDownload)
filequitter()
print '[+] file saved to '+ dire
ftp.quit()
exit()
if filedown == "36":
print "[+] connecting..."
ftp = ftplib.FTP(ftpurl)
downloader()
ftp.cwd('turkish')
if os.path.isdir('dictionaries/turkish/') == 0:
os.mkdir('dictionaries/turkish/')
dire = 'dictionaries/turkish/'
file = open(dire+'turkish.dict.gz', 'wb')
print "\r\n[+] downloading turkish.dict.gz..."
ftp.retrbinary('RETR ' + 'turkish.dict.gz', handleDownload)
filequitter()
print '[+] file saved to '+ dire
ftp.quit()
exit()
if filedown == "37":
print "[+] connecting..."
ftp = ftplib.FTP(ftpurl)
downloader()
ftp.cwd('yiddish')
if os.path.isdir('dictionaries/yiddish/') == 0:
os.mkdir('dictionaries/yiddish/')
dire = 'dictionaries/yiddish/'
file = open(dire+'yiddish.Z', 'wb')
print "\r\n[+] downloading yiddish.Z..."
ftp.retrbinary('RETR ' + 'yiddish.Z', handleDownload)
filequitter()
print '[+] file saved to '+ dire
ftp.quit()
exit()
else:
print '[-] leaving.'
exit()
else:
print "\r\n[Usage]: "+sys.argv[0] +" [OPTIONS] \r\n"
print "[Help]: "+sys.argv[0] +" -h\r\n"
exit() | apache-2.0 | -5,743,812,412,542,314,000 | 29.003751 | 478 | 0.633408 | false |
ISCDtoolbox/FaciLe | pipeline/createDatabase.py | 1 | 2873 | import os
import sys
import numpy as np
from copy import deepcopy
import argparse
#Parallel
import subprocess as sp
import multiprocessing as mp
sys.path.append(os.path.join(os.path.dirname(__file__),"../projects/tools"))
import msh
import executable_paths as exe
def parse():
parser = argparse.ArgumentParser(description="Creates mandible and masseter files for the database creation")
parser.add_argument("-i", "--inputDir", help="input directory", type=str, required=True)
parser.add_argument("-o", "--outputDir", help="output directory", type=str, required=True)
return parser.parse_args()
def checkArgs(args):
if not os.path.exists(args.inputDir):
        print args.inputDir + " is not a valid directory"
sys.exit()
    if len([f for f in os.listdir(args.inputDir) if f[0] != "."]) == 0:
print args.inputDir + " is an empty directory"
sys.exit()
if not os.path.exists(args.outputDir):
print args.outputDir + " does not exist, creating"
os.system("mkdir " + args.outputDir)
args.inputDir = os.path.abspath(args.inputDir)
args.outputDir = os.path.abspath(args.outputDir)
def command(cmd, displayOutput=False):
err = 1
print "Running the command '" + cmd + "'"
if displayOutput:
err = os.system(cmd)
else:
err = os.system(cmd + " > tmp_out.txt 2>tmp_err.txt")
if err:
print "An error happened while executing:\n"+cmd+"\nLook in tmp_out.txt or tmp_err.txt for info\nExiting..."
sys.exit()
else:
os.system("rm tmp_out.txt tmp_err.txt >/dev/null 2>&1")
def work(in_file):
"""Defines the work unit on an input file"""
root = '.'.join(in_file.split("/")[-1].split(".")[:-1])
if not os.path.exists("tmp_"+root):
os.mkdir("tmp_"+root)
os.chdir("tmp_"+root)
os.system("cp /home/norgeot/dev/own/FaciLe/projects/warping/demo/sphere.o1.mesh ./sphere.mesh")
cmd = " ".join([exe.processSkull, "-i " + in_file, "-t ../../OsTemplate.mesh",">",root+"_OUT.txt"])
print "Starting the skull processing for " + in_file
#os.system(cmd)
print "Skull processing finished for " + in_file
#clean the working directories
for ext in [".warped.mesh", ".box.1.o.", "mat","_OUT.txt"]:
for f in os.listdir("."):
if ext in f:
os.rename(f, os.path.join(args.outputDir,f))
for f in os.listdir("."):
if ".mesh" in f or ".sol" in f:
#os.remove(f)
#print f + " was successfully removed"
            pass
return 0
if __name__=="__main__":
args = parse()
checkArgs(args)
files = [os.path.join(args.inputDir,f) for f in os.listdir(args.inputDir) if ".mesh" in f]
#Set up the parallel task pool to use all available processors
count = mp.cpu_count()
pool = mp.Pool(processes=count)
pool.map(work, files)
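    # Example invocation (illustrative paths):
    #   python createDatabase.py -i ./skullMeshes -o ./database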
| gpl-3.0 | 9,033,572,557,116,844,000 | 31.647727 | 116 | 0.622346 | false |
iain-peddie/well-behaved-python | tests/WellBehavedPythonTests/Discovery/ModuleExaminerTests.py | 1 | 3114 | #!/usr/bin/env python3
# Copyright 2013 Iain Peddie [email protected]
#
# This file is part of WellBehavedPython
#
# WellBehavedPython is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# WellBehavedPython is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with WellBehavedPython. If not, see <http://www.gnu.org/licenses/>.
from WellBehavedPython.api import *
from WellBehavedPython.Engine.TestCase import TestCase
from WellBehavedPython.Discovery.ModuleExaminer import ModuleExaminer
class ModuleExaminerTests(TestCase):
def test_examiner_can_find__only_class_in_simple_module(self):
# Where
examiner = ModuleExaminer('WellBehavedPythonTests.Samples.SampleModule');
# When
classes = examiner.listAllClasses()
# The classes have been imported
# Then
from ..Samples import SampleModule
expect(classes).toEqual([SampleModule.SampleTests])
def test_examiner_can_find_all_classes_in_complex_module(self):
# Where
examiner = ModuleExaminer('WellBehavedPythonTests.Samples.SampleComplexModule');
# When
classes = examiner.listAllClasses()
# The classes have been imported
# Then
from ..Samples import SampleComplexModule
expect(classes).toContain(SampleComplexModule.SampleFirstTests)
expect(classes).toContain(SampleComplexModule.SampleSecondTests)
expect(classes).toContain(SampleComplexModule.StandaloneClass)
def test_examiner_can_find_all_modules(self):
# Where
examiner = ModuleExaminer('WellBehavedPythonTests.Samples');
# When
modules = examiner.listAllModules();
# Then
from ..Samples import SampleModule
from ..Samples import SampleComplexModule
expect(modules).toContain('WellBehavedPythonTests.Samples.SampleModule');
expect(modules).toContain('WellBehavedPythonTests.Samples.SampleComplexModule');
def test_examiner_is_not_recursive_for_modules(self):
# Where
examiner = ModuleExaminer('WellBehavedPythonTests');
# When
modules = examiner.listAllModules();
# Then
expect(modules).toContain('WellBehavedPythonTests.BackwardsCompatibilityTests');
expect(modules).Not.toContain('WellBehavedPythonTests.Discovery.Samples.SampleModule');
def test_examining_can_find_subpackages(self):
# Where
examiner = ModuleExaminer('WellBehavedPythonTests')
# When
packages = examiner.listAllPackages()
# Then
expect(packages).toContain('WellBehavedPythonTests.Discovery')
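    # Illustrative standalone use (hypothetical package name):
    #   examiner = ModuleExaminer('MyProject.Tests')
    #   print(examiner.listAllModules())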
| gpl-3.0 | 8,893,566,164,321,204,000 | 35.635294 | 95 | 0.706487 | false |