text stringlengths 6-947k | repo_name stringlengths 5-100 | path stringlengths 4-231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6-947k | score float64 0-0.34 |
---|---|---|---|---|---|---
# -*- coding: utf-8 -*-
# Copyright 2020 Green Valley Belgium NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_version:1.7@@
import webapp2
from shop.jobs import clean_unverified_signups
class CleanupUnverifiedSignupRequests(webapp2.RequestHandler):
def get(self):
clean_unverified_signups.job()
| our-city-app/oca-backend | src/shop/cron.py | Python | apache-2.0 | 827 | 0 |
#!/usr/bin/python
""" README
This script is deprecated. Please use https://github.com/device42/servicenow_device42_mapping
This script reads CIs from ServiceNow and uploads them to Device42.
It has 2 modes:
1. Full migration - when TIMEFRAME is set to 0
2. Synchronization - when TIMEFRAME is set to anything other than 0
GOTCHAS
* In order for hardware to be migrated, each hardware entry must have a unique name.
* When there are multiple devices with the same name, the device name is constructed as: "device_name" + "_" + "servicenow_sys_id",
i.e. " MacBook Air 13" " will become " MacBook Air 13"_01a9280d3790200044e0bfc8bcbe5d79 "
"""
import sys
from srvnow2d42 import ServiceNow
__version__ = "2.0.2"
__status__ = "Production"
# ===== Device42 ===== #
D42_USER = 'admin'
D42_PWD = 'adm!nd42'
D42_URL = 'https://192.168.3.30'
# ===== ServiceNow ===== #
USERNAME = 'admin'
PASSWORD = 'admin123'
BASE_URL = 'https://dev13852.service-now.com/api/now/table/'
LIMIT = 1000000 # number of CIs to retrieve from ServiceNow
HEADERS = {"Content-Type":"application/json","Accept":"application/json"}
TABLES = ['cmdb_ci_server' , 'cmdb_ci_computer', 'cmdb_ci_app_server', 'cmdb_ci_database', 'cmdb_ci_email_server',
'cmdb_ci_ftp_server', 'cmdb_ci_directory_server', 'cmdb_ci_ip_server']
# ===== Other ===== #
DEBUG = True # print to STDOUT
DRY_RUN = False # Upload to Device42 or not
ZONE_AS_ROOM = True # for the explanation take a look at get_zones() docstring
TIMEFRAME = 0 # Value represents hours. If set to 0, the script does a full migration; if set to any other value,
# the script syncs changes made from TIMEFRAME hours ago until now(). now() refers to the current local time.
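# Hypothetical sketch (not used by this script; the actual time-based filtering
# happens inside the ServiceNow class): a sync cutoff derived from TIMEFRAME
# would typically look like
#   from datetime import datetime, timedelta
#   cutoff = None if TIMEFRAME == 0 else datetime.now() - timedelta(hours=TIMEFRAME)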
if __name__ == '__main__':
snow = ServiceNow(D42_URL, D42_USER, D42_PWD, USERNAME, PASSWORD, BASE_URL, LIMIT,
HEADERS, DEBUG, DRY_RUN, ZONE_AS_ROOM, TIMEFRAME)
snow.create_db()
snow.get_relationships()
snow.get_manufacturers()
snow.get_hardware()
snow.get_locations()
snow.get_buildings()
snow.get_rooms()
if ZONE_AS_ROOM:
snow.get_zones()
snow.get_racks()
for table in TABLES:
snow.get_computers(table)
snow.get_adapters()
snow.get_ips()
snow.upload_adapters()
sys.exit()
| device42/servicenow_to_device42_sync | starter.py | Python | mit | 2,360 | 0.013136 |
import urlparse
from scrapy.spider import BaseSpider
from scrapy.selector import Selector
from scrapy.http import Request
from ..items import Car
# Add the model(s) below you want to search for
# e.g. MODELS = ['Outback']
MODELS = []
# Enter the domain name(s) here you have permission to retrieve data from
# e.g. DOMAINS = ['http://www.example.com', 'http://www.example.com']
DOMAINS = []
class SubaruSpider(BaseSpider):
name = 'subaru'
def start_requests(self):
for domain in DOMAINS:
for model in MODELS:
url = urlparse.urljoin(domain, 'used-inventory/index.htm?listingConfigId=auto-used&make=Subaru&model=%s' % model)
yield Request(url)
def parse(self, response):
sel = Selector(response)
# Extract any cars found
cars = sel.xpath('//*[contains(@class, "inv-type-used")]')
for c in cars:
car = Car()
# Title and year
car['title'] = c.xpath('.//div/div/h1/a/text()').extract()[0].strip()
car['year'] = car['title'][0:4]
# Price, but remove non-number characters.
# Examples: '$12,000', 'Please Call', etc.
price = c.xpath('.//*[contains(@class, "value")]/text()').extract()[0]
car['price'] = ''.join(d for d in price if d.isdigit())
# url
path = c.xpath('.//div/div/h1/a/@href').extract()[0]
url = urlparse.urlparse(response.url)
car['url'] = urlparse.urlunsplit([url.scheme, url.netloc, path, None, None])
# Certain specs are frequently missing, so we need to handle
# them with try / except
specs = [
{
'name': 'vin',
'xpath': './/*/dt[text()="VIN:"]/following-sibling::dd/text()'
},
{
'name': 'color',
'xpath': './/*/dt[text()="Exterior Color:"]/following-sibling::dd/text()'
},
{
'name': 'miles',
'xpath': './/*/dt[text()="Mileage:"]/following-sibling::dd/text()'
},
{
'name': 'transmission',
'xpath': './/*/dt[text()="Transmission:"]/following-sibling::dd/text()'
}
]
for s in specs:
try:
car[s['name']] = c.xpath(s['xpath']).extract()[0]
except IndexError:
car[s['name']] = None
yield car
# If there's a next page link, parse it for cars as well
next_links = sel.xpath('//*[@rel="next"]/@href').extract()
if len(next_links) > 0:
query = next_links[0]
url = urlparse.urlparse(response.url)
base = urlparse.urlunsplit([url.scheme, url.netloc, url.path, None, None])
next_url = urlparse.urljoin(base, query)
# Construct url
yield Request(next_url, callback=self.parse)
| JeffPaine/subaru_search | subaru/spiders/subaru_spider.py | Python | mit | 3,059 | 0.002942 |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2011 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
"""Extra commands for setup.py.
In addition to providing a few extra command classes in `l10n_cmdclass`,
we also modify the standard `distutils.command.build` and
`setuptools.command.install_lib` classes so that the relevant l10n commands
for compiling catalogs are issued upon install.
"""
from StringIO import StringIO
from itertools import izip
import os
import re
from tokenize import generate_tokens, COMMENT, NAME, OP, STRING
from distutils import log
from distutils.cmd import Command
from distutils.command.build import build as _build
from distutils.errors import DistutilsOptionError
from setuptools.command.install_lib import install_lib as _install_lib
try:
from babel.messages.catalog import TranslationError
from babel.messages.extract import extract_javascript
from babel.messages.frontend import extract_messages, init_catalog, \
compile_catalog, update_catalog
from babel.messages.pofile import read_po
from babel.support import Translations
from babel.util import parse_encoding
_GENSHI_MARKUP_SEARCH = re.compile(r'\[[0-9]+:').search
_DEFAULT_KWARGS_MAPS = {
'Option': {'doc': 4},
'BoolOption': {'doc': 4},
'IntOption': {'doc': 4},
'FloatOption': {'doc': 4},
'ListOption': {'doc': 6},
'ChoiceOption': {'doc': 4},
'PathOption': {'doc': 4},
'ExtensionOption': {'doc': 5},
'OrderedExtensionsOption': {'doc': 6},
}
_DEFAULT_CLEANDOC_KEYWORDS = (
'ConfigSection', 'Option', 'BoolOption', 'IntOption', 'FloatOption',
'ListOption', 'ChoiceOption', 'PathOption', 'ExtensionOption',
'OrderedExtensionsOption', 'cleandoc_',
)
def extract_python(fileobj, keywords, comment_tags, options):
"""Extract messages from Python source code, This is patched
extract_python from Babel to support keyword argument mapping.
`kwargs_maps` option: names of keyword arguments will be mapping to
index of messages array.
`cleandoc_keywords` option: a list of keywords to clean up the
extracted messages with `cleandoc`.
"""
from trac.util.text import cleandoc
funcname = lineno = message_lineno = None
kwargs_maps = func_kwargs_map = None
call_stack = -1
buf = []
messages = []
messages_kwargs = {}
translator_comments = []
in_def = in_translator_comments = False
comment_tag = None
encoding = str(parse_encoding(fileobj) or
options.get('encoding', 'iso-8859-1'))
kwargs_maps = _DEFAULT_KWARGS_MAPS.copy()
if 'kwargs_maps' in options:
kwargs_maps.update(options['kwargs_maps'])
cleandoc_keywords = set(_DEFAULT_CLEANDOC_KEYWORDS)
if 'cleandoc_keywords' in options:
cleandoc_keywords.update(options['cleandoc_keywords'])
tokens = generate_tokens(fileobj.readline)
tok = value = None
for _ in tokens:
prev_tok, prev_value = tok, value
tok, value, (lineno, _), _, _ = _
if call_stack == -1 and tok == NAME and value in ('def', 'class'):
in_def = True
elif tok == OP and value == '(':
if in_def:
# Avoid false positives for declarations such as:
# def gettext(arg='message'):
in_def = False
continue
if funcname:
message_lineno = lineno
call_stack += 1
kwarg_name = None
elif in_def and tok == OP and value == ':':
# End of a class definition without parens
in_def = False
continue
elif call_stack == -1 and tok == COMMENT:
# Strip the comment token from the line
value = value.decode(encoding)[1:].strip()
if in_translator_comments and \
translator_comments[-1][0] == lineno - 1:
# We're already inside a translator comment, continue
# appending
translator_comments.append((lineno, value))
continue
# If execution reaches this point, let's see if comment line
# starts with one of the comment tags
for comment_tag in comment_tags:
if value.startswith(comment_tag):
in_translator_comments = True
translator_comments.append((lineno, value))
break
elif funcname and call_stack == 0:
if tok == OP and value == ')':
if buf:
message = ''.join(buf)
if kwarg_name in func_kwargs_map:
messages_kwargs[kwarg_name] = message
else:
messages.append(message)
del buf[:]
else:
messages.append(None)
for name, message in messages_kwargs.iteritems():
if name not in func_kwargs_map:
continue
index = func_kwargs_map[name]
while index >= len(messages):
messages.append(None)
messages[index - 1] = message
if funcname in cleandoc_keywords:
messages = [m and cleandoc(m) for m in messages]
if len(messages) > 1:
messages = tuple(messages)
else:
messages = messages[0]
# Comments don't apply unless they immediately precede the
# message
if translator_comments and \
translator_comments[-1][0] < message_lineno - 1:
translator_comments = []
yield (message_lineno, funcname, messages,
[comment[1] for comment in translator_comments])
funcname = lineno = message_lineno = None
kwarg_name = func_kwargs_map = None
call_stack = -1
messages = []
messages_kwargs = {}
translator_comments = []
in_translator_comments = False
elif tok == STRING:
# Unwrap quotes in a safe manner, maintaining the string's
# encoding
# https://sourceforge.net/tracker/?func=detail&atid=355470&
# aid=617979&group_id=5470
value = eval('# coding=%s\n%s' % (encoding, value),
{'__builtins__':{}}, {})
if isinstance(value, str):
value = value.decode(encoding)
buf.append(value)
elif tok == OP and value == '=' and prev_tok == NAME:
kwarg_name = prev_value
elif tok == OP and value == ',':
if buf:
message = ''.join(buf)
if kwarg_name in func_kwargs_map:
messages_kwargs[kwarg_name] = message
else:
messages.append(message)
del buf[:]
else:
messages.append(None)
kwarg_name = None
if translator_comments:
# We have translator comments, and since we're on a
# comma(,) user is allowed to break into a new line
# Let's increase the last comment's lineno in order
# for the comment to still be a valid one
old_lineno, old_comment = translator_comments.pop()
translator_comments.append((old_lineno+1, old_comment))
elif call_stack > 0 and tok == OP and value == ')':
call_stack -= 1
elif funcname and call_stack == -1:
funcname = func_kwargs_map = kwarg_name = None
elif tok == NAME and value in keywords:
funcname = value
func_kwargs_map = kwargs_maps.get(funcname, {})
kwarg_name = None
def extract_javascript_script(fileobj, keywords, comment_tags, options):
"""Extract messages from Javascript embedding in <script> tags.
Select <script type="javascript/text"> tags and delegate to
`extract_javascript`.
"""
from genshi.core import Stream
from genshi.input import XMLParser
out = StringIO()
stream = Stream(XMLParser(fileobj))
stream = stream.select('//script[@type="text/javascript"]')
stream.render(out=out, encoding='utf-8')
out.seek(0)
return extract_javascript(out, keywords, comment_tags, options)
class generate_messages_js(Command):
"""Generating message javascripts command for use ``setup.py`` scripts.
"""
description = 'generate message javascript files from binary MO files'
user_options = [
('domain=', 'D',
"domain of PO file (default 'messages')"),
('input-dir=', 'I',
'path to base directory containing the catalogs'),
('input-file=', 'i',
'name of the input file'),
('output-dir=', 'O',
"name of the output directory"),
('output-file=', 'o',
"name of the output file (default "
"'<output_dir>/<locale>.js')"),
('locale=', 'l',
'locale of the catalog to compile'),
]
def initialize_options(self):
self.domain = 'messages'
self.input_dir = None
self.input_file = None
self.output_dir = None
self.output_file = None
self.locale = None
def finalize_options(self):
if not self.input_file and not self.input_dir:
raise DistutilsOptionError('you must specify either the input '
'file or directory')
if not self.output_file and not self.output_dir:
raise DistutilsOptionError('you must specify either the '
'output file or directory')
def run(self):
mo_files = []
js_files = []
def js_path(dir, locale):
return os.path.join(dir, locale + '.js')
if not self.input_file:
if self.locale:
mo_files.append((self.locale,
os.path.join(self.input_dir, self.locale,
'LC_MESSAGES',
self.domain + '.mo')))
js_files.append(js_path(self.output_dir, self.locale))
else:
for locale in os.listdir(self.input_dir):
mo_file = os.path.join(self.input_dir, locale,
'LC_MESSAGES',
self.domain + '.mo')
if os.path.exists(mo_file):
mo_files.append((locale, mo_file))
js_files.append(js_path(self.output_dir, locale))
else:
mo_files.append((self.locale, self.input_file))
if self.output_file:
js_files.append(self.output_file)
else:
js_files.append(js_path(self.output_dir, locale))
if not mo_files:
raise DistutilsOptionError('no compiled catalogs found')
if not os.path.isdir(self.output_dir):
os.mkdir(self.output_dir)
for idx, (locale, mo_file) in enumerate(mo_files):
js_file = js_files[idx]
log.info('generating messages javascript %r to %r',
mo_file, js_file)
with open(mo_file, 'rb') as infile:
t = Translations(infile, self.domain)
catalog = t._catalog
with open(js_file, 'w') as outfile:
write_js(outfile, catalog, self.domain, locale)
class check_catalog(Command):
"""Check message catalog command for use ``setup.py`` scripts."""
description = 'check message catalog files, like `msgfmt --check`'
user_options = [
('domain=', 'D',
"domain of PO file (default 'messages')"),
('input-dir=', 'I',
'path to base directory containing the catalogs'),
('input-file=', 'i',
'name of the input file'),
('locale=', 'l',
'locale of the catalog to compile'),
]
def initialize_options(self):
self.domain = 'messages'
self.input_dir = None
self.input_file = None
self.locale = None
def finalize_options(self):
if not self.input_file and not self.input_dir:
raise DistutilsOptionError('you must specify either the input '
'file or directory')
def run(self):
for filename in self._get_po_files():
log.info('checking catalog %s', filename)
f = open(filename)
try:
catalog = read_po(f, domain=self.domain)
finally:
f.close()
for message in catalog:
for error in self._check_message(catalog, message):
log.warn('%s:%d: %s', filename, message.lineno, error)
def _get_po_files(self):
if self.input_file:
return [self.input_file]
if self.locale:
return [os.path.join(self.input_dir, self.locale,
'LC_MESSAGES', self.domain + '.po')]
files = []
for locale in os.listdir(self.input_dir):
filename = os.path.join(self.input_dir, locale, 'LC_MESSAGES',
self.domain + '.po')
if os.path.exists(filename):
files.append(filename)
return sorted(files)
def _check_message(self, catalog, message):
errors = [e for e in message.check(catalog)]
try:
check_genshi_markup(catalog, message)
except TranslationError as e:
errors.append(e)
return errors
def check_genshi_markup(catalog, message):
"""Verify the genshi markups in the translation."""
msgids = message.id
if not isinstance(msgids, (list, tuple)):
msgids = (msgids,)
msgstrs = message.string
if not isinstance(msgstrs, (list, tuple)):
msgstrs = (msgstrs,)
# check using genshi-markup
if not _GENSHI_MARKUP_SEARCH(msgids[0]):
return
for msgid, msgstr in izip(msgids, msgstrs):
if msgstr:
_validate_genshi_markup(msgid, msgstr)
def _validate_genshi_markup(markup, alternative):
indices_markup = _parse_genshi_markup(markup)
indices_alternative = _parse_genshi_markup(alternative)
indices = indices_markup - indices_alternative
if indices:
raise TranslationError(
'genshi markups are unbalanced %s' % \
' '.join(['[%d:]' % idx for idx in indices]))
def _parse_genshi_markup(message):
from genshi.filters.i18n import parse_msg
try:
return set([idx for idx, text in parse_msg(message)
if idx > 0])
except Exception as e:
raise TranslationError('cannot parse message (%s: %s)' % \
(e.__class__.__name__, unicode(e)))
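# Illustrative sketch (hypothetical catalog entries): for a msgid such as
# 'See the [1:documentation] for details', a msgstr lacking a matching [1:...]
# span makes _validate_genshi_markup() raise
# TranslationError('genshi markups are unbalanced [1:]').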
def write_js(fileobj, catalog, domain, locale):
from trac.util.presentation import to_json
data = {'domain': domain, 'locale': locale}
messages = {}
for msgid, msgstr in catalog.iteritems():
if isinstance(msgid, (list, tuple)):
messages.setdefault(msgid[0], {})
messages[msgid[0]][msgid[1]] = msgstr
elif msgid:
messages[msgid] = msgstr
else:
for line in msgstr.splitlines():
line = line.strip()
if not line:
continue
if ':' not in line:
continue
name, val = line.split(':', 1)
name = name.strip().lower()
if name == 'plural-forms':
data['plural_expr'] = pluralexpr(val)
break
data['messages'] = messages
fileobj.write('// Generated messages javascript file '
'from compiled MO file\n')
fileobj.write('babel.Translations.load(')
fileobj.write(to_json(data).encode('utf-8'))
fileobj.write(').install();\n')
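# The generated file therefore looks roughly like (values are illustrative):
#   // Generated messages javascript file from compiled MO file
#   babel.Translations.load({"domain": "messages", "locale": "fr",
#                            "plural_expr": "(n > 1)", "messages": {...}}).install();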
def pluralexpr(forms):
match = re.search(r'\bplural\s*=\s*([^;]+)', forms)
if not match:
raise ValueError('Failed to parse plural_forms %r' % (forms,))
return match.group(1)
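# Example: for a typical PO header value such as "nplurals=2; plural=(n != 1);",
# pluralexpr() returns the expression string "(n != 1)".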
def get_command_overriders():
# 'bdist_wininst' runs a 'build', so make the latter
# run a 'compile_catalog' before 'build_py'
class build(_build):
sub_commands = [('compile_catalog', None)] + _build.sub_commands
# 'bdist_egg' isn't that nice, all it does is an 'install_lib'
class install_lib(_install_lib): # playing setuptools' own tricks ;-)
def l10n_run(self):
self.run_command('compile_catalog')
def run(self):
self.l10n_run()
# When bdist_egg is called on distribute 0.6.29 and later, the
# egg file includes no *.mo and *.js files which are generated
# in l10n_run() method.
# We remove build_py.data_files property to re-compute in order
# to avoid the issue (#11640).
build_py = self.get_finalized_command('build_py')
if 'data_files' in build_py.__dict__ and \
not any(any(name.endswith('.mo') for name in filenames)
for pkg, src_dir, build_dir, filenames
in build_py.data_files):
del build_py.__dict__['data_files']
_install_lib.run(self)
return build, install_lib
def get_l10n_cmdclass():
build, install_lib = get_command_overriders()
return {
'build': build, 'install_lib': install_lib,
'check_catalog': check_catalog,
}
def get_l10n_js_cmdclass():
build, _install_lib = get_command_overriders()
build.sub_commands.insert(0, ('generate_messages_js', None))
build.sub_commands.insert(0, ('compile_catalog_js', None))
class install_lib(_install_lib):
def l10n_run(self):
self.run_command('compile_catalog_js')
self.run_command('generate_messages_js')
self.run_command('compile_catalog')
return {
'build': build, 'install_lib': install_lib,
'check_catalog': check_catalog,
'extract_messages_js': extract_messages,
'init_catalog_js': init_catalog,
'compile_catalog_js': compile_catalog,
'update_catalog_js': update_catalog,
'generate_messages_js': generate_messages_js,
'check_catalog_js': check_catalog,
}
def get_l10n_trac_cmdclass():
build, _install_lib = get_command_overriders()
build.sub_commands.insert(0, ('generate_messages_js', None))
build.sub_commands.insert(0, ('compile_catalog_js', None))
build.sub_commands.insert(0, ('compile_catalog_tracini', None))
class install_lib(_install_lib):
def l10n_run(self):
self.run_command('compile_catalog_tracini')
self.run_command('compile_catalog_js')
self.run_command('generate_messages_js')
self.run_command('compile_catalog')
return {
'build': build, 'install_lib': install_lib,
'check_catalog': check_catalog,
'extract_messages_js': extract_messages,
'init_catalog_js': init_catalog,
'compile_catalog_js': compile_catalog,
'update_catalog_js': update_catalog,
'generate_messages_js': generate_messages_js,
'check_catalog_js': check_catalog,
'extract_messages_tracini': extract_messages,
'init_catalog_tracini': init_catalog,
'compile_catalog_tracini': compile_catalog,
'update_catalog_tracini': update_catalog,
'check_catalog_tracini': check_catalog,
}
except ImportError:
def get_l10n_cmdclass():
return
def get_l10n_js_cmdclass():
return
def get_l10n_trac_cmdclass():
return
| pkdevbox/trac | trac/dist.py | Python | bsd-3-clause | 22,240 | 0.000899 |
# This loop monitors the rf discharges for a particular amplitude, then repeats for other amplitudes.
from DAQ.Environment import *
def scanRF(LowestAmp, HighestAmp, step, numScans):
# setup
AmpList = []
fileSystem = Environs.FileSystem
file = \
fileSystem.GetDataDirectory(\
fileSystem.Paths["scanMasterDataPath"])\
+ fileSystem.GenerateNextDataFileName()
print("Saving as " + file + "_" + "MeasuredRF1Amp" + "*.zip")
print("")
# start looping
r = range(int(10*LowestAmp), int(10*HighestAmp), int(10*step))
for i in range(len(r)):
print "hc:rf1 Amplitude -> " + str(float(r[i])/10)
hc.SetGreenSynthAmp(float(r[i])/10)
# hc.GreenSynthOnAmplitude = double(r[i]/10)
hc.EnableGreenSynth( False )
hc.EnableGreenSynth( True )
hc.UpdateRFPowerMonitor()
rfAmpMeasured = hc.RF1PowerCentre
hc.StepTarget(2)
System.Threading.Thread.Sleep(500)
sm.AcquireAndWait(numScans)
scanPath = file + "_" + str(i) + "_" + str(rfAmpMeasured) + ".zip"
sm.SaveData(scanPath)
AmpList.append(str(rfAmpMeasured))
print "List of Measured Amplitudes =" + str(AmpList).strip('[]')
def run_script():
print "Use scanRF(LowestAmp, HighestAmp, step, numScans)"
| ColdMatter/EDMSuite | EDMScripts/OldScripts/MonitorRFDischargesScanSynthAmp.py | Python | mit | 1,182 | 0.032995 |
import time
import rlp
import trie
import db
import utils
import processblock
import transactions
import logging
import copy
import sys
from repoze.lru import lru_cache
# logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
INITIAL_DIFFICULTY = 2 ** 17
GENESIS_PREVHASH = '\00' * 32
GENESIS_COINBASE = "0" * 40
GENESIS_NONCE = utils.sha3(chr(42))
GENESIS_GAS_LIMIT = 10 ** 6
MIN_GAS_LIMIT = 125000
GASLIMIT_EMA_FACTOR = 1024
BLOCK_REWARD = 1500 * utils.denoms.finney
UNCLE_REWARD = 15 * BLOCK_REWARD / 16
NEPHEW_REWARD = BLOCK_REWARD / 32
BLOCK_DIFF_FACTOR = 1024
GENESIS_MIN_GAS_PRICE = 0
BLKLIM_FACTOR_NOM = 6
BLKLIM_FACTOR_DEN = 5
DIFF_ADJUSTMENT_CUTOFF = 5
RECORDING = 1
NONE = 0
VERIFYING = -1
GENESIS_INITIAL_ALLOC = \
{"51ba59315b3a95761d0863b05ccc7a7f54703d99": 2 ** 200, # (G)
"e6716f9544a56c530d868e4bfbacb172315bdead": 2 ** 200, # (J)
"b9c015918bdaba24b4ff057a92a3873d6eb201be": 2 ** 200, # (V)
"1a26338f0d905e295fccb71fa9ea849ffa12aaf4": 2 ** 200, # (A)
"2ef47100e0787b915105fd5e3f4ff6752079d5cb": 2 ** 200, # (M)
"cd2a3d9f938e13cd947ec05abc7fe734df8dd826": 2 ** 200, # (R)
"6c386a4b26f73c802f34673f7248bb118f97424a": 2 ** 200, # (HH)
"e4157b34ea9615cfbde6b4fda419828124b70c78": 2 ** 200, # (CH)
}
block_structure = [
["prevhash", "bin", "\00" * 32],
["uncles_hash", "bin", utils.sha3(rlp.encode([]))],
["coinbase", "addr", GENESIS_COINBASE],
["state_root", "trie_root", trie.BLANK_ROOT],
["tx_list_root", "trie_root", trie.BLANK_ROOT],
["difficulty", "int", INITIAL_DIFFICULTY],
["number", "int", 0],
["min_gas_price", "int", GENESIS_MIN_GAS_PRICE],
["gas_limit", "int", GENESIS_GAS_LIMIT],
["gas_used", "int", 0],
["timestamp", "int", 0],
["extra_data", "bin", ""],
["nonce", "bin", ""],
]
block_structure_rev = {}
for i, (name, typ, default) in enumerate(block_structure):
block_structure_rev[name] = [i, typ, default]
acct_structure = [
["nonce", "int", 0],
["balance", "int", 0],
["storage", "trie_root", trie.BLANK_ROOT],
["code", "hash", ""],
]
acct_structure_rev = {}
for i, (name, typ, default) in enumerate(acct_structure):
acct_structure_rev[name] = [i, typ, default]
def calc_difficulty(parent, timestamp):
offset = parent.difficulty / BLOCK_DIFF_FACTOR
sign = 1 if timestamp - parent.timestamp < DIFF_ADJUSTMENT_CUTOFF else -1
return parent.difficulty + offset * sign
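# Illustrative numbers: with parent.difficulty == 2**17 (131072), a child mined
# less than DIFF_ADJUSTMENT_CUTOFF (5) seconds after its parent gets difficulty
# 131072 + 128 = 131200; otherwise it drops to 130944.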
def calc_gaslimit(parent):
prior_contribution = parent.gas_limit * (GASLIMIT_EMA_FACTOR - 1)
new_contribution = parent.gas_used * BLKLIM_FACTOR_NOM / BLKLIM_FACTOR_DEN
gl = (prior_contribution + new_contribution) / GASLIMIT_EMA_FACTOR
return max(gl, MIN_GAS_LIMIT)
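# Sketch of the rule above: the gas limit is an exponential moving average
# mixing 1023 parts of the previous limit with 1 part of (6/5 x the gas
# actually used), floored at MIN_GAS_LIMIT.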
class UnknownParentException(Exception):
pass
class TransientBlock(object):
"""
Read-only, non-persisted, non-validated representation of a block
"""
def __init__(self, rlpdata):
self.rlpdata = rlpdata
self.header_args, transaction_list, uncles = rlp.decode(rlpdata)
self.hash = utils.sha3(rlp.encode(self.header_args))
self.transaction_list = transaction_list # rlp encoded transactions
self.uncles = uncles
for i, (name, typ, default) in enumerate(block_structure):
setattr(self, name, utils.decoders[typ](self.header_args[i]))
def __repr__(self):
return '<TransientBlock(#%d %s %s)>' %\
(self.number, self.hash.encode('hex')[
:4], self.prevhash.encode('hex')[:4])
def check_header_pow(header):
assert len(header[-1]) == 32
rlp_Hn = rlp.encode(header[:-1])
nonce = header[-1]
diff = utils.decoders['int'](header[block_structure_rev['difficulty'][0]])
h = utils.sha3(utils.sha3(rlp_Hn) + nonce)
return utils.big_endian_to_int(h) < 2 ** 256 / diff
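# In other words: a nonce is valid when sha3(sha3(rlp(header_without_nonce)) + nonce),
# read as a big-endian integer, falls below 2**256 / difficulty.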
class Block(object):
def __init__(self,
prevhash='\00' * 32,
uncles_hash=block_structure_rev['uncles_hash'][2],
coinbase=block_structure_rev['coinbase'][2],
state_root=trie.BLANK_ROOT,
tx_list_root=trie.BLANK_ROOT,
difficulty=block_structure_rev['difficulty'][2],
number=0,
min_gas_price=block_structure_rev['min_gas_price'][2],
gas_limit=block_structure_rev['gas_limit'][2],
gas_used=0, timestamp=0, extra_data='', nonce='',
transaction_list=[],
uncles=[],
header=None):
self.prevhash = prevhash
self.uncles_hash = uncles_hash
self.coinbase = coinbase
self.difficulty = difficulty
self.number = number
self.min_gas_price = min_gas_price
self.gas_limit = gas_limit
self.gas_used = gas_used
self.timestamp = timestamp
self.extra_data = extra_data
self.nonce = nonce
self.uncles = uncles
self.suicides = []
self.postqueue = []
self.caches = {
'balance': {},
'nonce': {},
'code': {},
'all': {}
}
self.journal = []
self.transactions = trie.Trie(utils.get_db_path(), tx_list_root)
self.transaction_count = 0
self.state = trie.Trie(utils.get_db_path(), state_root)
self.proof_mode = None
self.proof_nodes = []
# If transaction_list is None, then it's a block header imported for
# SPV purposes
if transaction_list is not None:
# support init with transactions only if state is known
assert self.state.root_hash_valid()
for tx_lst_serialized, state_root, gas_used_encoded \
in transaction_list:
self._add_transaction_to_list(
tx_lst_serialized, state_root, gas_used_encoded)
if tx_list_root != self.transactions.root_hash:
raise Exception("Transaction list root hash does not match!")
if not self.is_genesis() and self.nonce and\
not check_header_pow(header or self.list_header()):
raise Exception("PoW check failed")
# make sure we are all on the same db
assert self.state.db.db == self.transactions.db.db
# use de/encoders to check type and validity
for name, typ, d in block_structure:
v = getattr(self, name)
assert utils.decoders[typ](utils.encoders[typ](v)) == v
# Basic consistency verifications
if not self.state.root_hash_valid():
raise Exception(
"State Merkle root not found in database! %r" % self)
if not self.transactions.root_hash_valid():
raise Exception(
"Transactions root not found in database! %r" % self)
if len(self.extra_data) > 1024:
raise Exception("Extra data cannot exceed 1024 bytes")
if self.coinbase == '':
raise Exception("Coinbase cannot be empty address")
def validate_uncles(self):
if utils.sha3(rlp.encode(self.uncles)) != self.uncles_hash:
return False
# Check uncle validity
ancestor_chain = [self]
# Uncle can have a block from 2-7 blocks ago as its parent
for i in [1, 2, 3, 4, 5, 6, 7]:
if ancestor_chain[-1].number > 0:
ancestor_chain.append(ancestor_chain[-1].get_parent())
ineligible = []
# Uncles of this block cannot be direct ancestors and cannot also
# be uncles included 1-6 blocks ago
for ancestor in ancestor_chain[1:]:
ineligible.extend(ancestor.uncles)
ineligible.extend([b.list_header() for b in ancestor_chain])
eligible_ancestor_hashes = [x.hash for x in ancestor_chain[2:]]
for uncle in self.uncles:
if not check_header_pow(uncle):
sys.stderr.write('1\n\n')
return False
# uncle's parent cannot be the block's own parent
prevhash = uncle[block_structure_rev['prevhash'][0]]
if prevhash not in eligible_ancestor_hashes:
logger.debug("%r: Uncle does not have a valid ancestor", self)
sys.stderr.write('2 ' + prevhash.encode('hex') + ' ' + str(map(lambda x: x.encode('hex'), eligible_ancestor_hashes)) + '\n\n')
return False
if uncle in ineligible:
sys.stderr.write('3\n\n')
logger.debug("%r: Duplicate uncle %r", self, utils.sha3(rlp.encode(uncle)).encode('hex'))
return False
ineligible.append(uncle)
return True
def is_genesis(self):
return self.prevhash == GENESIS_PREVHASH and \
self.nonce == GENESIS_NONCE
def check_proof_of_work(self, nonce):
H = self.list_header()
H[-1] = nonce
return check_header_pow(H)
@classmethod
def deserialize_header(cls, header_data):
if isinstance(header_data, (str, unicode)):
header_data = rlp.decode(header_data)
assert len(header_data) == len(block_structure)
kargs = {}
# Deserialize all properties
for i, (name, typ, default) in enumerate(block_structure):
kargs[name] = utils.decoders[typ](header_data[i])
return kargs
@classmethod
def deserialize(cls, rlpdata):
header_args, transaction_list, uncles = rlp.decode(rlpdata)
kargs = cls.deserialize_header(header_args)
kargs['header'] = header_args
kargs['transaction_list'] = transaction_list
kargs['uncles'] = uncles
# if we don't have the state we need to replay transactions
_db = db.DB(utils.get_db_path())
if len(kargs['state_root']) == 32 and kargs['state_root'] in _db:
return Block(**kargs)
elif kargs['prevhash'] == GENESIS_PREVHASH:
return Block(**kargs)
else: # no state, need to replay
try:
parent = get_block(kargs['prevhash'])
except KeyError:
raise UnknownParentException(kargs['prevhash'].encode('hex'))
return parent.deserialize_child(rlpdata)
@classmethod
def init_from_header(cls, rlpdata):
kargs = cls.deserialize_header(rlpdata)
kargs['transaction_list'] = None
kargs['uncles'] = None
return Block(**kargs)
def deserialize_child(self, rlpdata):
"""
deserialization w/ replaying transactions
"""
header_args, transaction_list, uncles = rlp.decode(rlpdata)
assert len(header_args) == len(block_structure)
kargs = dict(transaction_list=transaction_list, uncles=uncles)
# Deserialize all properties
for i, (name, typ, default) in enumerate(block_structure):
kargs[name] = utils.decoders[typ](header_args[i])
block = Block.init_from_parent(self, kargs['coinbase'],
extra_data=kargs['extra_data'],
timestamp=kargs['timestamp'],
uncles=uncles)
# replay transactions
for tx_lst_serialized, _state_root, _gas_used_encoded in \
transaction_list:
tx = transactions.Transaction.create(tx_lst_serialized)
# logger.debug('state:\n%s', utils.dump_state(block.state))
# logger.debug('applying %r', tx)
success, output = processblock.apply_transaction(block, tx)
#block.add_transaction_to_list(tx) # < this is done by processblock
# logger.debug('state:\n%s', utils.dump_state(block.state))
logger.debug('d %s %s', _gas_used_encoded, block.gas_used)
assert utils.decode_int(_gas_used_encoded) == block.gas_used, \
"Gas mismatch (ours %d, theirs %d) on block: %r" % \
(block.gas_used, _gas_used_encoded, block.to_dict(False, True, True))
assert _state_root == block.state.root_hash, \
"State root mismatch (ours %r theirs %r) on block: %r" % \
(block.state.root_hash.encode('hex'),
_state_root.encode('hex'),
block.to_dict(False, True, True))
block.finalize()
block.uncles_hash = kargs['uncles_hash']
block.nonce = kargs['nonce']
block.min_gas_price = kargs['min_gas_price']
# checks
assert block.prevhash == self.hash
assert block.gas_used == kargs['gas_used']
assert block.gas_limit == kargs['gas_limit']
assert block.timestamp == kargs['timestamp']
assert block.difficulty == kargs['difficulty']
assert block.number == kargs['number']
assert block.extra_data == kargs['extra_data']
assert utils.sha3(rlp.encode(block.uncles)) == kargs['uncles_hash']
assert block.tx_list_root == kargs['tx_list_root']
assert block.state.root_hash == kargs['state_root'], (block.state.root_hash, kargs['state_root'])
return block
@classmethod
def hex_deserialize(cls, hexrlpdata):
return cls.deserialize(hexrlpdata.decode('hex'))
def mk_blank_acct(self):
if not hasattr(self, '_blank_acct'):
codehash = ''
self.state.db.put(codehash, '')
self._blank_acct = [utils.encode_int(0),
utils.encode_int(0),
trie.BLANK_ROOT,
codehash]
return self._blank_acct[:]
def get_acct(self, address):
if len(address) == 40:
address = address.decode('hex')
acct = rlp.decode(self.state.get(address)) or self.mk_blank_acct()
return tuple(utils.decoders[t](acct[i])
for i, (n, t, d) in enumerate(acct_structure))
# _get_acct_item(bin or hex, int) -> bin
def _get_acct_item(self, address, param):
''' get account item
:param address: account address, can be binary or hex string
:param param: parameter to get
'''
if param != 'storage' and address in self.caches[param]:
return self.caches[param][address]
return self.get_acct(address)[acct_structure_rev[param][0]]
# _set_acct_item(bin or hex, int, bin)
def _set_acct_item(self, address, param, value):
''' set account item
:param address: account address, can be binary or hex string
:param param: parameter to set
:param value: new value
'''
# logger.debug('set acct %r %r %d', address, param, value)
self.set_and_journal(param, address, value)
self.set_and_journal('all', address, True)
def set_and_journal(self, cache, index, value):
prev = self.caches[cache].get(index, None)
if prev != value:
self.journal.append([cache, index, prev, value])
self.caches[cache][index] = value
# _delta_item(bin or hex, int, int) -> success/fail
def _delta_item(self, address, param, value):
''' add value to account item
:param address: account address, can be binary or hex string
:param param: parameter to increase/decrease
:param value: can be positive or negative
'''
value = self._get_acct_item(address, param) + value
if value < 0:
return False
self._set_acct_item(address, param, value)
return True
def _add_transaction_to_list(self, tx_lst_serialized,
state_root, gas_used_encoded):
# adds encoded data # FIXME: the constructor should get objects
assert isinstance(tx_lst_serialized, list)
data = [tx_lst_serialized, state_root, gas_used_encoded]
self.transactions.update(
rlp.encode(utils.encode_int(self.transaction_count)),
rlp.encode(data))
self.transaction_count += 1
def add_transaction_to_list(self, tx):
tx_lst_serialized = rlp.decode(tx.serialize())
self._add_transaction_to_list(tx_lst_serialized,
self.state_root,
utils.encode_int(self.gas_used))
def _list_transactions(self):
# returns [[tx_lst_serialized, state_root, gas_used_encoded],...]
txlist = []
for i in range(self.transaction_count):
txlist.append(self.get_transaction(i))
return txlist
def get_transaction(self, num):
# returns [tx_lst_serialized, state_root, gas_used_encoded]
return rlp.decode(self.transactions.get(rlp.encode(utils.encode_int(num))))
def get_transactions(self):
return [transactions.Transaction.create(tx) for
tx, s, g in self._list_transactions()]
def get_nonce(self, address):
return self._get_acct_item(address, 'nonce')
def set_nonce(self, address, value):
return self._set_acct_item(address, 'nonce', value)
def increment_nonce(self, address):
return self._delta_item(address, 'nonce', 1)
def decrement_nonce(self, address):
return self._delta_item(address, 'nonce', -1)
def get_balance(self, address):
return self._get_acct_item(address, 'balance')
def set_balance(self, address, value):
self._set_acct_item(address, 'balance', value)
def delta_balance(self, address, value):
return self._delta_item(address, 'balance', value)
def transfer_value(self, from_addr, to_addr, value):
assert value >= 0
if self.delta_balance(from_addr, -value):
return self.delta_balance(to_addr, value)
return False
def get_code(self, address):
return self._get_acct_item(address, 'code')
def set_code(self, address, value):
self._set_acct_item(address, 'code', value)
def get_storage(self, address):
storage_root = self._get_acct_item(address, 'storage')
return trie.Trie(utils.get_db_path(), storage_root)
def get_storage_data(self, address, index):
if 'storage:'+address in self.caches:
if index in self.caches['storage:'+address]:
return self.caches['storage:'+address][index]
t = self.get_storage(address)
t.proof_mode = self.proof_mode
t.proof_nodes = self.proof_nodes
key = utils.zpad(utils.coerce_to_bytes(index), 32)
val = rlp.decode(t.get(key))
if self.proof_mode == RECORDING:
self.proof_nodes.extend(t.proof_nodes)
return utils.big_endian_to_int(val) if val else 0
def set_storage_data(self, address, index, val):
if 'storage:'+address not in self.caches:
self.caches['storage:'+address] = {}
self.set_and_journal('all', address, True)
self.set_and_journal('storage:'+address, index, val)
def commit_state(self):
changes = []
if not len(self.journal):
processblock.pblogger.log('delta', changes=[])
return
for address in self.caches['all']:
acct = rlp.decode(self.state.get(address.decode('hex'))) \
or self.mk_blank_acct()
for i, (key, typ, default) in enumerate(acct_structure):
if key == 'storage':
t = trie.Trie(utils.get_db_path(), acct[i])
t.proof_mode = self.proof_mode
t.proof_nodes = self.proof_nodes
for k, v in self.caches.get('storage:'+address, {}).iteritems():
enckey = utils.zpad(utils.coerce_to_bytes(k), 32)
val = rlp.encode(utils.int_to_big_endian(v))
changes.append(['storage', address, k, v])
if v:
t.update(enckey, val)
else:
t.delete(enckey)
acct[i] = t.root_hash
if self.proof_mode == RECORDING:
self.proof_nodes.extend(t.proof_nodes)
else:
if address in self.caches[key]:
v = self.caches[key].get(address, default)
changes.append([key, address, v])
acct[i] = utils.encoders[acct_structure[i][1]](v)
self.state.update(address.decode('hex'), rlp.encode(acct))
if self.proof_mode == RECORDING:
self.proof_nodes.extend(self.state.proof_nodes)
self.state.proof_nodes = []
if processblock.pblogger.log_state_delta:
processblock.pblogger.log('delta', changes=changes)
self.reset_cache()
def del_account(self, address):
self.commit_state()
if len(address) == 40:
address = address.decode('hex')
self.state.delete(address)
def account_to_dict(self, address, with_storage_root=False,
with_storage=True, for_vmtest=False):
if with_storage_root:
assert len(self.journal) == 0
med_dict = {}
for i, val in enumerate(self.get_acct(address)):
name, typ, default = acct_structure[i]
key = acct_structure[i][0]
if name == 'storage':
strie = trie.Trie(utils.get_db_path(), val)
if with_storage_root:
med_dict['storage_root'] = strie.get_root_hash().encode('hex')
else:
med_dict[key] = self.caches[key].get(address, utils.printers[typ](val))
if with_storage:
med_dict['storage'] = {}
d = strie.to_dict()
subcache = self.caches.get('storage:'+address, {})
subkeys = [utils.zpad(utils.coerce_to_bytes(kk), 32) for kk in subcache.keys()]
for k in d.keys() + subkeys:
v = d.get(k, None)
v2 = subcache.get(utils.big_endian_to_int(k), None)
hexkey = '0x'+utils.zunpad(k).encode('hex')
if v2 is not None:
if v2 != 0:
med_dict['storage'][hexkey] = \
'0x'+utils.int_to_big_endian(v2).encode('hex')
elif v is not None:
med_dict['storage'][hexkey] = '0x'+rlp.decode(v).encode('hex')
return med_dict
def reset_cache(self):
self.caches = {
'all': {},
'balance': {},
'nonce': {},
'code': {},
}
self.journal = []
# Revert computation
def snapshot(self):
return {
'state': self.state.root_hash,
'gas': self.gas_used,
'txs': self.transactions,
'txcount': self.transaction_count,
'postqueue': copy.copy(self.postqueue),
'suicides': self.suicides,
'suicides_size': len(self.suicides),
'journal': self.journal, # pointer to reference, so is not static
'journal_size': len(self.journal)
}
def revert(self, mysnapshot):
self.journal = mysnapshot['journal']
logger.debug('reverting')
while len(self.journal) > mysnapshot['journal_size']:
cache, index, prev, post = self.journal.pop()
logger.debug('%r %r %r %r', cache, index, prev, post)
if prev is not None:
self.caches[cache][index] = prev
else:
del self.caches[cache][index]
self.suicides = mysnapshot['suicides']
while len(self.suicides) > mysnapshot['suicides_size']:
self.suicides.pop()
self.state.root_hash = mysnapshot['state']
self.gas_used = mysnapshot['gas']
self.transactions = mysnapshot['txs']
self.transaction_count = mysnapshot['txcount']
self.postqueue = mysnapshot['postqueue']
def finalize(self):
"""
Apply rewards
We raise the block's coinbase account by Rb, the block reward,
and the coinbase of each uncle by 15/16 of that (UNCLE_REWARD).
Rb = 1500 finney
"""
self.delta_balance(self.coinbase,
BLOCK_REWARD + NEPHEW_REWARD * len(self.uncles))
for uncle_rlp in self.uncles:
uncle_data = Block.deserialize_header(uncle_rlp)
self.delta_balance(uncle_data['coinbase'], UNCLE_REWARD)
self.commit_state()
def serialize_header_without_nonce(self):
return rlp.encode(self.list_header(exclude=['nonce']))
def get_state_root(self):
self.commit_state()
return self.state.root_hash
def set_state_root(self, state_root_hash):
self.state = trie.Trie(utils.get_db_path(), state_root_hash)
self.reset_cache()
state_root = property(get_state_root, set_state_root)
def get_tx_list_root(self):
return self.transactions.root_hash
tx_list_root = property(get_tx_list_root)
def list_header(self, exclude=[]):
header = []
for name, typ, default in block_structure:
# print name, typ, default , getattr(self, name)
if name not in exclude:
header.append(utils.encoders[typ](getattr(self, name)))
return header
def serialize(self):
# Serialization method; should act as perfect inverse function of the
# constructor assuming no verification failures
return rlp.encode([self.list_header(),
self._list_transactions(),
self.uncles])
def hex_serialize(self):
return self.serialize().encode('hex')
def serialize_header(self):
return rlp.encode(self.list_header())
def hex_serialize_header(self):
return rlp.encode(self.list_header()).encode('hex')
def to_dict(self, with_state=False, full_transactions=False,
with_storage_roots=False, with_uncles=False):
"""
serializes the block
with_state: include state for all accounts
full_transactions: include serialized tx (hashes otherwise)
with_uncles: include uncle hashes
"""
b = {}
for name, typ, default in block_structure:
b[name] = utils.printers[typ](getattr(self, name))
txlist = []
for i in range(self.transaction_count):
tx_rlp = self.transactions.get(rlp.encode(utils.encode_int(i)))
tx, msr, gas = rlp.decode(tx_rlp)
if full_transactions:
txjson = transactions.Transaction.create(tx).to_dict()
else:
txjson = utils.sha3(rlp.descend(tx_rlp, 0)).encode('hex') # tx hash
txlist.append({
"tx": txjson,
"medstate": msr.encode('hex'),
"gas": str(utils.decode_int(gas))
})
b["transactions"] = txlist
if with_state:
state_dump = {}
for address, v in self.state.to_dict().iteritems():
state_dump[address.encode('hex')] = \
self.account_to_dict(address, with_storage_roots)
b['state'] = state_dump
if with_uncles:
b['uncles'] = [utils.sha3(rlp.encode(u)).encode('hex') for u in self.uncles]
return b
def _hash(self):
return utils.sha3(self.serialize_header())
@property
def hash(self):
return self._hash()
def hex_hash(self):
return self.hash.encode('hex')
def get_parent(self):
if self.number == 0:
raise UnknownParentException('Genesis block has no parent')
try:
parent = get_block(self.prevhash)
except KeyError:
raise UnknownParentException(self.prevhash.encode('hex'))
#assert parent.state.db.db == self.state.db.db
return parent
def has_parent(self):
try:
self.get_parent()
return True
except UnknownParentException:
return False
def chain_difficulty(self):
# calculate the summarized_difficulty
if self.is_genesis():
return self.difficulty
elif 'difficulty:'+self.hex_hash() in self.state.db:
return utils.decode_int(
self.state.db.get('difficulty:'+self.hex_hash()))
else:
_idx, _typ, _ = block_structure_rev['difficulty']
o = self.difficulty + self.get_parent().chain_difficulty()
o += sum([utils.decoders[_typ](u[_idx]) for u in self.uncles])
self.state.db.put('difficulty:'+self.hex_hash(), utils.encode_int(o))
return o
def __eq__(self, other):
return isinstance(other, (Block, CachedBlock)) and self.hash == other.hash
def __ne__(self, other):
return not self.__eq__(other)
def __gt__(self, other):
return self.number > other.number
def __lt__(self, other):
return self.number < other.number
def __repr__(self):
return '<Block(#%d %s %s)>' % (self.number,
self.hex_hash()[:4],
self.prevhash.encode('hex')[:4])
@classmethod
def init_from_parent(cls, parent, coinbase, extra_data='',
timestamp=int(time.time()), uncles=[]):
return Block(
prevhash=parent.hash,
uncles_hash=utils.sha3(rlp.encode(uncles)),
coinbase=coinbase,
state_root=parent.state.root_hash,
tx_list_root=trie.BLANK_ROOT,
difficulty=calc_difficulty(parent, timestamp),
number=parent.number + 1,
min_gas_price=0,
gas_limit=calc_gaslimit(parent),
gas_used=0,
timestamp=timestamp,
extra_data=extra_data,
nonce='',
transaction_list=[],
uncles=uncles)
def set_proof_mode(self, pm, pmnodes=None):
self.proof_mode = pm
self.state.proof_mode = pm
self.proof_nodes = pmnodes or []
self.state.proof_nodes = pmnodes or []
class CachedBlock(Block):
# note: immutable refers to: do not manipulate!
_hash_cached = None
def _set_acct_item(self): raise NotImplementedError
def _add_transaction_to_list(self): raise NotImplementedError
def set_state_root(self): raise NotImplementedError
def revert(self): raise NotImplementedError
def commit_state(self): pass
def _hash(self):
if not self._hash_cached:
self._hash_cached = Block._hash(self)
return self._hash_cached
@classmethod
def create_cached(cls, blk):
blk.__class__ = CachedBlock
return blk
@lru_cache(500)
def get_block(blockhash):
"""
Assumption: blocks loaded from the db are not manipulated
-> can be cached, including the hash
"""
return CachedBlock.create_cached(Block.deserialize(db.DB(utils.get_db_path()).get(blockhash)))
def has_block(blockhash):
return blockhash in db.DB(utils.get_db_path())
def genesis(start_alloc=GENESIS_INITIAL_ALLOC, difficulty=INITIAL_DIFFICULTY):
# https://ethereum.etherpad.mozilla.org/11
block = Block(prevhash=GENESIS_PREVHASH, coinbase=GENESIS_COINBASE,
tx_list_root=trie.BLANK_ROOT,
difficulty=difficulty, nonce=GENESIS_NONCE,
gas_limit=GENESIS_GAS_LIMIT)
for addr, balance in start_alloc.iteritems():
block.set_balance(addr, balance)
block.state.db.commit()
return block
def dump_genesis_block_tests_data():
import json
g = genesis()
data = dict(
genesis_state_root=g.state_root.encode('hex'),
genesis_hash=g.hex_hash(),
genesis_rlp_hex=g.serialize().encode('hex'),
initial_alloc=dict()
)
for addr, balance in GENESIS_INITIAL_ALLOC.iteritems():
data['initial_alloc'][addr] = str(balance)
print json.dumps(data, indent=1)
| jnnk/pyethereum | pyethereum/blocks.py | Python | mit | 32,057 | 0.000655 |
#!/usr/bin/env python3
# Copyright © 2012-13 Qtrac Ltd. All rights reserved.
# This program or module is free software: you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version. It is provided for
# educational purposes and is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
import argparse
import collections
import math
import multiprocessing
import os
import sys
import Image
import Qtrac
Result = collections.namedtuple("Result", "todo copied scaled name")
def main():
size, smooth, source, target, concurrency = handle_commandline()
Qtrac.report("starting...")
canceled = False
try:
scale(size, smooth, source, target, concurrency)
except KeyboardInterrupt:
Qtrac.report("canceling...")
canceled = True
summarize(concurrency, canceled)
def handle_commandline():
parser = argparse.ArgumentParser()
parser.add_argument("-c", "--concurrency", type=int,
default=multiprocessing.cpu_count(),
help="specify the concurrency (for debugging and "
"timing) [default: %(default)d]")
parser.add_argument("-s", "--size", default=400, type=int,
help="make a scaled image that fits the given dimension "
"[default: %(default)d]")
parser.add_argument("-S", "--smooth", action="store_true",
help="use smooth scaling (slow but good for text)")
parser.add_argument("source",
help="the directory containing the original .xpm images")
parser.add_argument("target",
help="the directory for the scaled .xpm images")
args = parser.parse_args()
source = os.path.abspath(args.source)
target = os.path.abspath(args.target)
if source == target:
args.error("source and target must be different")
if not os.path.exists(args.target):
os.makedirs(target)
return args.size, args.smooth, source, target, args.concurrency
def scale(size, smooth, source, target, concurrency):
pipeline = create_pipeline(size, smooth, concurrency)
for i, (sourceImage, targetImage) in enumerate(
get_jobs(source, target)):
pipeline.send((sourceImage, targetImage, i % concurrency))
def create_pipeline(size, smooth, concurrency):
pipeline = None
sink = results()
for who in range(concurrency):
pipeline = scaler(pipeline, sink, size, smooth, who)
return pipeline
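# How the chain built above behaves: scale() tags each job with i % concurrency,
# and every scaler coroutine only processes jobs whose tag equals its own `who`,
# forwarding all other jobs to the next scaler in the chain.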
def get_jobs(source, target):
for name in os.listdir(source):
yield os.path.join(source, name), os.path.join(target, name)
@Qtrac.coroutine
def scaler(receiver, sink, size, smooth, me):
while True:
sourceImage, targetImage, who = (yield)
if who == me:
try:
result = scale_one(size, smooth, sourceImage, targetImage)
sink.send(result)
except Image.Error as err:
Qtrac.report(str(err), True)
elif receiver is not None:
receiver.send((sourceImage, targetImage, who))
@Qtrac.coroutine
def results():
while True:
result = (yield)
results.todo += result.todo
results.copied += result.copied
results.scaled += result.scaled
Qtrac.report("{} {}".format("copied" if result.copied else "scaled",
os.path.basename(result.name)))
results.todo = results.copied = results.scaled = 0
def scale_one(size, smooth, sourceImage, targetImage):
oldImage = Image.from_file(sourceImage)
if oldImage.width <= size and oldImage.height <= size:
oldImage.save(targetImage)
return Result(1, 1, 0, targetImage)
else:
if smooth:
scale = min(size / oldImage.width, size / oldImage.height)
newImage = oldImage.scale(scale)
else:
stride = int(math.ceil(max(oldImage.width / size,
oldImage.height / size)))
newImage = oldImage.subsample(stride)
newImage.save(targetImage)
return Result(1, 0, 1, targetImage)
def summarize(concurrency, canceled):
message = "copied {} scaled {} ".format(results.copied, results.scaled)
difference = results.todo - (results.copied + results.scaled)
if difference:
message += "skipped {} ".format(difference)
message += "using {} coroutines".format(concurrency)
if canceled:
message += " [canceled]"
Qtrac.report(message)
print()
if __name__ == "__main__":
main()
| nwiizo/workspace_2017 | pipng/imagescale-c.py | Python | mit | 4,916 | 0.002035 |
import os
from setuptools import setup
README = open(os.path.join(os.path.dirname(__file__), 'README.md')).read()
LICENSE = open(os.path.join(os.path.dirname(__file__), 'LICENSE.txt')).read()
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
setup(
name='django-spreadsheetresponsemixin',
version='0.1.10',
packages=['spreadsheetresponsemixin'],
include_package_data=True,
license=LICENSE,
description='A mixin for views with a queryset that provides a CSV/Excel export.',
long_description=README,
url='https://github.com/birdsarah/django-spreadsheetresponsemixin',
author='Sarah Bird',
author_email='[email protected]',
install_requires=['django>=1.5', 'openpyxl>=2.0.3'],
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content'],
)
| aptivate/django-spreadsheetresponsemixin | setup.py | Python | gpl-3.0 | 1,365 | 0.001465 |
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2015 ADHOC SA (http://www.adhoc.com.ar)
# All Rights Reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Sale Restrict Partners',
'version': '8.0.1.0.0',
'category': 'Sales Management',
'sequence': 14,
'summary': 'Sales, Product, Category, Classification',
'description': """
Sale Restrict Partners
======================
Users with the group "Sale - Own Leads" can only see partners that are assigned to them or partners assigned to no one.
It also adds the current user as the default salesman for new partners.
""",
'author': 'ADHOC SA',
'website': 'www.adhoc.com.ar',
'images': [
],
'depends': [
'sale',
],
'data': [
'security/security.xml',
],
'demo': [
],
'test': [
],
'installable': True,
'auto_install': False,
'application': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: | HBEE/odoo-addons | sale_restrict_partners/__openerp__.py | Python | agpl-3.0 | 1,762 | 0.001135 |
# ------------------------------------------------------------------------------
import os, os.path, sys, re, time, random, types, base64
from appy import Object
import appy.gen
from appy.gen import Search, UiSearch, String, Page
from appy.gen.layout import ColumnLayout
from appy.gen import utils as gutils
from appy.gen.mixins import BaseMixin
from appy.gen.wrappers import AbstractWrapper
from appy.gen.descriptors import ClassDescriptor
from appy.gen.mail import sendMail
from appy.shared import mimeTypes
from appy.shared import utils as sutils
from appy.shared.data import languages
from appy.shared.ldap_connector import LdapConnector
import collections
try:
from AccessControl.ZopeSecurityPolicy import _noroles
except ImportError:
_noroles = []
# Global JS internationalized messages that will be computed in every page -----
jsMessages = ('no_elem_selected', 'action_confirm', 'save_confirm',
'warn_leave_form')
# ------------------------------------------------------------------------------
class ToolMixin(BaseMixin):
_appy_meta_type = 'Tool'
xhtmlEncoding = 'text/html;charset=UTF-8'
def getPortalType(self, metaTypeOrAppyClass):
'''Returns the name of the portal_type that is based on
p_metaTypeOrAppyClass.'''
appName = self.getProductConfig().PROJECTNAME
res = metaTypeOrAppyClass
if not isinstance(metaTypeOrAppyClass, str):
res = gutils.getClassName(metaTypeOrAppyClass, appName)
if res.find('_wrappers') != -1:
elems = res.split('_')
res = '%s%s' % (elems[1], elems[4])
if res in ('User', 'Group', 'Translation'): res = appName + res
return res
def home(self):
'''Returns the content of px ToolWrapper.pxHome.'''
tool = self.appy()
return tool.pxHome({'obj': None, 'tool': tool})
def query(self):
'''Returns the content of px ToolWrapper.pxQuery.'''
tool = self.appy()
return tool.pxQuery({'obj': None, 'tool': tool})
def search(self):
'''Returns the content of px ToolWrapper.pxSearch.'''
tool = self.appy()
return tool.pxSearch({'obj': None, 'tool': tool})
def getHomePage(self):
'''Return the home page when a user hits the app.'''
# If the app defines a method "getHomePage", call it.
tool = self.appy()
url = None
try:
url = tool.getHomePage()
except AttributeError:
pass
if not url:
# Bring Managers to the config, lead others to pxHome.
user = self.getUser()
if user.has_role('Manager'):
url = self.goto(self.absolute_url())
else:
url = self.goto('%s/home' % self.absolute_url())
return url
def getHomeObject(self):
'''The concept of "home object" is the object where the user must "be",
even if he is "nowhere". For example, if the user is on a search
screen, there is no contextual object. In this case, if we have a
home object for him, we will use it as contextual object, and its
portlet menu will nevertheless appear: the user will not have the
feeling of being lost.'''
# If the app defines a method "getHomeObject", call it.
try:
return self.appy().getHomeObject()
except AttributeError:
# For managers, the home object is the config. For others, there is
# no default home object.
if self.getUser().has_role('Manager'): return self.appy()
def getCatalog(self):
'''Returns the catalog object.'''
return self.getParentNode().catalog
def getApp(self):
'''Returns the root Zope object.'''
return self.getPhysicalRoot()
def getSiteUrl(self):
'''Returns the absolute URL of this site.'''
return self.getApp().absolute_url()
def getIncludeUrl(self, name, bg=False):
'''Gets the full URL of an external resource, like an image, a
Javascript or a CSS file, named p_name. If p_bg is True, p_name is
an image that is meant to be used in a "style" attribute for defining
the background image of some XHTML tag.'''
# If no extension is found in p_name, we suppose it is a png image.
if '.' not in name: name += '.png'
url = '%s/ui/%s' % (self.getPhysicalRoot().absolute_url(), name)
if not bg: return url
return 'background-image: url(%s)' % url
def doPod(self):
'''Performs an action linked to a pod field: generate, freeze,
unfreeze... a document from a pod field.'''
rq = self.REQUEST
# Get the object that is the target of this action.
obj = self.getObject(rq.get('objectUid'), appy=True)
return obj.getField(rq.get('fieldName')).onUiRequest(obj, rq)
def getAppName(self):
'''Returns the name of the application.'''
return self.getProductConfig().PROJECTNAME
def getPath(self, path):
        '''Returns the folder or object whose absolute path is p_path.'''
res = self.getPhysicalRoot()
if path == '/': return res
path = path[1:]
if '/' not in path: return res._getOb(path) # For performance
for elem in path.split('/'): res = res._getOb(elem)
return res
def showLanguageSelector(self):
        '''We must show the language selector if the app config requires it and
           there are at least 2 supported languages. Moreover, on some pages,
switching the language is not allowed.'''
cfg = self.getProductConfig(True)
if not cfg.languageSelector: return
if len(cfg.languages) < 2: return
page = self.REQUEST.get('ACTUAL_URL').split('/')[-1]
return page not in ('edit', 'query', 'search', 'do')
def showForgotPassword(self):
'''We must show link "forgot password?" when the app requires it.'''
return self.getProductConfig(True).activateForgotPassword
def getLanguages(self):
'''Returns the supported languages. First one is the default.'''
return self.getProductConfig(True).languages
def getLanguageName(self, code):
'''Gets the language name (in this language) from a 2-chars language
p_code.'''
return languages.get(code)[2]
def changeLanguage(self):
'''Sets the language cookie with the new desired language code that is
in request["language"].'''
rq = self.REQUEST
rq.RESPONSE.setCookie('_ZopeLg', rq['language'], path='/')
return self.goto(rq['HTTP_REFERER'])
def flipLanguageDirection(self, align, dir):
'''According to language direction p_dir ('ltr' or 'rtl'), this method
turns p_align from 'left' to 'right' (or the inverse) when
required.'''
if dir == 'ltr': return align
if align == 'left': return 'right'
if align == 'right': return 'left'
return align
def getGlobalCssJs(self, dir):
'''Returns the list of CSS and JS files to include in the main template.
The method ensures that appy.css and appy.js come first. If p_dir
           (=language *dir*ection) is "rtl" (=right-to-left), the stylesheet
for rtl languages is also included.'''
names = self.getPhysicalRoot().ui.objectIds('File')
# The single Appy Javascript file
names.remove('appy.js'); names.insert(0, 'appy.js')
        # CSS changes for right-to-left languages
names.remove('appyrtl.css')
if dir == 'rtl': names.insert(0, 'appyrtl.css')
names.remove('appy.css'); names.insert(0, 'appy.css')
return names
def consumeMessages(self):
'''Returns the list of messages to show to a web page and clean it in
the session.'''
rq = self.REQUEST
res = rq.SESSION.get('messages', '')
if res:
del rq.SESSION['messages']
res = ' '.join([m[1] for m in res])
return res
def getRootClasses(self):
'''Returns the list of root classes for this application.'''
cfg = self.getProductConfig().appConfig
rootClasses = cfg.rootClasses
if not rootClasses:
# We consider every class as being a root class.
rootClasses = self.getProductConfig().appClassNames
return [self.getAppyClass(k) for k in rootClasses]
def getSearchInfo(self, className, refInfo=None):
'''Returns, as an object:
- the list of searchable fields (some among all indexed fields);
- the number of columns for layouting those fields.'''
fields = []
if refInfo:
# The search is triggered from a Ref field.
refObject, fieldName = self.getRefInfo(refInfo)
refField = refObject.getAppyType(fieldName)
fieldNames = refField.queryFields or ()
nbOfColumns = refField.queryNbCols
else:
# The search is triggered from an app-wide search.
klass = self.getAppyClass(className)
fieldNames = getattr(klass, 'searchFields', None)
if not fieldNames:
# Gather all the indexed fields on this class.
fieldNames = [f.name for f in self.getAllAppyTypes(className) \
if f.indexed]
nbOfColumns = getattr(klass, 'numberOfSearchColumns', 3)
for name in fieldNames:
field = self.getAppyType(name, className=className)
fields.append(field)
return Object(fields=fields, nbOfColumns=nbOfColumns)
queryParamNames = ('className', 'search', 'sortKey', 'sortOrder',
'filterKey', 'filterValue')
def getQueryInfo(self):
'''If we are showing search results, this method encodes in a string all
the params in the request that are required for re-triggering the
search.'''
rq = self.REQUEST
res = ''
if 'search' in rq:
res = ';'.join([rq.get(key,'').replace(';','') \
for key in self.queryParamNames])
return res
def getResultMode(self, className):
'''Must we show, on pxQueryResult, instances of p_className as a list or
as a grid?'''
klass = self.getAppyClass(className)
return getattr(klass, 'resultMode', 'list')
def showPortlet(self, obj, layoutType):
'''When must the portlet be shown? p_obj and p_layoutType can be None
           if we are not browsing any object (ie, we are on the home page).'''
# Not on 'edit' pages.
if layoutType == 'edit': return
res = True
if obj and hasattr(obj, 'showPortlet'):
res = obj.showPortlet()
else:
tool = self.appy()
if hasattr(tool, 'showPortletAt'):
res = tool.showPortletAt(self.REQUEST['ACTUAL_URL'])
return res
def getObject(self, uid, appy=False, brain=False):
'''Allows to retrieve an object from its p_uid.'''
res = self.getPhysicalRoot().catalog(UID=uid)
if not res: return
res = res[0]
if brain: return res
res = res._unrestrictedGetObject()
if not appy: return res
return res.appy()
def getAllowedValue(self):
'''Gets, for the current user, the value of index "Allowed".'''
user = self.getUser()
# Get the user roles. If we do not make a copy of the list here, we will
# really add user logins among user roles!
res = user.getRoles()[:]
# Get the user logins
if user.login != 'anon':
for login in user.getLogins():
res.append('user:%s' % login)
return res
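    # Illustration (assumed, not from the original code): for a logged user
    # whose login is "jsmith" and whose roles are Manager and Authenticated,
    # the method would return something like
    #   ['Manager', 'Authenticated', 'user:jsmith']
    # where "jsmith" is a purely hypothetical login.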
def executeQuery(self, className, searchName=None, startNumber=0,
search=None, remember=False, brainsOnly=False,
maxResults=None, noSecurity=False, sortBy=None,
sortOrder='asc', filterKey=None, filterValue=None,
refObject=None, refField=None):
'''Executes a query on instances of a given p_className in the catalog.
If p_searchName is specified, it corresponds to:
1) a search defined on p_className: additional search criteria
will be added to the query, or;
2) "customSearch": in this case, additional search criteria will
also be added to the query, but those criteria come from the
session (in key "searchCriteria") and came from pxSearch.
We will retrieve objects from p_startNumber. If p_search is defined,
it corresponds to a custom Search instance (instead of a predefined
named search like in p_searchName). If both p_searchName and p_search
are given, p_search is ignored.
This method returns a list of objects in the form of an instance of
SomeObjects (see in appy.gen.utils). If p_brainsOnly is True, it
returns a list of brains instead (can be useful for some usages like
knowing the number of objects without needing to get information
about them). If no p_maxResults is specified, the method returns
maximum self.numberOfResultsPerPage. The method returns all objects
if p_maxResults equals string "NO_LIMIT".
If p_noSecurity is True, it gets all the objects, even those that the
currently logged user can't see.
The result is sorted according to the potential sort key defined in
the Search instance (Search.sortBy, together with Search.sortOrder).
But if parameter p_sortBy is given, it defines or overrides the sort.
In this case, p_sortOrder gives the order (*asc*ending or
*desc*ending).
If p_filterKey is given, it represents an additional search parameter
to take into account: the corresponding search value is in
p_filterValue.
If p_refObject and p_refField are given, the query is limited to the
objects that are referenced from p_refObject through p_refField.'''
params = {'ClassName': className}
appyClass = self.getAppyClass(className, wrapper=True)
if not brainsOnly: params['batch'] = True
# Manage additional criteria from a search when relevant
if searchName: search = self.getSearch(className, searchName)
if search:
# Add in params search and sort criteria.
search.updateSearchCriteria(params, appyClass)
# Determine or override sort if specified.
if sortBy:
params['sort_on'] = Search.getIndexName(sortBy, usage='sort')
if sortOrder == 'desc': params['sort_order'] = 'reverse'
else: params['sort_order'] = None
# If defined, add the filter among search parameters.
if filterKey:
filterKey = Search.getIndexName(filterKey)
filterValue = Search.getSearchValue(filterKey,filterValue,appyClass)
params[filterKey] = filterValue
# TODO This value needs to be merged with an existing one if already
# in params, or, in a first step, we should avoid to display the
# corresponding filter widget on the screen.
if refObject:
refField = refObject.getAppyType(refField)
params['UID'] = getattr(refObject, refField.name).data
# Use index "Allowed" if noSecurity is False
if not noSecurity: params['Allowed'] = self.getAllowedValue()
brains = self.getPath("/catalog")(**params)
if brainsOnly:
# Return brains only.
if not maxResults: return brains
else: return brains[:maxResults]
if not maxResults:
if refField: maxResults = refField.maxPerPage
else: maxResults = self.appy().numberOfResultsPerPage
elif maxResults == 'NO_LIMIT': maxResults = None
res = gutils.SomeObjects(brains, maxResults, startNumber,
noSecurity=noSecurity)
res.brainsToObjects()
# In some cases (p_remember=True), we need to keep some information
# about the query results in the current user's session, allowing him
# to navigate within elements without re-triggering the query every
# time a page for an element is consulted.
if remember:
if not searchName:
if not search or (search.name == 'allSearch'):
searchName = className
else:
searchName = search.name
uids = {}
i = -1
for obj in res.objects:
i += 1
uids[startNumber+i] = obj.UID()
self.REQUEST.SESSION['search_%s' % searchName] = uids
return res
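    # Usage sketch for m_executeQuery (hypothetical class and search names, not
    # part of this module): retrieve the batch of "Invoice" objects starting at
    # index 30 for the predefined search "unpaid", sorted on their title, and
    # remember the resulting UIDs in the session for later navigation:
    #
    #   res = tool.executeQuery('Invoice', searchName='unpaid', startNumber=30,
    #                           sortBy='title', sortOrder='asc', remember=True)
    #   for obj in res.objects: print(obj.title)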
def getResultColumnsLayouts(self, className, refInfo):
'''Returns the column layouts for displaying objects of
p_className.'''
if refInfo[0]:
return refInfo[0].getAppyType(refInfo[1]).shownInfo
else:
k = self.getAppyClass(className)
return hasattr(k, 'listColumns') and k.listColumns or ('title',)
def truncateValue(self, value, width=20):
'''Truncates the p_value according to p_width. p_value has to be
unicode-encoded for being truncated (else, one char may be spread on
2 chars).'''
# Param p_width can be None.
if not width: width = 20
        if isinstance(value, bytes): value = value.decode('utf-8')
if len(value) > width: return value[:width] + '...'
return value
def truncateText(self, text, width=20):
'''Truncates p_text to max p_width chars. If the text is longer than
p_width, the truncated part is put in a "acronym" html tag. p_text
has to be unicode-encoded for being truncated (else, one char may be
spread on 2 chars).'''
# Param p_width can be None.
if not width: width = 20
        if isinstance(text, bytes): text = text.decode('utf-8')
if len(text) <= width: return text
return '<acronym title="%s">%s...</acronym>' % (text, text[:width])
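    # Example: self.truncateText('A fairly long description', 10) returns
    # '<acronym title="A fairly long description">A fairly l...</acronym>'.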
def splitList(self, l, sub):
'''Returns a list made of the same elements as p_l, but grouped into
sub-lists of p_sub elements.'''
return sutils.splitList(l, sub)
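    # Example (assuming the standard behaviour of sutils.splitList):
    #   self.splitList([1, 2, 3, 4, 5], 2) -> [[1, 2], [3, 4], [5]]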
def quote(self, s, escapeWithEntity=True):
'''Returns the quoted version of p_s.'''
if not isinstance(s, str): s = str(s)
repl = escapeWithEntity and ''' or "\\'"
s = s.replace('\r\n', '').replace('\n', '').replace("'", repl)
return "'%s'" % s
def getLayoutType(self):
'''Guess the current layout type, according to actual URL.'''
url = self.REQUEST['ACTUAL_URL']
if url.endswith('/view'): return 'view'
if url.endswith('/edit') or url.endswith('/do'): return 'edit'
def getZopeClass(self, name):
'''Returns the Zope class whose name is p_name.'''
        # "exec" cannot bind local names in Python 3: import the module instead
        module = __import__('Products.%s.%s' % (self.getAppName(), name),
                            fromlist=[name])
        return getattr(module, name)
def getAppyClass(self, zopeName, wrapper=False):
'''Gets the Appy class corresponding to the Zope class named p_name.
If p_wrapper is True, it returns the Appy wrapper. Else, it returns
the user-defined class.'''
# p_zopeName may be the name of the Zope class *or* the name of the Appy
# class (shorter, not prefixed with the underscored package path).
classes = self.getProductConfig().allShortClassNames
if zopeName in classes: zopeName = classes[zopeName]
zopeClass = self.getZopeClass(zopeName)
if wrapper: return zopeClass.wrapperClass
else: return zopeClass.wrapperClass.__bases__[-1]
def getAllClassNames(self):
'''Returns the name of all classes within this app, including default
Appy classes (Tool, Translation, Page, etc).'''
return self.getProductConfig().allClassNames + [self.__class__.__name__]
def getCreateMeans(self, klass):
'''Gets the different ways objects of p_klass can be created (currently:
via a web form or programmatically only). Result is a list.'''
res = []
if 'create' not in klass.__dict__:
return ['form']
else:
means = klass.create
if means:
if isinstance(means, str): res = [means]
else: res = means
return res
def userMaySearch(self, klass):
'''May the user search among instances of root p_klass ?'''
# When editing a form, one should avoid annoying the user with this.
url = self.REQUEST['ACTUAL_URL']
if url.endswith('/edit') or url.endswith('/do'): return
if hasattr(klass, 'maySearch'): return klass.maySearch(self.appy())
return True
def userMayCreate(self, klass):
'''May the logged user create instances of p_klass ? This information
can be defined on p_klass, in static attribute "creators".
1. If this attr holds a list, we consider it to be a list of roles,
and we check that the user has at least one of those roles.
2. If this attr holds a boolean, we consider that the user can create
instances of this class if the boolean is True.
3. If this attr stores a method, we execute the method, and via its
result, we fall again in cases 1 or 2.
If p_klass does not define this attr "creators", we will use a
default list of roles as defined in the config.'''
# Get the value of attr "creators", or a default value if not present.
if hasattr(klass, 'creators'):
creators = klass.creators
else:
creators = self.getProductConfig().appConfig.defaultCreators
# Resolve case (3): if "creators" is a method, execute it.
        if callable(creators): creators = creators(self.appy())
# Resolve case (2)
if isinstance(creators, bool) or not creators: return creators
# Resolve case (1): checks whether the user has at least one of the
# roles listed in "creators".
for role in self.getUser().getRoles():
if role in creators:
return True
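    # Illustration (hypothetical class, not part of this module) of the three
    # forms accepted for attribute "creators" as described above:
    #
    #   class Invoice:
    #       creators = ['Manager', 'Accountant']     # case 1: a list of roles
    #       # creators = True                        # case 2: a boolean
    #       # creators = lambda tool: ['Manager']    # case 3: a method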
def isSortable(self, name, className, usage):
'''Is field p_name defined on p_className sortable for p_usage purposes
(p_usage can be "ref" or "search")?'''
if (',' in className) or (name == 'state'): return False
appyType = self.getAppyType(name, className=className)
if appyType: return appyType.isSortable(usage=usage)
def subTitleIsUsed(self, className):
'''Does class named p_className define a method "getSubTitle"?'''
klass = self.getAppyClass(className)
return hasattr(klass, 'getSubTitle')
def _searchValueIsEmpty(self, key):
'''Returns True if request value in key p_key can be considered as
empty.'''
rq = self.REQUEST.form
if key.endswith('*int') or key.endswith('*float'):
# We return True if "from" AND "to" values are empty.
toKey = '%s_to' % key[2:key.find('*')]
return not rq[key].strip() and not rq[toKey].strip()
elif key.endswith('*date'):
# We return True if "from" AND "to" values are empty. A value is
# considered as not empty if at least the year is specified.
toKey = '%s_to_year' % key[2:-5]
return not rq[key] and not rq[toKey]
else:
return not rq[key]
def _getDateTime(self, year, month, day, setMin):
'''Gets a valid DateTime instance from date information coming from the
request as strings in p_year, p_month and p_day. Returns None if
p_year is empty. If p_setMin is True, when some
information is missing (month or day), we will replace it with the
minimum value (=1). Else, we will replace it with the maximum value
(=12, =31).'''
if not year: return None
if not month:
if setMin: month = 1
else: month = 12
if not day:
if setMin: day = 1
else: day = 31
DateTime = self.getProductConfig().DateTime
# Set the hour
if setMin: hour = '00:00'
else: hour = '23:59'
# We loop until we find a valid date. For example, we could loop from
# 2009/02/31 to 2009/02/28.
dateIsWrong = True
while dateIsWrong:
try:
res = DateTime('%s/%s/%s %s' % (year, month, day, hour))
dateIsWrong = False
except:
day = int(day)-1
return res
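    # Examples of the completion rule above (illustrative values):
    #   self._getDateTime('2009', '', '', True)   -> DateTime('2009/1/1 00:00')
    #   self._getDateTime('2009', '2', '', False) -> DateTime('2009/2/28 23:59')
    # In the second case the "while" loop decrements the day until the first
    # valid date (2009/02/28) is found.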
def _getDefaultSearchCriteria(self):
'''We are about to perform an advanced search on instances of a given
class. Check, on this class, if in field Class.searchAdvanced, some
default criteria (field values, sort filters, etc) exist, and, if
yes, return it.'''
res = {}
rq = self.REQUEST
if 'className' not in rq.form: return res
klass = self.getAppyClass(rq.form['className'])
if not hasattr(klass, 'searchAdvanced'): return res
# In klass.searchAdvanced, we have the Search instance representing
# default advanced search criteria.
wrapperClass = self.getAppyClass(rq.form['className'], wrapper=True)
klass.searchAdvanced.updateSearchCriteria(res, wrapperClass,
advanced=True)
return res
transformMethods = {'uppercase': 'upper', 'lowercase': 'lower',
'capitalize': 'capitalize'}
def storeSearchCriteria(self):
'''Stores the search criteria coming from the request into the
session.'''
rq = self.REQUEST
# Store the search criteria in the session
criteria = self._getDefaultSearchCriteria()
for name in list(rq.form.keys()):
if name.startswith('w_') and not self._searchValueIsEmpty(name):
hasStar = name.find('*') != -1
fieldName = not hasStar and name[2:] or name[2:name.find('*')]
field = self.getAppyType(fieldName, rq.form['className'])
if field and not field.persist and not field.indexed: continue
# We have a(n interval of) value(s) that is not empty for a
# given field or index.
value = rq.form[name]
if hasStar:
value = value.strip()
# The type of the value is encoded after char "*".
name, type = name.split('*')
if type == 'bool':
                        value = eval(value) # "True" or "False" from the form
elif type in ('int', 'float'):
# Get the "from" value
if not value: value = None
else:
                            value = int(value) if type == 'int' else float(value)
# Get the "to" value
toValue = rq.form['%s_to' % name[2:]].strip()
if not toValue: toValue = None
else:
                            toValue = int(toValue) if type == 'int' else float(toValue)
value = (value, toValue)
elif type == 'date':
prefix = name[2:]
# Get the "from" value
year = value
month = rq.form['%s_from_month' % prefix]
day = rq.form['%s_from_day' % prefix]
fromDate = self._getDateTime(year, month, day, True)
# Get the "to" value"
year = rq.form['%s_to_year' % prefix]
month = rq.form['%s_to_month' % prefix]
day = rq.form['%s_to_day' % prefix]
toDate = self._getDateTime(year, month, day, False)
value = (fromDate, toDate)
elif type.startswith('string'):
# In the case of a string, it could be necessary to
# apply some text transform.
if len(type) > 6:
transform = type.split('-')[1]
if (transform != 'none') and value:
                                method = self.transformMethods[transform]
                                value = getattr(value, method)()
if isinstance(value, list):
# It is a list of values. Check if we have an operator for
# the field, to see if we make an "and" or "or" for all
# those values. "or" will be the default.
operKey = 'o_%s' % name[2:]
oper = ' %s ' % rq.form.get(operKey, 'or').upper()
value = oper.join(value)
criteria[name[2:]] = value
# Complete criteria with Ref info if the search is restricted to
# referenced objects of a Ref field.
refInfo = rq.get('ref', None)
if refInfo: criteria['_ref'] = refInfo
rq.SESSION['searchCriteria'] = criteria
def onSearchObjects(self):
'''This method is called when the user triggers a search from
pxSearch.'''
rq = self.REQUEST
self.storeSearchCriteria()
# Go to the screen that displays search results
backUrl = '%s/query?className=%s&&search=customSearch' % \
(self.absolute_url(), rq['className'])
return self.goto(backUrl)
def getJavascriptMessages(self):
'''Returns the translated version of messages that must be shown in
Javascript popups.'''
res = ''
for msg in jsMessages:
res += 'var %s = "%s";\n' % (msg, self.translate(msg))
return res
def getColumnsSpecifiers(self, className, columnLayouts, dir):
'''Extracts and returns, from a list of p_columnLayouts, info required
for displaying columns of field values for instances of p_className,
either in a result screen or for a Ref field.'''
res = []
for info in columnLayouts:
fieldName, width, align = ColumnLayout(info).get()
align = self.flipLanguageDirection(align, dir)
field = self.getAppyType(fieldName, className)
if not field:
self.log('Field "%s", used in a column specifier, was not ' \
'found.' % fieldName, type='warning')
else:
res.append(Object(field=field, width=width, align=align))
return res
def getRefInfo(self, refInfo=None):
'''When a search is restricted to objects referenced through a Ref
field, this method returns information about this reference: the
source class and the Ref field. If p_refInfo is not given, we search
it among search criteria in the session.'''
if not refInfo and (self.REQUEST.get('search', None) == 'customSearch'):
criteria = self.REQUEST.SESSION.get('searchCriteria', None)
if criteria and '_ref' in criteria: refInfo = criteria['_ref']
if not refInfo: return None, None
objectUid, fieldName = refInfo.split(':')
obj = self.getObject(objectUid)
return obj, fieldName
def getGroupedSearches(self, klass):
'''Returns an object with 2 attributes:
* "searches" stores the searches that are defined for p_klass;
* "default" stores the search defined as the default one.
Every item representing a search is a dict containing info about a
search or about a group of searches.
'''
res = []
default = None # Also retrieve the default one here.
groups = {} # The already encountered groups
page = Page('searches') # A dummy page required by class UiGroup
# Get the searches statically defined on the class
className = self.getPortalType(klass)
searches = ClassDescriptor.getSearches(klass, tool=self.appy())
# Get the dynamically computed searches
if hasattr(klass, 'getDynamicSearches'):
searches += klass.getDynamicSearches(self.appy())
for search in searches:
# Create the search descriptor
uiSearch = UiSearch(search, className, self)
if not search.group:
# Insert the search at the highest level, not in any group.
res.append(uiSearch)
else:
uiGroup = search.group.insertInto(res, groups, page, className,
content='searches')
uiGroup.addElement(uiSearch)
# Is this search the default search?
if search.default: default = uiSearch
return Object(searches=res, default=default)
def getSearch(self, className, name, ui=False):
'''Gets the Search instance (or a UiSearch instance if p_ui is True)
corresponding to the search named p_name, on class p_className.'''
if name == 'customSearch':
# It is a custom search whose parameters are in the session.
fields = self.REQUEST.SESSION['searchCriteria']
res = Search('customSearch', **fields)
elif name:
appyClass = self.getAppyClass(className)
# Search among static searches
res = ClassDescriptor.getSearch(appyClass, name)
if not res and hasattr(appyClass, 'getDynamicSearches'):
# Search among dynamic searches
for search in appyClass.getDynamicSearches(self.appy()):
if search.name == name:
res = search
break
else:
# It is the search for every instance of p_className
res = Search('allSearch')
# Return a UiSearch if required.
if ui: res = UiSearch(res, className, self)
return res
def advancedSearchEnabledFor(self, klass):
'''Is advanced search visible for p_klass ?'''
# By default, advanced search is enabled.
if not hasattr(klass, 'searchAdvanced'): return True
# Evaluate attribute "show" on this Search instance representing the
# advanced search.
return klass.searchAdvanced.isShowable(klass, self.appy())
def portletBottom(self, klass):
'''Is there a custom zone to display at the bottom of the portlet zone
for p_klass?'''
if not hasattr(klass, 'getPortletBottom'): return ''
res = klass.getPortletBottom(self.appy())
if not res: return ''
return res
def getQueryUrl(self, contentType, searchName, startNumber=None):
'''This method creates the URL that allows to perform a (non-Ajax)
request for getting queried objects from a search named p_searchName
on p_contentType.'''
baseUrl = self.absolute_url()
baseParams = 'className=%s' % contentType
rq = self.REQUEST
if rq.get('ref'): baseParams += '&ref=%s' % rq.get('ref')
# Manage start number
if startNumber != None:
baseParams += '&startNumber=%s' % startNumber
elif 'startNumber' in rq:
baseParams += '&startNumber=%s' % rq['startNumber']
# Manage search name
if searchName: baseParams += '&search=%s' % searchName
return '%s/query?%s' % (baseUrl, baseParams)
def computeStartNumberFrom(self, currentNumber, totalNumber, batchSize):
'''Returns the number (start at 0) of the first element in a list
containing p_currentNumber (starts at 0) whose total number is
p_totalNumber and whose batch size is p_batchSize.'''
startNumber = 0
res = startNumber
while (startNumber < totalNumber):
if (currentNumber < startNumber + batchSize):
return startNumber
else:
startNumber += batchSize
return startNumber
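    # Example: with a batch size of 30, element number 47 (0-based) among 100
    # elements lies in the second batch, so
    #   self.computeStartNumberFrom(47, 100, 30) returns 30.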
def getNavigationInfo(self, inPopup=False):
'''Extracts navigation information from request/nav and returns an
object with the info that a page can use for displaying object
navigation.'''
res = Object()
rq = self.REQUEST
t, d1, d2, currentNumber, totalNumber = rq.get('nav').split('.')
res.currentNumber = int(currentNumber)
res.totalNumber = int(totalNumber)
# Compute the label of the search, or ref field
if t == 'search':
searchName = d2
if not searchName:
# We search all objects of a given type.
label = '%s_plural' % d1.split(':')[0]
elif searchName == 'customSearch':
# This is an advanced, custom search.
label = 'search_results'
else:
# This is a named, predefined search.
label = '%s_search_%s' % (d1.split(':')[0], searchName)
res.backText = self.translate(label)
# If it is a dynamic search this label does not exist.
if ('_' in res.backText): res.backText = ''
else:
fieldName, pageName = d2.split(':')
sourceObj = self.getObject(d1)
label = '%s_%s' % (sourceObj.meta_type, fieldName)
res.backText = '%s - %s' % (sourceObj.Title(),self.translate(label))
newNav = '%s.%s.%s.%%d.%s' % (t, d1, d2, totalNumber)
# Among, first, previous, next and last, which one do I need?
previousNeeded = False # Previous ?
previousIndex = res.currentNumber - 2
if (previousIndex > -1) and (res.totalNumber > previousIndex):
previousNeeded = True
nextNeeded = False # Next ?
nextIndex = res.currentNumber
if nextIndex < res.totalNumber: nextNeeded = True
firstNeeded = False # First ?
firstIndex = 0
if previousIndex > 0: firstNeeded = True
lastNeeded = False # Last ?
lastIndex = res.totalNumber - 1
if (nextIndex < lastIndex): lastNeeded = True
# Get the list of available UIDs surrounding the current object
if t == 'ref': # Manage navigation from a reference
# In the case of a reference, we retrieve ALL surrounding objects.
masterObj = self.getObject(d1)
batchSize = masterObj.getAppyType(fieldName).maxPerPage
uids = getattr(masterObj, fieldName)
# Display the reference widget at the page where the current object
# lies.
startNumberKey = '%s%s_startNumber' % (masterObj.id, fieldName)
startNumber = self.computeStartNumberFrom(res.currentNumber-1,
res.totalNumber, batchSize)
res.sourceUrl = masterObj.getUrl(**{startNumberKey:startNumber,
'page':pageName, 'nav':''})
else: # Manage navigation from a search
contentType = d1
searchName = keySuffix = d2
batchSize = self.appy().numberOfResultsPerPage
if not searchName: keySuffix = contentType
s = rq.SESSION
searchKey = 'search_%s' % keySuffix
if searchKey in s: uids = s[searchKey]
else: uids = {}
# In the case of a search, we retrieve only a part of all
# surrounding objects, those that are stored in the session.
if (previousNeeded and previousIndex not in uids) or \
(nextNeeded and nextIndex not in uids):
# I do not have this UID in session. I will need to
# retrigger the query by querying all objects surrounding
# this one.
                newStartNumber = (res.currentNumber-1) - (batchSize // 2)
if newStartNumber < 0: newStartNumber = 0
self.executeQuery(contentType, searchName=searchName,
startNumber=newStartNumber, remember=True)
uids = s[searchKey]
# For the moment, for first and last, we get them only if we have
# them in session.
if 0 not in uids: firstNeeded = False
if lastIndex not in uids: lastNeeded = False
# Compute URL of source object
startNumber = self.computeStartNumberFrom(res.currentNumber-1,
res.totalNumber, batchSize)
res.sourceUrl = self.getQueryUrl(contentType, searchName,
startNumber=startNumber)
# Compute URLs
for urlType in ('previous', 'next', 'first', 'last'):
            needIt = locals()['%sNeeded' % urlType]
urlKey = '%sUrl' % urlType
setattr(res, urlKey, None)
if needIt:
                index = locals()['%sIndex' % urlType]
uid = None
try:
uid = uids[index]
# uids can be a list (ref) or a dict (search)
except KeyError: pass
except IndexError: pass
if uid:
brain = self.getObject(uid, brain=True)
if brain:
sibling = brain.getObject()
setattr(res, urlKey, sibling.getUrl(\
nav=newNav % (index + 1),
page=rq.get('page', 'main'), inPopup=inPopup))
return res
def getGroupedSearchFields(self, searchInfo):
'''This method transforms p_searchInfo.fields, which is a "flat"
           list of fields, into a list of lists, where every sub-list has
           length p_searchInfo.nbOfColumns. For every field, scolspan
(=colspan "for search") is taken into account.'''
res = []
row = []
rowLength = 0
for field in searchInfo.fields:
# Can I insert this field in the current row?
remaining = searchInfo.nbOfColumns - rowLength
if field.scolspan <= remaining:
# Yes.
row.append(field)
rowLength += field.scolspan
else:
# We must put the field on a new line. Complete the current one
# if not complete.
while rowLength < searchInfo.nbOfColumns:
row.append(None)
rowLength += 1
res.append(row)
row = [field]
rowLength = field.scolspan
# Complete the last unfinished line if required.
if row:
while rowLength < searchInfo.nbOfColumns:
row.append(None)
rowLength += 1
res.append(row)
return res
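    # Example of the grouping performed above: with nbOfColumns being 3 and 4
    # fields whose "scolspan" values are 1, 2, 2 and 1, the method returns
    # [[f1, f2], [f3, f4]]: f3 does not fit on the first row and starts a new
    # one, together with f4.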
# --------------------------------------------------------------------------
# Authentication-related methods
# --------------------------------------------------------------------------
def identifyUser(self, alsoSpecial=False):
'''To identify a user means: get its login and password. There are
           several places to look for this information: http basic
           authentication, the authentication cookie, or the web login form.
If no user could be identified, and p_alsoSpecial is True, we will
nevertheless identify a "special user": "system", representing the
system itself (running at startup or in batch mode) or "anon",
representing an anonymous user.'''
tool = self.appy()
req = tool.request
login = password = None
# a. Identify the user from http basic authentication.
if getattr(req, '_auth', None):
# HTTP basic authentication credentials are present (used when
# connecting to the ZMI). Decode it.
creds = req._auth
if creds.lower().startswith('basic '):
try:
creds = creds.split(' ')[-1]
                    creds = base64.b64decode(creds).decode('utf-8')
                    login, password = creds.split(':', 1)
except Exception as e:
pass
# b. Identify the user from the authentication cookie.
if not login:
login, password = gutils.readCookie(req)
# c. Identify the user from the authentication form.
if not login:
login = req.get('__ac_name', None)
password = req.get('__ac_password', '')
# Stop identification here if we don't need to return a special user
if not alsoSpecial: return login, password
# d. All the identification methods failed. So identify the user as
# "anon" or "system".
if not login:
# If we have a fake request, we are at startup or in batch mode and
# the user is "system". Else, it is "anon". At Zope startup, Appy
# uses an Object instance as a fake request. In "zopectl run" mode
# (the Zope batch mode), Appy adds a param "_fake_" on the request
# object created by Zope.
if (req.__class__.__name__ == 'Object') or \
(hasattr(req, '_fake_') and req._fake_):
login = 'system'
else:
login = 'anon'
return login, password
def getLdapUser(self, login, password):
'''Returns a local User instance corresponding to a LDAP user if p_login
           and p_password correspond to a valid LDAP user.'''
# Check if LDAP is configured.
cfg = self.getProductConfig(True).ldap
if not cfg: return
# Get a connector to the LDAP server and connect to the LDAP server.
serverUri = cfg.getServerUri()
connector = LdapConnector(serverUri, tool=self)
success, msg = connector.connect(cfg.adminLogin, cfg.adminPassword)
if not success: return
# Check if the user corresponding to p_login exists in the LDAP.
filter = connector.getFilter(cfg.getUserFilterValues(login))
params = cfg.getUserAttributes()
ldapData = connector.search(cfg.baseDn, cfg.scope, filter, params)
if not ldapData: return
# The user exists. Try to connect to the LDAP with this user in order
# to validate its password.
userConnector = LdapConnector(serverUri, tool=self)
success, msg = userConnector.connect(ldapData[0][0], password)
if not success: return
# The password is correct. We can create/update our local user
# corresponding to this LDAP user.
userParams = cfg.getUserParams(ldapData[0][1])
tool = self.appy()
user = tool.search1('User', noSecurity=True, login=login)
if user:
# Update the user with fresh info about him from the LDAP
for name, value in userParams.items():
setattr(user, name, value)
# Update user password
user.setPassword(password, log=False)
user.reindex()
else:
# Create the user
user = tool.create('users', noSecurity=True, login=login,
password1=password, source='ldap', **userParams)
return user
def getUser(self, authentify=False, source='zodb'):
'''Gets the current user. If p_authentify is True, in addition to
finding the logged user and returning it (=identification), we check
           if the found credentials are valid (=authentication).
If p_authentify is True and p_source is "zodb", authentication is
performed locally. Else (p_source is "ldap"), authentication is
performed on a LDAP (if a LDAP configuration is found). If p_source
is "any", authentication is performed on the local User object, be it
really local or a copy of a LDAP user.'''
tool = self.appy()
req = tool.request
# Try first to return the user that can be cached on the request. In
# this case, we suppose authentication has previously been done, and we
# just return the cached user.
if hasattr(req, 'user'): return req.user
# Identify the user (=find its login and password). If we don't need
# to authentify the user, we ask to identify a user or, if impossible,
# a special user.
login, password = self.identifyUser(alsoSpecial=not authentify)
# Stop here if no user was found and authentication was required.
if authentify and not login: return
# Now, get the User instance.
if source == 'zodb':
# Get the User object, but only if it is a true local user.
user = tool.search1('User', noSecurity=True, login=login)
if user and (user.source != 'zodb'): user = None # Not a local one.
elif source == 'ldap':
user = self.getLdapUser(login, password)
elif source == 'any':
# Get the user object, be it really local or a copy of a LDAP user.
user = tool.search1('User', noSecurity=True, login=login)
if not user: return
# Authentify the user if required.
if authentify:
if not user.checkPassword(password):
# Disable the authentication cookie.
req.RESPONSE.expireCookie('_appy_', path='/')
return
# Create an authentication cookie for this user.
gutils.writeCookie(login, password, req)
# Cache the user and some precomputed values, for performance.
req.user = user
req.userRoles = user.getRoles()
req.userLogins = user.getLogins()
req.zopeUser = user.getZopeUser()
return user
def performLogin(self):
'''Logs the user in.'''
rq = self.REQUEST
jsEnabled = rq.get('js_enabled', False) in ('1', 1)
cookiesEnabled = rq.get('cookies_enabled', False) in ('1', 1)
urlBack = rq['HTTP_REFERER']
if jsEnabled and not cookiesEnabled:
msg = self.translate('enable_cookies')
return self.goto(urlBack, msg)
# Authenticate the user.
login = rq.get('__ac_name', None)
if self.getUser(authentify=True) or \
self.getUser(authentify=True, source='ldap'):
msg = self.translate('login_ok')
logMsg = 'User "%s" logged in.' % login
else:
msg = self.translate('login_ko')
logMsg = 'Authentication failed with login "%s".' % login
self.log(logMsg)
return self.goto(self.getApp().absolute_url(), msg)
def performLogout(self):
'''Logs out the current user when he clicks on "disconnect".'''
rq = self.REQUEST
userId = self.getUser().login
# Perform the logout in acl_users
rq.RESPONSE.expireCookie('_appy_', path='/')
# Invalidate the user session.
try:
sdm = self.session_data_manager
except AttributeError as ae:
# When ran in test mode, session_data_manager is not there.
sdm = None
if sdm:
session = sdm.getSessionData(create=0)
if session is not None:
session.invalidate()
self.log('User "%s" has been logged out.' % userId)
# Remove user from variable "loggedUsers"
if userId in self.loggedUsers: del self.loggedUsers[userId]
return self.goto(self.getApp().absolute_url())
# This dict stores, for every logged user, the date/time of its last access
loggedUsers = {}
forgetAccessExtensions = ('.jpg', '.gif', '.png', '.js', '.css')
def rememberAccess(self, id, user):
'''Every time there is a hit on the server, this method is called in
order to update global dict loggedUsers (see above).'''
if not id: return
if os.path.splitext(id)[-1].lower() in self.forgetAccessExtensions:
return
self.loggedUsers[user.login] = time.time()
# "Touch" the SESSION object. Else, expiration won't occur.
session = self.REQUEST.SESSION
def validate(self, request, auth='', roles=_noroles):
'''This method performs authentication and authorization. It is used as
a replacement for Zope's AccessControl.User.BasicUserFolder.validate,
that allows to manage cookie-based authentication.'''
v = request['PUBLISHED'] # The published object
tool = self.getParentNode().config
# v is the object (value) we're validating access to
# n is the name used to access the object
# a is the object the object was accessed through
# c is the physical container of the object
a, c, n, v = self._getobcontext(v, request)
# Identify and authentify the user
user = tool.getUser(authentify=True, source='any')
if not user:
# Login and/or password incorrect. Try to authorize and return the
# anonymous user.
if self.authorize(self._nobody, a, c, n, v, roles):
return self._nobody.__of__(self)
else:
return
else:
# We found a user and his password was correct. Try to authorize him
# against the published object. By the way, remember its last access
# to this system.
tool.rememberAccess(a.getId(), user)
user = user.getZopeUser()
if self.authorize(user, a, c, n, v, roles):
return user.__of__(self)
# That didn't work. Try to authorize the anonymous user.
elif self.authorize(self._nobody, a, c, n, v, roles):
return self._nobody.__of__(self)
else:
return
# Patch BasicUserFolder with our version of m_validate above.
from AccessControl.User import BasicUserFolder
BasicUserFolder.validate = validate
def getUserLine(self):
'''Returns info about the currently logged user as a 2-tuple: first
elem is the one-line user info as shown on every page; second line is
the URL to edit user info.'''
user = self.getUser()
info = [user.title]
showable = [r for r in user.getRoles() if r != 'Authenticated']
if showable:
info.append(', '.join([self.translate('role_%s' % r) \
for r in showable]))
# Edit URL for the user.
url = None
if user.o.mayEdit():
url = user.o.getUrl(mode='edit', page='main', nav='')
return (' | '.join(info), url)
def getUserName(self, login=None, normalized=False):
'''Gets the user name corresponding to p_login (or the currently logged
user if None), or the p_login itself if the user does not exist
anymore. If p_normalized is True, special chars in the first and last
names are normalized.'''
tool = self.appy()
if not login: login = tool.user.login
# Manage the special case of an anonymous user.
if login == 'anon':
name = self.translate('anonymous')
if normalized: name = sutils.normalizeString(name)
return name
# Manage the case of any other user.
user = tool.search1('User', noSecurity=True, login=login)
if not user: return login
firstName = user.firstName
name = user.name
res = ''
if firstName:
if normalized: firstName = sutils.normalizeString(firstName)
res += firstName
if name:
if normalized: name = sutils.normalizeString(name)
if res: res += ' ' + name
else: res = name
if not res: res = login
return res
def tempFile(self):
'''A temp file has been created in a temp folder. This method returns
this file to the browser.'''
rq = self.REQUEST
baseFolder = os.path.join(sutils.getOsTempFolder(), self.getAppName())
baseFolder = os.path.join(baseFolder, rq.SESSION.id)
fileName = os.path.join(baseFolder, rq.get('name', ''))
if os.path.exists(fileName):
            f = open(fileName)
content = f.read()
f.close()
# Remove the temp file
os.remove(fileName)
return content
return 'File does not exist'
def getResultPodFields(self, contentType):
'''Finds, among fields defined on p_contentType, which ones are Pod
fields that need to be shown on a page displaying query results.'''
# Skip this if we are searching multiple content types.
if ',' in contentType: return ()
return [f for f in self.getAllAppyTypes(contentType) \
if (f.type == 'Pod') and (f.show == 'result')]
def formatDate(self, date, withHour=True):
'''Returns p_date formatted as specified by tool.dateFormat.
If p_withHour is True, hour is appended, with a format specified
in tool.hourFormat.'''
tool = self.appy()
res = date.strftime(tool.dateFormat)
if withHour: res += ' (%s)' % date.strftime(tool.hourFormat)
return res
def generateUid(self, className):
'''Generates a UID for an instance of p_className.'''
name = className.split('_')[-1]
randomNumber = str(random.random()).split('.')[1]
timestamp = ('%f' % time.time()).replace('.', '')
return '%s%s%s' % (name, timestamp, randomNumber)
def manageError(self, error):
'''Manages an error.'''
tb = sys.exc_info()
if error.type.__name__ == 'Unauthorized':
siteUrl = self.getSiteUrl()
htmlMessage = '<a href="/">Back</a> You are not allowed to ' \
'access this page.'
userId = self.appy().user.login
textMessage = 'Unauthorized for %s @%s.' % \
(userId, self.REQUEST.get('PATH_INFO'))
else:
from zExceptions.ExceptionFormatter import format_exception
htmlMessage = format_exception(tb[0], tb[1], tb[2], as_html=1)
htmlMessage = '\n'.join(htmlMessage)
textMessage = format_exception(tb[0], tb[1], tb[2], as_html=0)
textMessage = ''.join(textMessage).strip()
self.log(textMessage, type='error')
return '<div class="error">%s</div>' % htmlMessage
def getMainPages(self):
'''Returns the main pages.'''
if hasattr(self.o.aq_base, 'pages') and self.o.pages:
return [self.getObject(uid) for uid in self.o.pages ]
return ()
def askPasswordReinit(self):
        '''A user (anonymous) does not remember his password. Here we will
send him a mail containing a link that will trigger password
re-initialisation.'''
login = self.REQUEST.get('login').strip()
appyTool = self.appy()
user = appyTool.search1('User', login=login, noSecurity=True)
msg = self.translate('reinit_mail_sent')
backUrl = self.REQUEST['HTTP_REFERER']
if not user:
# Return the message nevertheless. This way, malicious users can't
# deduce information about existing users.
return self.goto(backUrl, msg)
# If login is an email, use it. Else, use user.email instead.
email = user.login
if not String.EMAIL.match(email):
email = user.email
if not email:
# Impossible to re-initialise the password.
return self.goto(backUrl, msg)
# Create a temporary file whose name is the user login and whose
# content is a generated token.
        f = open(os.path.join(sutils.getOsTempFolder(), login), 'w')
token = String().generatePassword()
f.write(token)
f.close()
# Send an email
initUrl = '%s/doPasswordReinit?login=%s&token=%s' % \
(self.absolute_url(), login, token)
subject = self.translate('reinit_password')
map = {'url':initUrl, 'siteUrl':self.getSiteUrl()}
body= self.translate('reinit_password_body', mapping=map, format='text')
sendMail(appyTool, email, subject, body)
return self.goto(backUrl, msg)
def doPasswordReinit(self):
'''Performs the password re-initialisation.'''
rq = self.REQUEST
login = rq['login']
token = rq['token']
# Check if such token exists in temp folder
res = None
siteUrl = self.getSiteUrl()
tokenFile = os.path.join(sutils.getOsTempFolder(), login)
if os.path.exists(tokenFile):
            f = open(tokenFile)
storedToken = f.read()
f.close()
if storedToken == token:
# Generate a new password for this user
appyTool = self.appy()
user = appyTool.search1('User', login=login, noSecurity=True)
newPassword = user.setPassword()
# Send the new password by email
email = login
if not String.EMAIL.match(email):
email = user.email
subject = self.translate('new_password')
map = {'password': newPassword, 'siteUrl': siteUrl}
body = self.translate('new_password_body', mapping=map,
format='text')
sendMail(appyTool, email, subject, body)
os.remove(tokenFile)
res = self.goto(siteUrl, self.translate('new_password_sent'))
if not res:
res = self.goto(siteUrl, self.translate('wrong_password_reinit'))
return res
def getGoogleAnalyticsCode(self):
'''If the config defined a Google Analytics ID, this method returns the
Javascript code to be included in every page, allowing Google
Analytics to work.'''
# Disable Google Analytics when we are in debug mode.
if self.isDebug(): return
# Disable Google Analytics if no ID is found in the config.
gaId = self.getProductConfig(True).googleAnalyticsId
if not gaId: return
# Google Analytics must be enabled: return the chunk of Javascript
# code specified by Google.
code = "var _gaq = _gaq || [];\n" \
"_gaq.push(['_setAccount', '%s']);\n" \
"_gaq.push(['_trackPageview']);\n" \
"(function() {\n" \
" var ga = document.createElement('script'); " \
"ga.type = 'text/javascript'; ga.async = true;\n" \
" ga.src = ('https:' == document.location.protocol ? " \
"'https://ssl' : 'http://www') + " \
"'.google-analytics.com/ga.js';\n" \
" var s = document.getElementsByTagName('script')[0]; " \
"s.parentNode.insertBefore(ga, s);\n" \
"})();\n" % gaId
return code
def getButtonWidth(self, label):
'''Determine button width, in pixels, corresponding to the button
p_label.'''
# Set a minimum width for small labels.
if len(label) < 15: return 'width:130px'
return 'padding-left: 26px; padding-right: 8px'
def getLinksTargetInfo(self, klass):
'''Appy allows to open links to view or edit instances of p_klass
either via the same browser window, or via a popup. This method
returns info about that, as an object having 2 attributes:
- target is "_self" if the link leads to the same browser window,
"appyIFrame" if the link must be opened in a popup;
- openPopup is unused if target is "_self" and contains the
Javascript code to open the popup.'''
res = Object(target='_self', openPopup='')
if hasattr(klass, 'popup'):
res.target = 'appyIFrame'
d = klass.popup
if isinstance(d, str):
# Width only
params = int(d[:-2])
else:
# Width and height
params = "%s, %s" % (d[0][:-2], d[1][:-2])
res.openPopup = "openPopup('iframePopup',null,%s)" % params
return res
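    # Illustration (hypothetical class, not part of this module) of the two
    # forms accepted for attribute "popup" as described above:
    #
    #   class Invoice:
    #       popup = '500px'                # width only
    #       # popup = ('500px', '350px')   # width and height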
def backFromPopup(self):
'''Returns the PX allowing to close the iframe popup and refresh the
base page.'''
return self.appy().pxBack({'ztool': self})
# ------------------------------------------------------------------------------
| Eveler/libs | __Python__/ufms_blanks/appy3/gen/mixins/ToolMixin.py | Python | gpl-3.0 | 65,134 | 0.00281 |
from enum import Enum
class Direction(Enum):
invalid = (0.0, 0.0)
up = (0.0, -1.0)
down = (0.0, 1.0)
left = (-1.0, 0.0)
right = (1.0, 0.0)
def x(self):
return self.value[0]
def y(self):
return self.value[1]
def __str__(self):
return str(self.value)
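# Minimal usage sketch (assumed, not part of the original file):
#   d = Direction.up
#   d.x(), d.y()   # -> (0.0, -1.0)
#   str(d)         # -> '(0.0, -1.0)'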
| Daarknes/Gadakeco | src/util/directions.py | Python | gpl-3.0 | 328 | 0 |
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class PyReadmeRenderer(PythonPackage):
"""readme_renderer is a library for rendering "readme" descriptions
for Warehouse."""
homepage = "https://github.com/pypa/readme_renderer"
url = "https://pypi.io/packages/source/r/readme_renderer/readme_renderer-16.0.tar.gz"
version('16.0', '70321cea986956bcf2deef9981569f39')
depends_on('[email protected]:2.8,3.2:3.3')
depends_on('py-setuptools', type='build')
depends_on('py-bleach', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('py-pygments', type=('build', 'run'))
depends_on('py-six', type=('build', 'run'))
| EmreAtes/spack | var/spack/repos/builtin/packages/py-readme-renderer/package.py | Python | lgpl-2.1 | 1,901 | 0.001052 |
#!/usr/bin/env python3
# encoding: UTF-8
import unittest
from vCloudOVFMunger import sanitise_machine_name
# Test my basic string sanitiser. This needs no setup, files, or network stuff.
# python -m unittest test.test_sanitise_machine_name
class XMLTests(unittest.TestCase):
def setUp(self):
self.alltests = [
'same-string' ,'same-string',
'lOWER cASE' ,'lower-case',
'L0@d$@ jµnk' ,'l0-d-j-nk',
' trim my e\nds \n\n' ,'trim-my-e-ds'
]
def tearDown(self):
pass
#Before Python 3.4, each test needs a separate function, so I need
#to do this long-hand.
def test_santise_0(self):
self._t(0)
def test_santise_1(self):
self._t(1)
def test_santise_2(self):
self._t(2)
def test_santise_3(self):
self._t(3)
def _t(self, idx):
fromstr = self.alltests[idx * 2]
tostr = self.alltests[idx * 2 + 1]
self.assertEqual(sanitise_machine_name(fromstr), tostr)
if __name__ == '__main__':
unittest.main()
| environmentalomics/iso-to-image | uploader/test/test_sanitise_machine_name.py | Python | mit | 1,066 | 0.020657 |
import re, os, datetime
from sopel import module
from sopel.config.types import StaticSection, ValidatedAttribute, FilenameAttribute
DEFAULT_CHANLOGS_DIR = os.getenv("HOME") + "/chanlogs"
DEFAULT_LINE_PATTERN = re.compile(r"^([^\s]*) <([^>]*)> (.*)$")
class GrepLogsSection(StaticSection):
dir = FilenameAttribute('dir', directory=True, default=DEFAULT_CHANLOGS_DIR)
def configure(config):
config.define_section('greplogs', GrepLogsSection, validate=False)
config.greplogs.configure_setting('dir','Path to channel log storage directory')
return
def setup(bot):
bot.config.define_section('greplogs', GrepLogsSection)
return
def get_log_files_for_channel(dpath, name):
for fname in os.listdir(dpath):
if not fname.startswith(name):
continue
fpath = "{}/{}".format(dpath, fname)
if not os.access(fpath, os.R_OK):
continue
yield fpath
return
def parse_logline(bot, line):
# in log file, pattern always is
# date <nick> msg
date, nick, msg = [x.strip() for x in re.split(DEFAULT_LINE_PATTERN, line) if len(x.strip()) ]
date = date.replace("+00:00", "+0000")
date_obj = datetime.datetime.strptime(date, "%Y-%m-%dT%H:%M:%S%z")
return (date_obj, nick, msg)
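# Example (illustrative line, not taken from a real channel log):
#   parse_logline(bot, "2016-01-02T15:04:05+00:00 <alice> hello world")
# returns a (datetime, nick, msg) tuple equivalent to
#   (datetime(2016, 1, 2, 15, 4, 5, tzinfo=timezone.utc), "alice", "hello world")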
@module.commands("grep-logs")
@module.example(".grep-logs http(s)?://")
def grep_logs(bot, trigger):
pattern_str = trigger.group(2)
if not pattern_str:
bot.reply("Missing pattern")
return
pattern = re.compile(pattern_str, re.IGNORECASE)
dpath = bot.config.greplogs.dir
channel_name = trigger.sender
found = 0
for log_file in get_log_files_for_channel(dpath, channel_name):
with open(log_file, "r") as f:
for i, line in enumerate(f.readlines()):
try:
date, nick, msg = parse_logline(bot, line)
if pattern.search(msg):
bot.say("On {}, {} said: {}".format(date.strftime("%c"), nick, msg))
found += 1
except Exception as e:
continue
if found == 0:
bot.reply("No entries found matching '{}'".format(pattern_str))
else:
bot.reply("Found {} entr{} matching '{}'".format(found,
"ies" if found > 1 else "y",
pattern_str))
return
| thegoonies/tools | sopel-modules/grep-logs.py | Python | mit | 2,449 | 0.003675 |
## @example stream_from_webcamera.py
# This example will stream images from your webcamera and run it through a simple edge detection filter (LaplacianOfGaussian)
# and display it in real-time.
import fast
streamer = fast.CameraStreamer.create()
filter = fast.LaplacianOfGaussian.create().connect(streamer)
renderer = fast.ImageRenderer.create().connect(filter)
window = fast.SimpleWindow2D.create().connect(renderer).run()
| smistad/FAST | source/FAST/Examples/Python/stream_from_webcamera.py | Python | bsd-2-clause | 428 | 0.004673 |
'''Siku scenario
Sea ice element simple free drift example
Creates a (large) polygon in polar region and sets some basic winds
in still water. No interaction with any boundaries, just a free
float of the polygon in the area and output in KML format of its
locations.
Be sure that siku module is in your PYTHONPATH.
Use python3 for checking. It is not compatible with python2.x
(c)2014 UAF, written by Anton Kulchitsky
GPLv3 or later license (same as siku)
'''
import subprocess
import os
import math
import sys
import datetime
import mathutils
import numpy
import siku
from siku import polygon
from siku import element
from siku import material
from siku import geocoords
from siku import regrid
from siku import gmt_Plotter
GMT_Plotter = gmt_Plotter.GMT_Plotter
from siku import poly_voronoi
PolyVor = poly_voronoi.PolyVor
from siku import h5load
hload = h5load.Loader
from siku import wnd
def main():
# ---------------------------------------------------------------------
# Define material
# ---------------------------------------------------------------------
ice = material.Material() # default ice values, 10 thicknesses
ice.name = 'ice' # prefer to use our own name instead
# of default
siku.materials.append( ice ) # list of all materials
# table of material names for convenience
matnames = {
'ice': 0,
}
# ---------------------------------------------------------------------
# Wind initializations (NMC grid example)
# ---------------------------------------------------------------------
siku.uw = wnd.NMCVar( 'u1994.nc', 'uwnd' )
siku.vw = wnd.NMCVar( 'v1994.nc', 'vwnd' )
start = datetime.datetime ( 1994, 2, 16, 00, 00, 00 )
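    # locate the first NMC wind record at or after the chosen start time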
for i in range(len( siku.uw.times )):
if siku.uw.times[i] >= start:
break
st_t_ind = i
siku.time.update_index = i - 1
print( 'start time: ' + str( start ) + ' at position: ' + str( i ) + \
' of ' + str( len( siku.uw.times ) ) + '\n\n' )
siku.wind = wnd.NMCSurfaceVField( siku.uw, siku.vw, st_t_ind )
siku.settings.wind_source_type = siku.WIND_SOURCES['NMC']
siku.settings.wind_source_names = [ 'u1994.nc', 'v1994.nc' ]
## w = wnd.NMCSurfaceVField( siku.uw, siku.vw, st_t_ind )
## w.make_test_field( 0.,0. )
## siku.wind = w
# ---------------------------------------------------------------------
# date/time settings
# ---------------------------------------------------------------------
#siku.time.start = datetime.datetime ( 2012, 3, 12, 00, 00, 00 )
#siku.time.finish = datetime.datetime ( 2012, 3, 13 )
#siku.time.finish = datetime.datetime ( 2012, 3, 12, 00, 00, 10 )
#siku.time.dt = datetime.timedelta ( seconds = 1 )
siku.time.dts = datetime.timedelta ( seconds = 600 )
#siku.time.last = siku.time.start
hour = datetime.timedelta ( minutes = 60 )
## time inits by NMC grid times
siku.time.start = siku.uw.times[st_t_ind]
siku.time.last = siku.uw.times[st_t_ind]
siku.time.last_update = siku.time.last
siku.time.finish = siku.uw.times[st_t_ind] + hour * 90
#siku.time.dt = datetime.timedelta ( milliseconds = 1 )
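    # main time step: the 90-hour run is split into 3600 equal steps (90 s each)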
siku.time.dt = ( siku.time.finish - siku.time.start ) / 3600
# ---------------------------------------------------------------------
# elements
# ---------------------------------------------------------------------
coords = []
siku.elements = []
# ---------------------- voronoi initialization ------------------------
print('\nLoading polygons')
## North cap
PV = PolyVor( 'alaska.voronoi.xyz', 'alaska.voronoi.xyzf' )
## Channel (handmade)
## PC = PolyVor( 'alaska.voronoi.xyz', 'alaska.voronoi.xyzf' )
PV.filter_( 0, 360, 60, 90 )
## PC.filter_( 179, 187, 54, 60 )
##TESTING!
#### PV.filter_( 190, 230, 62, 82 )
## PC.filter_( 190, 230, 62, 82 )
##/TESTING
print('Deleting land polygons')
PV.clear_the_land()
coords = PV.coords
## coords = coords + PC.coords
siku.tempc = coords # for debug
### Initializing elements with polygon vertices
for c in coords:
siku.P.update( c )
# Element declaration
E = element.Element( polygon = siku.P, imat = matnames['ice'] )
E.monitor = "drift_monitor"
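        # ice thickness distribution over the material's 10 thickness categories (fractions here sum to 1)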
gh = [ 0.2, 0.2, 0.4, 0.2, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0 ]
E.set_gh( gh, ice )
# all elements in the list
siku.elements.append( E )
    ## Core will mark polygons that contain at least one point from the next
    ## file as 'static'
siku.settings.border_mark = 1
siku.settings.borders = 'contours.ll'
print('Marking borders with GMT')
bor = PV.get_border_by_gmt()
for b in bor:
siku.elements[ b ].flag_state = element.Element.f_static
print('Done\n\n')
# ---------------------- loading from file ----------------------------
## print('file start atempt\n')
##
## hl = hload('save_test.h5')
#### #hl = hload('siku-2014-01-01-12:50:46.h5')
####
#### #hl.load()
## hl.load_fnames()
## hl.load_mats()
## hl.load_els()
## print('\n')
##
## siku.elements = hl.extract_els()
## siku.materials = hl.extract_mats()
##
## hl = None
# ---------------------------------------------------------------------
# Monitor function for the polygon
# ---------------------------------------------------------------------
## Plotter initialization
siku.plotter = GMT_Plotter( 'beaufort94_plot.py' )
    ### plotting period (one frame is drawn every monitor_period steps)
siku.diagnostics.monitor_period = 30
siku.drift_monitor = drift_monitor
siku.diagnostics.step_count = 0
siku.settings.contact_method = siku.CONTACT_METHODS['sweep']
siku.settings.force_model = \
siku.CONTACT_FORCE_MODEL['distributed_spring']
# name of file to load from
#siku.settings.loadfile = 'siku-2014-01-01-12:00:00.h5'
siku.settings.loadfile = 'save_test.h5'
## siku.settings.phys_consts = [ 5000 , 10000000 , 0.75, -0.00003, 1, \
## 1, 1, 1, 1, 1 ]
siku.settings.phys_consts = { 'rigidity' : 10.0,#10,
'viscosity' : 1.0,#1.0,#1
'rotatability' : 0.750,#0.75
'tangency' : -0.00003,#-0.00003
'elasticity' :-50000000.0,#-5000000.0,
'bendability' : 1.0,#1.0,
'solidity' : 0.05,#0.05,
'tensility' : 0.30,#0.615,
'anchority' : 0.0005,
'windage': 0.05, #0.05
'fastency' : 0.50, #0.5
'sigma' : 1.0, # -//- rigidity
'etha' : 1.0 # -//- viscosity
}
## siku.settings.contact_freq_met = siku.CONTACT_DET_FREQ_MET['speed']
## siku.settings.contact_value = 1000
# ---------------------------------------------------------------------
# Diagnostics function for the winds
    # ---------------------------------------------------------------------
## # We create a grid and append it to monitor grids
## siku.diagnostics.wind_counter = 0
## rg = regrid.Regrid()
## mesh_01 = rg.globe_coverage( 5.0 )
## siku.diagnostics.meshes.append( mesh_01 )
## siku.diagnostics.wind.append(
## ( winds_diag, 0, siku.time.start, 2*siku.time.dt ) )
# ---------------------------------------------------------------------
# Settings
# ---------------------------------------------------------------------
# ---------------------------------------------------------------------
# Callback flag-mask generator
# ---------------------------------------------------------------------
siku.callback.pretimestep = pretimestep
siku.callback.aftertimestep = aftertimestep
siku.callback.conclusions = conclusions
siku.callback.initializations = initializations
siku.callback.updatewind = updatewind
##
siku.callback.presave = presave
siku.err_test = {}
return 0
def presave( t, n, ns ):
'''no saving at all'''
return
# --------------------------------------------------------------------------
def initializations( siku, t ):
subprocess.call(["gmtset", "PS_MEDIA=Custom_24cx20c"])
# --------------------------------------------------------------------------
def conclusions( siku, t ):
with open("err_time.txt", 'w') as erf:
for i in siku.err_test:
erf.write( str(i) + ' : ' )#+ ':\n' )
erf.write( str( len( siku.err_test[i] ) ) )
## for t in siku.err_test[i]:
## erf.write( str( t ) + ' ' )
erf.write( '\n' )
print('creating .gif')
subprocess.call( "nice convert -density 300 -delay 10 beaufort*.eps beaufort.gif", \
shell=True )
# --------------------------------------------------------------------------
def pretimestep( t, n, ns ):
status = siku.MASK['NONE']
siku.diagnostics.step_count = n
siku.local.poly_f = open( 'Polygons.txt', 'w' )
# some specific checks should be placed.
# primitive time stepping
## if t > ( siku.time.last + siku.time.dt ):
## status += siku.MASK['WINDS']
## siku.time.last = t
# step by NMC own time step
if t >= siku.uw.times[siku.time.update_index + 1]: # siku.time.last: #
status += siku.MASK['WINDS']
siku.time.last = t# siku.time.finish#
# and change the winds here
# ~!wind is changed with another call
# and save the current time in a structure
# ~!current time is saved in siku.time.last
return status
# --------------------------------------------------------------------------
def updatewind( siku, t ):
siku.time.update_index += 1
siku.time.last_update = t
siku.wind = \
wnd.NMCSurfaceVField( siku.uw, siku.vw, siku.time.update_index )
print( str( t ) + '\n' )
pass
# --------------------------------------------------------------------------
def aftertimestep( t, n, ns ):
siku.local.poly_f.close()
if siku.diagnostics.step_count % siku.diagnostics.monitor_period == 0:
        pic_name = 'beaufort%03d.eps' % \
                   (siku.diagnostics.step_count // siku.diagnostics.monitor_period)
print('drawing ' + str( pic_name ) )
siku.plotter.plot( pic_name, siku.time.update_index, siku.wind )
#siku.local.poly_f.close()
return 0
# --------------------------------------------------------------------------
def drift_monitor( t, Q, Ps, st, index, ID, W, F, N, m, I, i, A, a_f, w_f ):
## #static polygons (generally shores) may be simply passed
## if st & element.Element.f_static:
## return
## if st & element.Element.f_errored:
## if siku.err_test.get( i, None ):
## siku.err_test[i].append(t)
## else:
## siku.err_test[i] = [ t ]
## return
## print(st)
## input()
## #errored export x-y:
## if st & element.Element.f_errored:
## with open("errored"+str(i)+".txt", 'w') as erf:
## for p in Ps:
## erf.write( str( p[0] ) +'\t'+ str( p[1] )+'\n' )
# create actual quaternion
q = mathutils.Quaternion( Q )
C = mathutils.Vector( (0,0,1) )
# get latitude and longitude of center of mass (0,0,1)
R = q.to_matrix()
c = R * C
# appending vertices to plotting list
if siku.diagnostics.step_count % siku.diagnostics.monitor_period == 0:
Pglob = [ R*mathutils.Vector( p ) for p in Ps ]
vert = [ geocoords.lonlat_deg(mathutils.Vector( p ) ) for p in Pglob ]
poly = siku.local.poly_f
## for debug
## #errored export lon-lat:
## with open("err/errored"+str(i)+".txt", 'w') as erf:
## for v in vert:
## erf.write( str( geocoords.norm_lon(v[0]) )+'\t'+ \
## str( v[1] )+'\n' )
## #errored original export lon-lat:
## with open("err/original"+str(i)+".txt", 'w') as erf:
## #for v in siku.elements[i].verts_xyz_loc:
## for v in siku.tempc[i]:
## erf.write( str( geocoords.norm_lon(v[0]) )+'\t'+ \
## str( v[1] )+'\n' )
## /for debug
if st & element.Element.f_errored: ##
poly.write( '> -Gred -W0.1p,red \n' ) ##
## if
elif st & element.Element.f_special: ## elif -> if
poly.write( '> -Gpurple -W0.1p,pink \n' )
elif st & element.Element.f_static:
## return
poly.write( '> -Gbrown -W0.1p,lightBlue \n' )#<<--- this
## poly.write( '> -GlightCyan -W0.1p,lightBlue \n' )
elif st & element.Element.f_steady:
poly.write( '> -GlightGreen -W0.1p,lightBlue \n' )
else:
poly.write( '> -GlightCyan -W0.1p,lightBlue \n' )
for v in vert:
poly.write( str( geocoords.norm_lon(v[0]) )+'\t'+ \
str( v[1] )+'\n' )
return
# --------------------------------------------------------------------------
def winds_diag( t, winds ):
mesh = siku.diagnostics.meshes[0]
ez = mathutils.Vector( (0,0,1) )
###### Commented to stop that file breeding while other modules are being tested
## fp = open( 'winds-%02d.txt' % (siku.diagnostics.wind_counter), 'w' )
##
## for i, w in enumerate( winds ):
## x = mathutils.Vector( mesh[i] )
## u = mathutils.Vector( w )
## uval = u.length
## lon, lat = geocoords.lonlat_deg( x )
## a = ez - x
##
## mdl = a.length * uval
## if ( mdl != 0 ):
## azimuth = 180 * math.acos( (a*u) / mdl ) / math.pi
## fp.write( "%f %f %f %f %f\n" % \
## ( lon, lat, 0.25*uval, azimuth, 0.7*uval ) )
##
##
## fp.close()
siku.diagnostics.wind_counter += 1
return
# ---------------------------------------------------------------------
# Calling main function at the end
# ---------------------------------------------------------------------
siku.main = main()
if __name__ == '__main__':
sys.exit( siku.main )
| Atoku/siku | samples/beaufort.py | Python | gpl-2.0 | 14,677 | 0.025346 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for utilities working with arbitrarily nested structures."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import time
from absl.testing import parameterized
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
from tensorflow.python.util import nest
from tensorflow.python.util.compat import collections_abc
try:
import attr # pylint:disable=g-import-not-at-top
except ImportError:
attr = None
class _CustomMapping(collections_abc.Mapping):
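  """Minimal Mapping implementation used to exercise non-dict mapping support."""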
def __init__(self, *args, **kwargs):
self._wrapped = dict(*args, **kwargs)
def __getitem__(self, key):
return self._wrapped[key]
def __iter__(self):
return iter(self._wrapped)
def __len__(self):
return len(self._wrapped)
class _CustomSequenceThatRaisesException(collections_abc.Sequence):
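  """Sequence whose __getitem__ always raises, used to check error propagation in flatten."""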
def __len__(self):
return 1
def __getitem__(self, item):
raise ValueError("Cannot get item: %s" % item)
class NestTest(parameterized.TestCase, test.TestCase):
PointXY = collections.namedtuple("Point", ["x", "y"]) # pylint: disable=invalid-name
unsafe_map_pattern = ("nest cannot guarantee that it is safe to map one to "
"the other.")
bad_pack_pattern = ("Attempted to pack value:\n .+\ninto a sequence, but "
"found incompatible type `<(type|class) 'str'>` instead.")
if attr:
class BadAttr(object):
"""Class that has a non-iterable __attrs_attrs__."""
__attrs_attrs__ = None
@attr.s
class SampleAttr(object):
field1 = attr.ib()
field2 = attr.ib()
@attr.s
class UnsortedSampleAttr(object):
field3 = attr.ib()
field1 = attr.ib()
field2 = attr.ib()
@test_util.assert_no_new_pyobjects_executing_eagerly
def testAttrsFlattenAndPack(self):
if attr is None:
self.skipTest("attr module is unavailable.")
field_values = [1, 2]
sample_attr = NestTest.SampleAttr(*field_values)
self.assertFalse(nest._is_attrs(field_values))
self.assertTrue(nest._is_attrs(sample_attr))
flat = nest.flatten(sample_attr)
self.assertEqual(field_values, flat)
restructured_from_flat = nest.pack_sequence_as(sample_attr, flat)
self.assertIsInstance(restructured_from_flat, NestTest.SampleAttr)
self.assertEqual(restructured_from_flat, sample_attr)
# Check that flatten fails if attributes are not iterable
with self.assertRaisesRegexp(TypeError, "object is not iterable"):
flat = nest.flatten(NestTest.BadAttr())
@parameterized.parameters(
{"values": [1, 2, 3]},
{"values": [{"B": 10, "A": 20}, [1, 2], 3]},
{"values": [(1, 2), [3, 4], 5]},
{"values": [PointXY(1, 2), 3, 4]},
)
@test_util.assert_no_new_pyobjects_executing_eagerly
def testAttrsMapStructure(self, values):
if attr is None:
self.skipTest("attr module is unavailable.")
structure = NestTest.UnsortedSampleAttr(*values)
new_structure = nest.map_structure(lambda x: x, structure)
self.assertEqual(structure, new_structure)
@test_util.assert_no_new_pyobjects_executing_eagerly
def testFlattenAndPack(self):
structure = ((3, 4), 5, (6, 7, (9, 10), 8))
flat = ["a", "b", "c", "d", "e", "f", "g", "h"]
self.assertEqual(nest.flatten(structure), [3, 4, 5, 6, 7, 9, 10, 8])
self.assertEqual(
nest.pack_sequence_as(structure, flat), (("a", "b"), "c",
("d", "e", ("f", "g"), "h")))
structure = (NestTest.PointXY(x=4, y=2),
((NestTest.PointXY(x=1, y=0),),))
flat = [4, 2, 1, 0]
self.assertEqual(nest.flatten(structure), flat)
restructured_from_flat = nest.pack_sequence_as(structure, flat)
self.assertEqual(restructured_from_flat, structure)
self.assertEqual(restructured_from_flat[0].x, 4)
self.assertEqual(restructured_from_flat[0].y, 2)
self.assertEqual(restructured_from_flat[1][0][0].x, 1)
self.assertEqual(restructured_from_flat[1][0][0].y, 0)
self.assertEqual([5], nest.flatten(5))
self.assertEqual([np.array([5])], nest.flatten(np.array([5])))
self.assertEqual("a", nest.pack_sequence_as(5, ["a"]))
self.assertEqual(
np.array([5]), nest.pack_sequence_as("scalar", [np.array([5])]))
with self.assertRaisesRegexp(
ValueError, self.unsafe_map_pattern):
nest.pack_sequence_as("scalar", [4, 5])
with self.assertRaisesRegexp(TypeError, self.bad_pack_pattern):
nest.pack_sequence_as([4, 5], "bad_sequence")
with self.assertRaises(ValueError):
nest.pack_sequence_as([5, 6, [7, 8]], ["a", "b", "c"])
@parameterized.parameters({"mapping_type": collections.OrderedDict},
{"mapping_type": _CustomMapping})
@test_util.assert_no_new_pyobjects_executing_eagerly
def testFlattenDictOrder(self, mapping_type):
"""`flatten` orders dicts by key, including OrderedDicts."""
ordered = mapping_type([("d", 3), ("b", 1), ("a", 0), ("c", 2)])
plain = {"d": 3, "b": 1, "a": 0, "c": 2}
ordered_flat = nest.flatten(ordered)
plain_flat = nest.flatten(plain)
self.assertEqual([0, 1, 2, 3], ordered_flat)
self.assertEqual([0, 1, 2, 3], plain_flat)
@parameterized.parameters({"mapping_type": collections.OrderedDict},
{"mapping_type": _CustomMapping})
def testPackDictOrder(self, mapping_type):
"""Packing orders dicts by key, including OrderedDicts."""
custom = mapping_type([("d", 0), ("b", 0), ("a", 0), ("c", 0)])
plain = {"d": 0, "b": 0, "a": 0, "c": 0}
seq = [0, 1, 2, 3]
custom_reconstruction = nest.pack_sequence_as(custom, seq)
plain_reconstruction = nest.pack_sequence_as(plain, seq)
self.assertIsInstance(custom_reconstruction, mapping_type)
self.assertIsInstance(plain_reconstruction, dict)
self.assertEqual(
mapping_type([("d", 3), ("b", 1), ("a", 0), ("c", 2)]),
custom_reconstruction)
self.assertEqual({"d": 3, "b": 1, "a": 0, "c": 2}, plain_reconstruction)
@test_util.assert_no_new_pyobjects_executing_eagerly
def testFlattenAndPackMappingViews(self):
"""`flatten` orders dicts by key, including OrderedDicts."""
ordered = collections.OrderedDict([("d", 3), ("b", 1), ("a", 0), ("c", 2)])
# test flattening
ordered_keys_flat = nest.flatten(ordered.keys())
ordered_values_flat = nest.flatten(ordered.values())
ordered_items_flat = nest.flatten(ordered.items())
self.assertEqual([3, 1, 0, 2], ordered_values_flat)
self.assertEqual(["d", "b", "a", "c"], ordered_keys_flat)
self.assertEqual(["d", 3, "b", 1, "a", 0, "c", 2], ordered_items_flat)
# test packing
self.assertEqual([("d", 3), ("b", 1), ("a", 0), ("c", 2)],
nest.pack_sequence_as(ordered.items(), ordered_items_flat))
Abc = collections.namedtuple("A", ("b", "c")) # pylint: disable=invalid-name
@test_util.assert_no_new_pyobjects_executing_eagerly
def testFlattenAndPack_withDicts(self):
# A nice messy mix of tuples, lists, dicts, and `OrderedDict`s.
mess = [
"z",
NestTest.Abc(3, 4), {
"d": _CustomMapping({
41: 4
}),
"c": [
1,
collections.OrderedDict([
("b", 3),
("a", 2),
]),
],
"b": 5
}, 17
]
flattened = nest.flatten(mess)
self.assertEqual(flattened, ["z", 3, 4, 5, 1, 2, 3, 4, 17])
structure_of_mess = [
14,
NestTest.Abc("a", True),
{
"d": _CustomMapping({
41: 42
}),
"c": [
0,
collections.OrderedDict([
("b", 9),
("a", 8),
]),
],
"b": 3
},
"hi everybody",
]
unflattened = nest.pack_sequence_as(structure_of_mess, flattened)
self.assertEqual(unflattened, mess)
# Check also that the OrderedDict was created, with the correct key order.
unflattened_ordered_dict = unflattened[2]["c"][1]
self.assertIsInstance(unflattened_ordered_dict, collections.OrderedDict)
self.assertEqual(list(unflattened_ordered_dict.keys()), ["b", "a"])
unflattened_custom_mapping = unflattened[2]["d"]
self.assertIsInstance(unflattened_custom_mapping, _CustomMapping)
self.assertEqual(list(unflattened_custom_mapping.keys()), [41])
def testFlatten_numpyIsNotFlattened(self):
structure = np.array([1, 2, 3])
flattened = nest.flatten(structure)
self.assertLen(flattened, 1)
def testFlatten_stringIsNotFlattened(self):
structure = "lots of letters"
flattened = nest.flatten(structure)
self.assertLen(flattened, 1)
unflattened = nest.pack_sequence_as("goodbye", flattened)
self.assertEqual(structure, unflattened)
def testPackSequenceAs_notIterableError(self):
with self.assertRaisesRegexp(
TypeError, self.bad_pack_pattern):
nest.pack_sequence_as("hi", "bye")
def testPackSequenceAs_wrongLengthsError(self):
with self.assertRaisesRegexp(
ValueError,
"Structure had 2 elements, but flat_sequence had 3 elements."):
nest.pack_sequence_as(["hello", "world"],
["and", "goodbye", "again"])
@test_util.assert_no_new_pyobjects_executing_eagerly
def testIsNested(self):
self.assertFalse(nest.is_nested("1234"))
self.assertTrue(nest.is_nested([1, 3, [4, 5]]))
self.assertTrue(nest.is_nested(((7, 8), (5, 6))))
self.assertTrue(nest.is_nested([]))
self.assertTrue(nest.is_nested({"a": 1, "b": 2}))
self.assertTrue(nest.is_nested({"a": 1, "b": 2}.keys()))
self.assertTrue(nest.is_nested({"a": 1, "b": 2}.values()))
self.assertTrue(nest.is_nested({"a": 1, "b": 2}.items()))
self.assertFalse(nest.is_nested(set([1, 2])))
ones = array_ops.ones([2, 3])
self.assertFalse(nest.is_nested(ones))
self.assertFalse(nest.is_nested(math_ops.tanh(ones)))
self.assertFalse(nest.is_nested(np.ones((4, 5))))
@parameterized.parameters({"mapping_type": _CustomMapping},
{"mapping_type": dict})
def testFlattenDictItems(self, mapping_type):
dictionary = mapping_type({(4, 5, (6, 8)): ("a", "b", ("c", "d"))})
flat = {4: "a", 5: "b", 6: "c", 8: "d"}
self.assertEqual(nest.flatten_dict_items(dictionary), flat)
with self.assertRaises(TypeError):
nest.flatten_dict_items(4)
bad_dictionary = mapping_type({(4, 5, (4, 8)): ("a", "b", ("c", "d"))})
with self.assertRaisesRegexp(ValueError, "not unique"):
nest.flatten_dict_items(bad_dictionary)
another_bad_dictionary = mapping_type({
(4, 5, (6, 8)): ("a", "b", ("c", ("d", "e")))
})
with self.assertRaisesRegexp(
ValueError, "Key had [0-9]* elements, but value had [0-9]* elements"):
nest.flatten_dict_items(another_bad_dictionary)
# pylint does not correctly recognize these as class names and
# suggests to use variable style under_score naming.
# pylint: disable=invalid-name
Named0ab = collections.namedtuple("named_0", ("a", "b"))
Named1ab = collections.namedtuple("named_1", ("a", "b"))
SameNameab = collections.namedtuple("same_name", ("a", "b"))
SameNameab2 = collections.namedtuple("same_name", ("a", "b"))
SameNamexy = collections.namedtuple("same_name", ("x", "y"))
SameName1xy = collections.namedtuple("same_name_1", ("x", "y"))
SameName1xy2 = collections.namedtuple("same_name_1", ("x", "y"))
NotSameName = collections.namedtuple("not_same_name", ("a", "b"))
# pylint: enable=invalid-name
class SameNamedType1(SameNameab):
pass
@test_util.assert_no_new_pyobjects_executing_eagerly
def testAssertSameStructure(self):
structure1 = (((1, 2), 3), 4, (5, 6))
structure2 = ((("foo1", "foo2"), "foo3"), "foo4", ("foo5", "foo6"))
structure_different_num_elements = ("spam", "eggs")
structure_different_nesting = (((1, 2), 3), 4, 5, (6,))
nest.assert_same_structure(structure1, structure2)
nest.assert_same_structure("abc", 1.0)
nest.assert_same_structure("abc", np.array([0, 1]))
nest.assert_same_structure("abc", constant_op.constant([0, 1]))
with self.assertRaisesRegexp(
ValueError,
("The two structures don't have the same nested structure\\.\n\n"
"First structure:.*?\n\n"
"Second structure:.*\n\n"
"More specifically: Substructure "
r'"type=tuple str=\(\(1, 2\), 3\)" is a sequence, while '
'substructure "type=str str=spam" is not\n'
"Entire first structure:\n"
r"\(\(\(\., \.\), \.\), \., \(\., \.\)\)\n"
"Entire second structure:\n"
r"\(\., \.\)")):
nest.assert_same_structure(structure1, structure_different_num_elements)
with self.assertRaisesRegexp(
ValueError,
("The two structures don't have the same nested structure\\.\n\n"
"First structure:.*?\n\n"
"Second structure:.*\n\n"
r'More specifically: Substructure "type=list str=\[0, 1\]" '
r'is a sequence, while substructure "type=ndarray str=\[0 1\]" '
"is not")):
nest.assert_same_structure([0, 1], np.array([0, 1]))
with self.assertRaisesRegexp(
ValueError,
("The two structures don't have the same nested structure\\.\n\n"
"First structure:.*?\n\n"
"Second structure:.*\n\n"
r'More specifically: Substructure "type=list str=\[0, 1\]" '
'is a sequence, while substructure "type=int str=0" '
"is not")):
nest.assert_same_structure(0, [0, 1])
self.assertRaises(TypeError, nest.assert_same_structure, (0, 1), [0, 1])
with self.assertRaisesRegexp(
ValueError,
("don't have the same nested structure\\.\n\n"
"First structure: .*?\n\nSecond structure: ")):
nest.assert_same_structure(structure1, structure_different_nesting)
self.assertRaises(TypeError, nest.assert_same_structure, (0, 1),
NestTest.Named0ab("a", "b"))
nest.assert_same_structure(NestTest.Named0ab(3, 4),
NestTest.Named0ab("a", "b"))
self.assertRaises(TypeError, nest.assert_same_structure,
NestTest.Named0ab(3, 4), NestTest.Named1ab(3, 4))
with self.assertRaisesRegexp(
ValueError,
("don't have the same nested structure\\.\n\n"
"First structure: .*?\n\nSecond structure: ")):
nest.assert_same_structure(NestTest.Named0ab(3, 4),
NestTest.Named0ab([3], 4))
with self.assertRaisesRegexp(
ValueError,
("don't have the same nested structure\\.\n\n"
"First structure: .*?\n\nSecond structure: ")):
nest.assert_same_structure([[3], 4], [3, [4]])
structure1_list = [[[1, 2], 3], 4, [5, 6]]
with self.assertRaisesRegexp(TypeError,
"don't have the same sequence type"):
nest.assert_same_structure(structure1, structure1_list)
nest.assert_same_structure(structure1, structure2, check_types=False)
nest.assert_same_structure(structure1, structure1_list, check_types=False)
with self.assertRaisesRegexp(ValueError,
"don't have the same set of keys"):
nest.assert_same_structure({"a": 1}, {"b": 1})
nest.assert_same_structure(NestTest.SameNameab(0, 1),
NestTest.SameNameab2(2, 3))
# This assertion is expected to pass: two namedtuples with the same
# name and field names are considered to be identical.
nest.assert_same_structure(
NestTest.SameNameab(NestTest.SameName1xy(0, 1), 2),
NestTest.SameNameab2(NestTest.SameName1xy2(2, 3), 4))
expected_message = "The two structures don't have the same.*"
with self.assertRaisesRegexp(ValueError, expected_message):
nest.assert_same_structure(
NestTest.SameNameab(0, NestTest.SameNameab2(1, 2)),
NestTest.SameNameab2(NestTest.SameNameab(0, 1), 2))
self.assertRaises(TypeError, nest.assert_same_structure,
NestTest.SameNameab(0, 1), NestTest.NotSameName(2, 3))
self.assertRaises(TypeError, nest.assert_same_structure,
NestTest.SameNameab(0, 1), NestTest.SameNamexy(2, 3))
self.assertRaises(TypeError, nest.assert_same_structure,
NestTest.SameNameab(0, 1), NestTest.SameNamedType1(2, 3))
EmptyNT = collections.namedtuple("empty_nt", "") # pylint: disable=invalid-name
def testHeterogeneousComparison(self):
nest.assert_same_structure({"a": 4}, _CustomMapping(a=3))
nest.assert_same_structure(_CustomMapping(b=3), {"b": 4})
@test_util.assert_no_new_pyobjects_executing_eagerly
def testMapStructure(self):
structure1 = (((1, 2), 3), 4, (5, 6))
structure2 = (((7, 8), 9), 10, (11, 12))
structure1_plus1 = nest.map_structure(lambda x: x + 1, structure1)
nest.assert_same_structure(structure1, structure1_plus1)
self.assertAllEqual(
[2, 3, 4, 5, 6, 7],
nest.flatten(structure1_plus1))
structure1_plus_structure2 = nest.map_structure(
lambda x, y: x + y, structure1, structure2)
self.assertEqual(
(((1 + 7, 2 + 8), 3 + 9), 4 + 10, (5 + 11, 6 + 12)),
structure1_plus_structure2)
self.assertEqual(3, nest.map_structure(lambda x: x - 1, 4))
self.assertEqual(7, nest.map_structure(lambda x, y: x + y, 3, 4))
structure3 = collections.defaultdict(list)
structure3["a"] = [1, 2, 3, 4]
structure3["b"] = [2, 3, 4, 5]
expected_structure3 = collections.defaultdict(list)
expected_structure3["a"] = [2, 3, 4, 5]
expected_structure3["b"] = [3, 4, 5, 6]
self.assertEqual(expected_structure3,
nest.map_structure(lambda x: x + 1, structure3))
# Empty structures
self.assertEqual((), nest.map_structure(lambda x: x + 1, ()))
self.assertEqual([], nest.map_structure(lambda x: x + 1, []))
self.assertEqual({}, nest.map_structure(lambda x: x + 1, {}))
self.assertEqual(NestTest.EmptyNT(), nest.map_structure(lambda x: x + 1,
NestTest.EmptyNT()))
# This is checking actual equality of types, empty list != empty tuple
self.assertNotEqual((), nest.map_structure(lambda x: x + 1, []))
with self.assertRaisesRegexp(TypeError, "callable"):
nest.map_structure("bad", structure1_plus1)
with self.assertRaisesRegexp(ValueError, "at least one structure"):
nest.map_structure(lambda x: x)
with self.assertRaisesRegexp(ValueError, "same number of elements"):
nest.map_structure(lambda x, y: None, (3, 4), (3, 4, 5))
with self.assertRaisesRegexp(ValueError, "same nested structure"):
nest.map_structure(lambda x, y: None, 3, (3,))
with self.assertRaisesRegexp(TypeError, "same sequence type"):
nest.map_structure(lambda x, y: None, ((3, 4), 5), [(3, 4), 5])
with self.assertRaisesRegexp(ValueError, "same nested structure"):
nest.map_structure(lambda x, y: None, ((3, 4), 5), (3, (4, 5)))
structure1_list = [[[1, 2], 3], 4, [5, 6]]
with self.assertRaisesRegexp(TypeError, "same sequence type"):
nest.map_structure(lambda x, y: None, structure1, structure1_list)
nest.map_structure(lambda x, y: None, structure1, structure1_list,
check_types=False)
with self.assertRaisesRegexp(ValueError, "same nested structure"):
nest.map_structure(lambda x, y: None, ((3, 4), 5), (3, (4, 5)),
check_types=False)
with self.assertRaisesRegexp(ValueError,
"Only valid keyword argument.*foo"):
nest.map_structure(lambda x: None, structure1, foo="a")
with self.assertRaisesRegexp(ValueError,
"Only valid keyword argument.*foo"):
nest.map_structure(lambda x: None, structure1, check_types=False, foo="a")
ABTuple = collections.namedtuple("ab_tuple", "a, b") # pylint: disable=invalid-name
@test_util.assert_no_new_pyobjects_executing_eagerly
def testMapStructureWithStrings(self):
inp_a = NestTest.ABTuple(a="foo", b=("bar", "baz"))
inp_b = NestTest.ABTuple(a=2, b=(1, 3))
out = nest.map_structure(lambda string, repeats: string * repeats,
inp_a,
inp_b)
self.assertEqual("foofoo", out.a)
self.assertEqual("bar", out.b[0])
self.assertEqual("bazbazbaz", out.b[1])
nt = NestTest.ABTuple(a=("something", "something_else"),
b="yet another thing")
rev_nt = nest.map_structure(lambda x: x[::-1], nt)
# Check the output is the correct structure, and all strings are reversed.
nest.assert_same_structure(nt, rev_nt)
self.assertEqual(nt.a[0][::-1], rev_nt.a[0])
self.assertEqual(nt.a[1][::-1], rev_nt.a[1])
self.assertEqual(nt.b[::-1], rev_nt.b)
@test_util.run_deprecated_v1
def testMapStructureOverPlaceholders(self):
inp_a = (array_ops.placeholder(dtypes.float32, shape=[3, 4]),
array_ops.placeholder(dtypes.float32, shape=[3, 7]))
inp_b = (array_ops.placeholder(dtypes.float32, shape=[3, 4]),
array_ops.placeholder(dtypes.float32, shape=[3, 7]))
output = nest.map_structure(lambda x1, x2: x1 + x2, inp_a, inp_b)
nest.assert_same_structure(output, inp_a)
self.assertShapeEqual(np.zeros((3, 4)), output[0])
self.assertShapeEqual(np.zeros((3, 7)), output[1])
feed_dict = {
inp_a: (np.random.randn(3, 4), np.random.randn(3, 7)),
inp_b: (np.random.randn(3, 4), np.random.randn(3, 7))
}
with self.cached_session() as sess:
output_np = sess.run(output, feed_dict=feed_dict)
self.assertAllClose(output_np[0],
feed_dict[inp_a][0] + feed_dict[inp_b][0])
self.assertAllClose(output_np[1],
feed_dict[inp_a][1] + feed_dict[inp_b][1])
def testAssertShallowStructure(self):
inp_ab = ["a", "b"]
inp_abc = ["a", "b", "c"]
with self.assertRaisesWithLiteralMatch( # pylint: disable=g-error-prone-assert-raises
ValueError,
nest._STRUCTURES_HAVE_MISMATCHING_LENGTHS.format(
input_length=len(inp_ab),
shallow_length=len(inp_abc))):
nest.assert_shallow_structure(inp_abc, inp_ab)
inp_ab1 = [(1, 1), (2, 2)]
inp_ab2 = [[1, 1], [2, 2]]
with self.assertRaisesWithLiteralMatch(
TypeError,
nest._STRUCTURES_HAVE_MISMATCHING_TYPES.format(
shallow_type=type(inp_ab2[0]),
input_type=type(inp_ab1[0]))):
nest.assert_shallow_structure(inp_ab2, inp_ab1)
nest.assert_shallow_structure(inp_ab2, inp_ab1, check_types=False)
inp_ab1 = {"a": (1, 1), "b": {"c": (2, 2)}}
inp_ab2 = {"a": (1, 1), "b": {"d": (2, 2)}}
with self.assertRaisesWithLiteralMatch(
ValueError,
nest._SHALLOW_TREE_HAS_INVALID_KEYS.format(["d"])):
nest.assert_shallow_structure(inp_ab2, inp_ab1)
inp_ab = collections.OrderedDict([("a", 1), ("b", (2, 3))])
inp_ba = collections.OrderedDict([("b", (2, 3)), ("a", 1)])
nest.assert_shallow_structure(inp_ab, inp_ba)
# This assertion is expected to pass: two namedtuples with the same
# name and field names are considered to be identical.
inp_shallow = NestTest.SameNameab(1, 2)
inp_deep = NestTest.SameNameab2(1, [1, 2, 3])
nest.assert_shallow_structure(inp_shallow, inp_deep, check_types=False)
nest.assert_shallow_structure(inp_shallow, inp_deep, check_types=True)
def testFlattenUpTo(self):
# Shallow tree ends at scalar.
input_tree = [[[2, 2], [3, 3]], [[4, 9], [5, 5]]]
shallow_tree = [[True, True], [False, True]]
flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
self.assertEqual(flattened_input_tree, [[2, 2], [3, 3], [4, 9], [5, 5]])
self.assertEqual(flattened_shallow_tree, [True, True, False, True])
# Shallow tree ends at string.
input_tree = [[("a", 1), [("b", 2), [("c", 3), [("d", 4)]]]]]
shallow_tree = [["level_1", ["level_2", ["level_3", ["level_4"]]]]]
input_tree_flattened_as_shallow_tree = nest.flatten_up_to(shallow_tree,
input_tree)
input_tree_flattened = nest.flatten(input_tree)
self.assertEqual(input_tree_flattened_as_shallow_tree,
[("a", 1), ("b", 2), ("c", 3), ("d", 4)])
self.assertEqual(input_tree_flattened, ["a", 1, "b", 2, "c", 3, "d", 4])
# Make sure dicts are correctly flattened, yielding values, not keys.
input_tree = {"a": 1, "b": {"c": 2}, "d": [3, (4, 5)]}
shallow_tree = {"a": 0, "b": 0, "d": [0, 0]}
input_tree_flattened_as_shallow_tree = nest.flatten_up_to(shallow_tree,
input_tree)
self.assertEqual(input_tree_flattened_as_shallow_tree,
[1, {"c": 2}, 3, (4, 5)])
# Namedtuples.
ab_tuple = NestTest.ABTuple
input_tree = ab_tuple(a=[0, 1], b=2)
shallow_tree = ab_tuple(a=0, b=1)
input_tree_flattened_as_shallow_tree = nest.flatten_up_to(shallow_tree,
input_tree)
self.assertEqual(input_tree_flattened_as_shallow_tree,
[[0, 1], 2])
# Nested dicts, OrderedDicts and namedtuples.
input_tree = collections.OrderedDict(
[("a", ab_tuple(a=[0, {"b": 1}], b=2)),
("c", {"d": 3, "e": collections.OrderedDict([("f", 4)])})])
shallow_tree = input_tree
input_tree_flattened_as_shallow_tree = nest.flatten_up_to(shallow_tree,
input_tree)
self.assertEqual(input_tree_flattened_as_shallow_tree, [0, 1, 2, 3, 4])
shallow_tree = collections.OrderedDict([("a", 0), ("c", {"d": 3, "e": 1})])
input_tree_flattened_as_shallow_tree = nest.flatten_up_to(shallow_tree,
input_tree)
self.assertEqual(input_tree_flattened_as_shallow_tree,
[ab_tuple(a=[0, {"b": 1}], b=2),
3,
collections.OrderedDict([("f", 4)])])
shallow_tree = collections.OrderedDict([("a", 0), ("c", 0)])
input_tree_flattened_as_shallow_tree = nest.flatten_up_to(shallow_tree,
input_tree)
self.assertEqual(input_tree_flattened_as_shallow_tree,
[ab_tuple(a=[0, {"b": 1}], b=2),
{"d": 3, "e": collections.OrderedDict([("f", 4)])}])
## Shallow non-list edge-case.
# Using iterable elements.
input_tree = ["input_tree"]
shallow_tree = "shallow_tree"
flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
self.assertEqual(flattened_input_tree, [input_tree])
self.assertEqual(flattened_shallow_tree, [shallow_tree])
input_tree = ["input_tree_0", "input_tree_1"]
shallow_tree = "shallow_tree"
flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
self.assertEqual(flattened_input_tree, [input_tree])
self.assertEqual(flattened_shallow_tree, [shallow_tree])
# Using non-iterable elements.
input_tree = [0]
shallow_tree = 9
flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
self.assertEqual(flattened_input_tree, [input_tree])
self.assertEqual(flattened_shallow_tree, [shallow_tree])
input_tree = [0, 1]
shallow_tree = 9
flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
self.assertEqual(flattened_input_tree, [input_tree])
self.assertEqual(flattened_shallow_tree, [shallow_tree])
## Both non-list edge-case.
# Using iterable elements.
input_tree = "input_tree"
shallow_tree = "shallow_tree"
flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
self.assertEqual(flattened_input_tree, [input_tree])
self.assertEqual(flattened_shallow_tree, [shallow_tree])
# Using non-iterable elements.
input_tree = 0
shallow_tree = 0
flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
self.assertEqual(flattened_input_tree, [input_tree])
self.assertEqual(flattened_shallow_tree, [shallow_tree])
## Input non-list edge-case.
# Using iterable elements.
input_tree = "input_tree"
shallow_tree = ["shallow_tree"]
expected_message = ("If shallow structure is a sequence, input must also "
"be a sequence. Input has type: <(type|class) 'str'>.")
with self.assertRaisesRegexp(TypeError, expected_message):
flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
self.assertEqual(flattened_shallow_tree, shallow_tree)
input_tree = "input_tree"
shallow_tree = ["shallow_tree_9", "shallow_tree_8"]
with self.assertRaisesRegexp(TypeError, expected_message):
flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
self.assertEqual(flattened_shallow_tree, shallow_tree)
# Using non-iterable elements.
input_tree = 0
shallow_tree = [9]
expected_message = ("If shallow structure is a sequence, input must also "
"be a sequence. Input has type: <(type|class) 'int'>.")
with self.assertRaisesRegexp(TypeError, expected_message):
flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
self.assertEqual(flattened_shallow_tree, shallow_tree)
input_tree = 0
shallow_tree = [9, 8]
with self.assertRaisesRegexp(TypeError, expected_message):
flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
self.assertEqual(flattened_shallow_tree, shallow_tree)
input_tree = [(1,), (2,), 3]
shallow_tree = [(1,), (2,)]
expected_message = nest._STRUCTURES_HAVE_MISMATCHING_LENGTHS.format(
input_length=len(input_tree), shallow_length=len(shallow_tree))
with self.assertRaisesRegexp(ValueError, expected_message): # pylint: disable=g-error-prone-assert-raises
nest.assert_shallow_structure(shallow_tree, input_tree)
def testFlattenWithTuplePathsUpTo(self):
def get_paths_and_values(shallow_tree, input_tree):
path_value_pairs = nest.flatten_with_tuple_paths_up_to(
shallow_tree, input_tree)
paths = [p for p, _ in path_value_pairs]
values = [v for _, v in path_value_pairs]
return paths, values
# Shallow tree ends at scalar.
input_tree = [[[2, 2], [3, 3]], [[4, 9], [5, 5]]]
shallow_tree = [[True, True], [False, True]]
(flattened_input_tree_paths,
flattened_input_tree) = get_paths_and_values(shallow_tree, input_tree)
(flattened_shallow_tree_paths,
flattened_shallow_tree) = get_paths_and_values(shallow_tree, shallow_tree)
self.assertEqual(flattened_input_tree_paths,
[(0, 0), (0, 1), (1, 0), (1, 1)])
self.assertEqual(flattened_input_tree, [[2, 2], [3, 3], [4, 9], [5, 5]])
self.assertEqual(flattened_shallow_tree_paths,
[(0, 0), (0, 1), (1, 0), (1, 1)])
self.assertEqual(flattened_shallow_tree, [True, True, False, True])
# Shallow tree ends at string.
input_tree = [[("a", 1), [("b", 2), [("c", 3), [("d", 4)]]]]]
shallow_tree = [["level_1", ["level_2", ["level_3", ["level_4"]]]]]
(input_tree_flattened_as_shallow_tree_paths,
input_tree_flattened_as_shallow_tree) = get_paths_and_values(shallow_tree,
input_tree)
input_tree_flattened_paths = [p for p, _ in
nest.flatten_with_tuple_paths(input_tree)]
input_tree_flattened = nest.flatten(input_tree)
self.assertEqual(input_tree_flattened_as_shallow_tree_paths,
[(0, 0), (0, 1, 0), (0, 1, 1, 0), (0, 1, 1, 1, 0)])
self.assertEqual(input_tree_flattened_as_shallow_tree,
[("a", 1), ("b", 2), ("c", 3), ("d", 4)])
self.assertEqual(input_tree_flattened_paths,
[(0, 0, 0), (0, 0, 1),
(0, 1, 0, 0), (0, 1, 0, 1),
(0, 1, 1, 0, 0), (0, 1, 1, 0, 1),
(0, 1, 1, 1, 0, 0), (0, 1, 1, 1, 0, 1)])
self.assertEqual(input_tree_flattened, ["a", 1, "b", 2, "c", 3, "d", 4])
# Make sure dicts are correctly flattened, yielding values, not keys.
input_tree = {"a": 1, "b": {"c": 2}, "d": [3, (4, 5)]}
shallow_tree = {"a": 0, "b": 0, "d": [0, 0]}
(input_tree_flattened_as_shallow_tree_paths,
input_tree_flattened_as_shallow_tree) = get_paths_and_values(shallow_tree,
input_tree)
self.assertEqual(input_tree_flattened_as_shallow_tree_paths,
[("a",), ("b",), ("d", 0), ("d", 1)])
self.assertEqual(input_tree_flattened_as_shallow_tree,
[1, {"c": 2}, 3, (4, 5)])
# Namedtuples.
ab_tuple = collections.namedtuple("ab_tuple", "a, b")
input_tree = ab_tuple(a=[0, 1], b=2)
shallow_tree = ab_tuple(a=0, b=1)
(input_tree_flattened_as_shallow_tree_paths,
input_tree_flattened_as_shallow_tree) = get_paths_and_values(shallow_tree,
input_tree)
self.assertEqual(input_tree_flattened_as_shallow_tree_paths,
[("a",), ("b",)])
self.assertEqual(input_tree_flattened_as_shallow_tree,
[[0, 1], 2])
# Nested dicts, OrderedDicts and namedtuples.
input_tree = collections.OrderedDict(
[("a", ab_tuple(a=[0, {"b": 1}], b=2)),
("c", {"d": 3, "e": collections.OrderedDict([("f", 4)])})])
shallow_tree = input_tree
(input_tree_flattened_as_shallow_tree_paths,
input_tree_flattened_as_shallow_tree) = get_paths_and_values(shallow_tree,
input_tree)
self.assertEqual(input_tree_flattened_as_shallow_tree_paths,
[("a", "a", 0),
("a", "a", 1, "b"),
("a", "b"),
("c", "d"),
("c", "e", "f")])
self.assertEqual(input_tree_flattened_as_shallow_tree, [0, 1, 2, 3, 4])
shallow_tree = collections.OrderedDict([("a", 0), ("c", {"d": 3, "e": 1})])
(input_tree_flattened_as_shallow_tree_paths,
input_tree_flattened_as_shallow_tree) = get_paths_and_values(shallow_tree,
input_tree)
self.assertEqual(input_tree_flattened_as_shallow_tree_paths,
[("a",),
("c", "d"),
("c", "e")])
self.assertEqual(input_tree_flattened_as_shallow_tree,
[ab_tuple(a=[0, {"b": 1}], b=2),
3,
collections.OrderedDict([("f", 4)])])
shallow_tree = collections.OrderedDict([("a", 0), ("c", 0)])
(input_tree_flattened_as_shallow_tree_paths,
input_tree_flattened_as_shallow_tree) = get_paths_and_values(shallow_tree,
input_tree)
self.assertEqual(input_tree_flattened_as_shallow_tree_paths,
[("a",), ("c",)])
self.assertEqual(input_tree_flattened_as_shallow_tree,
[ab_tuple(a=[0, {"b": 1}], b=2),
{"d": 3, "e": collections.OrderedDict([("f", 4)])}])
## Shallow non-list edge-case.
# Using iterable elements.
input_tree = ["input_tree"]
shallow_tree = "shallow_tree"
(flattened_input_tree_paths,
flattened_input_tree) = get_paths_and_values(shallow_tree, input_tree)
(flattened_shallow_tree_paths,
flattened_shallow_tree) = get_paths_and_values(shallow_tree, shallow_tree)
self.assertEqual(flattened_input_tree_paths, [()])
self.assertEqual(flattened_input_tree, [input_tree])
self.assertEqual(flattened_shallow_tree_paths, [()])
self.assertEqual(flattened_shallow_tree, [shallow_tree])
input_tree = ["input_tree_0", "input_tree_1"]
shallow_tree = "shallow_tree"
(flattened_input_tree_paths,
flattened_input_tree) = get_paths_and_values(shallow_tree, input_tree)
(flattened_shallow_tree_paths,
flattened_shallow_tree) = get_paths_and_values(shallow_tree, shallow_tree)
self.assertEqual(flattened_input_tree_paths, [()])
self.assertEqual(flattened_input_tree, [input_tree])
self.assertEqual(flattened_shallow_tree_paths, [()])
self.assertEqual(flattened_shallow_tree, [shallow_tree])
# Test case where len(shallow_tree) < len(input_tree)
input_tree = {"a": "A", "b": "B", "c": "C"}
shallow_tree = {"a": 1, "c": 2}
with self.assertRaisesWithLiteralMatch( # pylint: disable=g-error-prone-assert-raises
ValueError,
nest._STRUCTURES_HAVE_MISMATCHING_LENGTHS.format(
input_length=len(input_tree),
shallow_length=len(shallow_tree))):
get_paths_and_values(shallow_tree, input_tree)
# Using non-iterable elements.
input_tree = [0]
shallow_tree = 9
(flattened_input_tree_paths,
flattened_input_tree) = get_paths_and_values(shallow_tree, input_tree)
(flattened_shallow_tree_paths,
flattened_shallow_tree) = get_paths_and_values(shallow_tree, shallow_tree)
self.assertEqual(flattened_input_tree_paths, [()])
self.assertEqual(flattened_input_tree, [input_tree])
self.assertEqual(flattened_shallow_tree_paths, [()])
self.assertEqual(flattened_shallow_tree, [shallow_tree])
input_tree = [0, 1]
shallow_tree = 9
(flattened_input_tree_paths,
flattened_input_tree) = get_paths_and_values(shallow_tree, input_tree)
(flattened_shallow_tree_paths,
flattened_shallow_tree) = get_paths_and_values(shallow_tree, shallow_tree)
self.assertEqual(flattened_input_tree_paths, [()])
self.assertEqual(flattened_input_tree, [input_tree])
self.assertEqual(flattened_shallow_tree_paths, [()])
self.assertEqual(flattened_shallow_tree, [shallow_tree])
## Both non-list edge-case.
# Using iterable elements.
input_tree = "input_tree"
shallow_tree = "shallow_tree"
(flattened_input_tree_paths,
flattened_input_tree) = get_paths_and_values(shallow_tree, input_tree)
(flattened_shallow_tree_paths,
flattened_shallow_tree) = get_paths_and_values(shallow_tree, shallow_tree)
self.assertEqual(flattened_input_tree_paths, [()])
self.assertEqual(flattened_input_tree, [input_tree])
self.assertEqual(flattened_shallow_tree_paths, [()])
self.assertEqual(flattened_shallow_tree, [shallow_tree])
# Using non-iterable elements.
input_tree = 0
shallow_tree = 0
(flattened_input_tree_paths,
flattened_input_tree) = get_paths_and_values(shallow_tree, input_tree)
(flattened_shallow_tree_paths,
flattened_shallow_tree) = get_paths_and_values(shallow_tree, shallow_tree)
self.assertEqual(flattened_input_tree_paths, [()])
self.assertEqual(flattened_input_tree, [input_tree])
self.assertEqual(flattened_shallow_tree_paths, [()])
self.assertEqual(flattened_shallow_tree, [shallow_tree])
## Input non-list edge-case.
# Using iterable elements.
input_tree = "input_tree"
shallow_tree = ["shallow_tree"]
with self.assertRaisesWithLiteralMatch(
TypeError,
nest._IF_SHALLOW_IS_SEQ_INPUT_MUST_BE_SEQ.format(type(input_tree))):
(flattened_input_tree_paths,
flattened_input_tree) = get_paths_and_values(shallow_tree, input_tree)
(flattened_shallow_tree_paths,
flattened_shallow_tree) = get_paths_and_values(shallow_tree, shallow_tree)
self.assertEqual(flattened_shallow_tree_paths, [(0,)])
self.assertEqual(flattened_shallow_tree, shallow_tree)
input_tree = "input_tree"
shallow_tree = ["shallow_tree_9", "shallow_tree_8"]
with self.assertRaisesWithLiteralMatch(
TypeError,
nest._IF_SHALLOW_IS_SEQ_INPUT_MUST_BE_SEQ.format(type(input_tree))):
(flattened_input_tree_paths,
flattened_input_tree) = get_paths_and_values(shallow_tree, input_tree)
(flattened_shallow_tree_paths,
flattened_shallow_tree) = get_paths_and_values(shallow_tree, shallow_tree)
self.assertEqual(flattened_shallow_tree_paths, [(0,), (1,)])
self.assertEqual(flattened_shallow_tree, shallow_tree)
# Using non-iterable elements.
input_tree = 0
shallow_tree = [9]
with self.assertRaisesWithLiteralMatch(
TypeError,
nest._IF_SHALLOW_IS_SEQ_INPUT_MUST_BE_SEQ.format(type(input_tree))):
(flattened_input_tree_paths,
flattened_input_tree) = get_paths_and_values(shallow_tree, input_tree)
(flattened_shallow_tree_paths,
flattened_shallow_tree) = get_paths_and_values(shallow_tree, shallow_tree)
self.assertEqual(flattened_shallow_tree_paths, [(0,)])
self.assertEqual(flattened_shallow_tree, shallow_tree)
input_tree = 0
shallow_tree = [9, 8]
with self.assertRaisesWithLiteralMatch(
TypeError,
nest._IF_SHALLOW_IS_SEQ_INPUT_MUST_BE_SEQ.format(type(input_tree))):
(flattened_input_tree_paths,
flattened_input_tree) = get_paths_and_values(shallow_tree, input_tree)
(flattened_shallow_tree_paths,
flattened_shallow_tree) = get_paths_and_values(shallow_tree, shallow_tree)
self.assertEqual(flattened_shallow_tree_paths, [(0,), (1,)])
self.assertEqual(flattened_shallow_tree, shallow_tree)
def testMapStructureUpTo(self):
# Named tuples.
ab_tuple = collections.namedtuple("ab_tuple", "a, b")
op_tuple = collections.namedtuple("op_tuple", "add, mul")
inp_val = ab_tuple(a=2, b=3)
inp_ops = ab_tuple(a=op_tuple(add=1, mul=2), b=op_tuple(add=2, mul=3))
out = nest.map_structure_up_to(
inp_val, lambda val, ops: (val + ops.add) * ops.mul, inp_val, inp_ops)
self.assertEqual(out.a, 6)
self.assertEqual(out.b, 15)
# Lists.
data_list = [[2, 4, 6, 8], [[1, 3, 5, 7, 9], [3, 5, 7]]]
name_list = ["evens", ["odds", "primes"]]
out = nest.map_structure_up_to(
name_list, lambda name, sec: "first_{}_{}".format(len(sec), name),
name_list, data_list)
self.assertEqual(out, ["first_4_evens", ["first_5_odds", "first_3_primes"]])
# Dicts.
inp_val = dict(a=2, b=3)
inp_ops = dict(a=dict(add=1, mul=2), b=dict(add=2, mul=3))
out = nest.map_structure_up_to(
inp_val,
lambda val, ops: (val + ops["add"]) * ops["mul"], inp_val, inp_ops)
self.assertEqual(out["a"], 6)
self.assertEqual(out["b"], 15)
# Non-equal dicts.
inp_val = dict(a=2, b=3)
inp_ops = dict(a=dict(add=1, mul=2), c=dict(add=2, mul=3))
with self.assertRaisesWithLiteralMatch(
ValueError,
nest._SHALLOW_TREE_HAS_INVALID_KEYS.format(["b"])):
nest.map_structure_up_to(
inp_val,
lambda val, ops: (val + ops["add"]) * ops["mul"], inp_val, inp_ops)
# Dict+custom mapping.
inp_val = dict(a=2, b=3)
inp_ops = _CustomMapping(a=dict(add=1, mul=2), b=dict(add=2, mul=3))
out = nest.map_structure_up_to(
inp_val,
lambda val, ops: (val + ops["add"]) * ops["mul"], inp_val, inp_ops)
self.assertEqual(out["a"], 6)
self.assertEqual(out["b"], 15)
# Non-equal dict/mapping.
inp_val = dict(a=2, b=3)
inp_ops = _CustomMapping(a=dict(add=1, mul=2), c=dict(add=2, mul=3))
with self.assertRaisesWithLiteralMatch(
ValueError,
nest._SHALLOW_TREE_HAS_INVALID_KEYS.format(["b"])):
nest.map_structure_up_to(
inp_val,
lambda val, ops: (val + ops["add"]) * ops["mul"], inp_val, inp_ops)
def testGetTraverseShallowStructure(self):
scalar_traverse_input = [3, 4, (1, 2, [0]), [5, 6], {"a": (7,)}, []]
scalar_traverse_r = nest.get_traverse_shallow_structure(
lambda s: not isinstance(s, tuple),
scalar_traverse_input)
self.assertEqual(scalar_traverse_r,
[True, True, False, [True, True], {"a": False}, []])
nest.assert_shallow_structure(scalar_traverse_r,
scalar_traverse_input)
structure_traverse_input = [(1, [2]), ([1], 2)]
structure_traverse_r = nest.get_traverse_shallow_structure(
lambda s: (True, False) if isinstance(s, tuple) else True,
structure_traverse_input)
self.assertEqual(structure_traverse_r,
[(True, False), ([True], False)])
nest.assert_shallow_structure(structure_traverse_r,
structure_traverse_input)
with self.assertRaisesRegexp(TypeError, "returned structure"):
nest.get_traverse_shallow_structure(lambda _: [True], 0)
with self.assertRaisesRegexp(TypeError, "returned a non-bool scalar"):
nest.get_traverse_shallow_structure(lambda _: 1, [1])
with self.assertRaisesRegexp(
TypeError, "didn't return a depth=1 structure of bools"):
nest.get_traverse_shallow_structure(lambda _: [1], [1])
def testYieldFlatStringPaths(self):
for inputs_expected in ({"inputs": [], "expected": []},
{"inputs": 3, "expected": [()]},
{"inputs": [3], "expected": [(0,)]},
{"inputs": {"a": 3}, "expected": [("a",)]},
{"inputs": {"a": {"b": 4}},
"expected": [("a", "b")]},
{"inputs": [{"a": 2}], "expected": [(0, "a")]},
{"inputs": [{"a": [2]}], "expected": [(0, "a", 0)]},
{"inputs": [{"a": [(23, 42)]}],
"expected": [(0, "a", 0, 0), (0, "a", 0, 1)]},
{"inputs": [{"a": ([23], 42)}],
"expected": [(0, "a", 0, 0), (0, "a", 1)]},
{"inputs": {"a": {"a": 2}, "c": [[[4]]]},
"expected": [("a", "a"), ("c", 0, 0, 0)]},
{"inputs": {"0": [{"1": 23}]},
"expected": [("0", 0, "1")]}):
inputs = inputs_expected["inputs"]
expected = inputs_expected["expected"]
self.assertEqual(list(nest.yield_flat_paths(inputs)), expected)
# We cannot define namedtuples within @parameterized argument lists.
# pylint: disable=invalid-name
Foo = collections.namedtuple("Foo", ["a", "b"])
Bar = collections.namedtuple("Bar", ["c", "d"])
# pylint: enable=invalid-name
@parameterized.parameters([
dict(inputs=[], expected=[]),
dict(inputs=[23, "42"], expected=[("0", 23), ("1", "42")]),
dict(inputs=[[[[108]]]], expected=[("0/0/0/0", 108)]),
dict(inputs=Foo(a=3, b=Bar(c=23, d=42)),
expected=[("a", 3), ("b/c", 23), ("b/d", 42)]),
dict(inputs=Foo(a=Bar(c=23, d=42), b=Bar(c=0, d="thing")),
expected=[("a/c", 23), ("a/d", 42), ("b/c", 0), ("b/d", "thing")]),
dict(inputs=Bar(c=42, d=43),
expected=[("c", 42), ("d", 43)]),
dict(inputs=Bar(c=[42], d=43),
expected=[("c/0", 42), ("d", 43)]),
])
def testFlattenWithStringPaths(self, inputs, expected):
self.assertEqual(
nest.flatten_with_joined_string_paths(inputs, separator="/"),
expected)
@parameterized.parameters([
dict(inputs=[], expected=[]),
dict(inputs=[23, "42"], expected=[((0,), 23), ((1,), "42")]),
dict(inputs=[[[[108]]]], expected=[((0, 0, 0, 0), 108)]),
dict(inputs=Foo(a=3, b=Bar(c=23, d=42)),
expected=[(("a",), 3), (("b", "c"), 23), (("b", "d"), 42)]),
dict(inputs=Foo(a=Bar(c=23, d=42), b=Bar(c=0, d="thing")),
expected=[(("a", "c"), 23), (("a", "d"), 42), (("b", "c"), 0),
(("b", "d"), "thing")]),
dict(inputs=Bar(c=42, d=43),
expected=[(("c",), 42), (("d",), 43)]),
dict(inputs=Bar(c=[42], d=43),
expected=[(("c", 0), 42), (("d",), 43)]),
])
def testFlattenWithTuplePaths(self, inputs, expected):
self.assertEqual(nest.flatten_with_tuple_paths(inputs), expected)
@parameterized.named_parameters(
("tuples", (1, 2), (3, 4), True, (("0", 4), ("1", 6))),
("dicts", {"a": 1, "b": 2}, {"b": 4, "a": 3}, True,
{"a": ("a", 4), "b": ("b", 6)}),
("mixed", (1, 2), [3, 4], False, (("0", 4), ("1", 6))),
("nested",
{"a": [2, 3], "b": [1, 2, 3]}, {"b": [5, 6, 7], "a": [8, 9]}, True,
{"a": [("a/0", 10), ("a/1", 12)],
"b": [("b/0", 6), ("b/1", 8), ("b/2", 10)]}))
def testMapWithPathsCompatibleStructures(self, s1, s2, check_types, expected):
def format_sum(path, *values):
return (path, sum(values))
result = nest.map_structure_with_paths(format_sum, s1, s2,
check_types=check_types)
self.assertEqual(expected, result)
@parameterized.named_parameters(
("tuples", (1, 2, 3), (4, 5), ValueError),
("dicts", {"a": 1}, {"b": 2}, ValueError),
("mixed", (1, 2), [3, 4], TypeError),
("nested",
{"a": [2, 3, 4], "b": [1, 3]},
{"b": [5, 6], "a": [8, 9]},
ValueError
))
def testMapWithPathsIncompatibleStructures(self, s1, s2, error_type):
with self.assertRaises(error_type):
nest.map_structure_with_paths(lambda path, *s: 0, s1, s2)
@parameterized.named_parameters([
dict(testcase_name="Tuples", s1=(1, 2), s2=(3, 4),
check_types=True, expected=(((0,), 4), ((1,), 6))),
dict(testcase_name="Dicts", s1={"a": 1, "b": 2}, s2={"b": 4, "a": 3},
check_types=True, expected={"a": (("a",), 4), "b": (("b",), 6)}),
dict(testcase_name="Mixed", s1=(1, 2), s2=[3, 4],
check_types=False, expected=(((0,), 4), ((1,), 6))),
dict(testcase_name="Nested",
s1={"a": [2, 3], "b": [1, 2, 3]},
s2={"b": [5, 6, 7], "a": [8, 9]},
check_types=True,
expected={"a": [(("a", 0), 10), (("a", 1), 12)],
"b": [(("b", 0), 6), (("b", 1), 8), (("b", 2), 10)]}),
])
def testMapWithTuplePathsCompatibleStructures(
self, s1, s2, check_types, expected):
def path_and_sum(path, *values):
return path, sum(values)
result = nest.map_structure_with_tuple_paths(
path_and_sum, s1, s2, check_types=check_types)
self.assertEqual(expected, result)
@parameterized.named_parameters([
dict(testcase_name="Tuples", s1=(1, 2, 3), s2=(4, 5),
error_type=ValueError),
dict(testcase_name="Dicts", s1={"a": 1}, s2={"b": 2},
error_type=ValueError),
dict(testcase_name="Mixed", s1=(1, 2), s2=[3, 4], error_type=TypeError),
dict(testcase_name="Nested",
s1={"a": [2, 3, 4], "b": [1, 3]},
s2={"b": [5, 6], "a": [8, 9]},
error_type=ValueError)
])
def testMapWithTuplePathsIncompatibleStructures(self, s1, s2, error_type):
with self.assertRaises(error_type):
nest.map_structure_with_tuple_paths(lambda path, *s: 0, s1, s2)
def testFlattenCustomSequenceThatRaisesException(self): # b/140746865
seq = _CustomSequenceThatRaisesException()
with self.assertRaisesRegexp(ValueError, "Cannot get item"):
nest.flatten(seq)
def testListToTuple(self):
input_sequence = [1, (2, {3: [4, 5, (6,)]}, None, 7, [[[8]]])]
expected = (1, (2, {3: (4, 5, (6,))}, None, 7, (((8,),),)))
nest.assert_same_structure(
nest.list_to_tuple(input_sequence),
expected,
)
class NestBenchmark(test.Benchmark):
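  """Microbenchmark of nest.assert_same_structure on 6- and 60-element structures."""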
def run_and_report(self, s1, s2, name):
burn_iter, test_iter = 100, 30000
for _ in xrange(burn_iter):
nest.assert_same_structure(s1, s2)
t0 = time.time()
for _ in xrange(test_iter):
nest.assert_same_structure(s1, s2)
t1 = time.time()
self.report_benchmark(iters=test_iter, wall_time=(t1 - t0) / test_iter,
name=name)
def benchmark_assert_structure(self):
s1 = (((1, 2), 3), 4, (5, 6))
s2 = ((("foo1", "foo2"), "foo3"), "foo4", ("foo5", "foo6"))
self.run_and_report(s1, s2, "assert_same_structure_6_elem")
s1 = (((1, 2), 3), 4, (5, 6)) * 10
s2 = ((("foo1", "foo2"), "foo3"), "foo4", ("foo5", "foo6")) * 10
self.run_and_report(s1, s2, "assert_same_structure_60_elem")
if __name__ == "__main__":
test.main()
| gunan/tensorflow | tensorflow/python/util/nest_test.py | Python | apache-2.0 | 54,367 | 0.003532 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from keystoneclient.tests.v3 import utils
from keystoneclient.v3 import endpoints
class EndpointTests(utils.TestCase, utils.CrudTests):
def setUp(self):
super(EndpointTests, self).setUp()
self.key = 'endpoint'
self.collection_key = 'endpoints'
self.model = endpoints.Endpoint
self.manager = self.client.endpoints
def new_ref(self, **kwargs):
kwargs = super(EndpointTests, self).new_ref(**kwargs)
kwargs.setdefault('interface', 'public')
kwargs.setdefault('region', uuid.uuid4().hex)
kwargs.setdefault('service_id', uuid.uuid4().hex)
kwargs.setdefault('url', uuid.uuid4().hex)
kwargs.setdefault('enabled', True)
return kwargs
def test_create_public_interface(self):
ref = self.new_ref(interface='public')
self.test_create(ref)
def test_create_admin_interface(self):
ref = self.new_ref(interface='admin')
self.test_create(ref)
def test_create_internal_interface(self):
ref = self.new_ref(interface='internal')
self.test_create(ref)
def test_create_invalid_interface(self):
ref = self.new_ref(interface=uuid.uuid4().hex)
self.assertRaises(Exception, self.manager.create,
**utils.parameterize(ref))
def test_update_public_interface(self):
ref = self.new_ref(interface='public')
self.test_update(ref)
def test_update_admin_interface(self):
ref = self.new_ref(interface='admin')
self.test_update(ref)
def test_update_internal_interface(self):
ref = self.new_ref(interface='internal')
self.test_update(ref)
def test_update_invalid_interface(self):
ref = self.new_ref(interface=uuid.uuid4().hex)
self.assertRaises(Exception, self.manager.update,
**utils.parameterize(ref))
def test_list_public_interface(self):
interface = 'public'
expected_path = 'v3/%s?interface=%s' % (self.collection_key, interface)
self.test_list(expected_path=expected_path, interface=interface)
def test_list_admin_interface(self):
interface = 'admin'
expected_path = 'v3/%s?interface=%s' % (self.collection_key, interface)
self.test_list(expected_path=expected_path, interface=interface)
def test_list_internal_interface(self):
        interface = 'internal'
expected_path = 'v3/%s?interface=%s' % (self.collection_key, interface)
self.test_list(expected_path=expected_path, interface=interface)
def test_list_invalid_interface(self):
interface = uuid.uuid4().hex
expected_path = 'v3/%s?interface=%s' % (self.collection_key, interface)
self.assertRaises(Exception, self.manager.list,
expected_path=expected_path, interface=interface)
| citrix-openstack-build/python-keystoneclient | keystoneclient/tests/v3/test_endpoints.py | Python | apache-2.0 | 3,490 | 0 |
# -*- coding:utf-8 -*-
import unittest
import nose
from mobileclick import Iunit, Intent, Summary, SummaryError
class SummaryTestCase(unittest.TestCase):
def setUp(self):
self.qid = 'MC2-E-0001'
self.i1 = Intent(self.qid, '%s-INTENT0001' % self.qid, 'Link')
self.i2 = Intent(self.qid, '%s-INTENT0002' % self.qid, 'Link')
self.u1 = Iunit(self.qid, '%s-0001' % self.qid, 'A')
self.u2 = Iunit(self.qid, '%s-0002' % self.qid, 'A')
self.u3 = Iunit(self.qid, '%s-0003' % self.qid, 'A')
self.u4 = Iunit(self.qid, '%s-0004' % self.qid, 'A')
self.first = [self.u1, self.i1, self.i2]
self.seconds = {
self.i1.iid: [self.u2],
self.i2.iid: [self.u3, self.u4]
}
self.summary = Summary(self.qid, self.first, self.seconds)
def test_summary_init(self):
'''
Summary.__init__ (validation)
'''
self.assertRaises(SummaryError, Summary, self.qid, self.first, {})
self.assertRaises(SummaryError, Summary, self.qid, [], self.seconds)
self.assertRaises(SummaryError, Summary, self.qid, [1], {})
self.assertRaises(SummaryError, Summary, self.qid,
[self.i1], {self.i1.iid: [self.i2]})
self.assertRaises(SummaryError, Summary, self.qid,
[self.i1, self.i1], {self.i1.iid: [self.u2]})
self.assertRaises(SummaryError, Summary, self.qid,
[Iunit('MC2-E-0002', '0001', 'A')])
def test_summary_property(self):
'''
Summary.first and Summary.second(iid)
'''
self.assertEqual(self.summary.qid, self.qid)
self.assertEqual(len(self.summary.first), 3)
self.assertIsInstance(self.summary.first, tuple)
self.assertEqual(self.summary.first[0].uid, 'MC2-E-0001-0001')
iid = 'MC2-E-0001-INTENT0001'
self.assertIsInstance(self.summary.second(iid), tuple)
self.assertEqual(self.summary.second(iid)[0].uid, 'MC2-E-0001-0002')
iid = 'MC2-E-0001-INTENT0002'
self.assertEqual(self.summary.second(iid)[0].uid, 'MC2-E-0001-0003')
def test_summary_add(self):
'''
Summary.add
'''
s = Summary(self.qid)
s.add(self.i1)
self.assertRaises(SummaryError, s.add, self.i1)
s.add(self.u1)
s.add(self.u2, self.i1.iid)
self.assertRaises(SummaryError, s.add, self.u3, self.i2.iid)
self.assertRaises(SummaryError, s.add, self.i2, self.i2.iid)
s.add(self.i2)
s.add(self.u3, self.i2.iid)
s.add(self.u4, self.i2.iid)
self.assertRaises(SummaryError, s.add, self.i2)
self.assertEqual(s.first[0].iid, self.i1.iid)
self.assertEqual(s.first[1].uid, self.u1.uid)
self.assertEqual(s.second(self.i1.iid)[0].uid, self.u2.uid)
self.assertEqual(s.first[2].iid, self.i2.iid)
self.assertEqual(s.second(self.i2.iid)[0].uid, self.u3.uid)
self.assertEqual(s.second(self.i2.iid)[1].uid, self.u4.uid)
def test_summary_to_xml(self):
'''
Summary.to_xml
'''
from xml.etree.ElementTree import tostring
xml = self.summary.to_xml()
xmlstr = tostring(xml, 'utf-8')
self.assertEqual(xmlstr,
'''<result qid="MC2-E-0001"><first><iunit uid="MC2-E-0001-0001" /><link iid="MC2-E-0001-INTENT0001" /><link iid="MC2-E-0001-INTENT0002" /></first><second iid="MC2-E-0001-INTENT0001"><iunit uid="MC2-E-0001-0002" /></second><second iid="MC2-E-0001-INTENT0002"><iunit uid="MC2-E-0001-0003" /><iunit uid="MC2-E-0001-0004" /></second></result>''')
if __name__ == '__main__':
nose.main(argv=['nose', '-v'])
| mpkato/mobileclick | tests/summary_test.py | Python | mit | 3,680 | 0.003261 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# module builder script
#
import os, sys, shutil, tempfile, subprocess, platform
template_dir = os.path.abspath(os.path.dirname(sys._getframe(0).f_code.co_filename))
support_dir = os.path.join(template_dir, 'support')
sdk_dir = os.path.dirname(template_dir)
android_support_dir = os.path.join(sdk_dir, 'android')
sys.path.extend([sdk_dir, support_dir, android_support_dir])
from androidsdk import AndroidSDK
from manifest import Manifest
import traceback, uuid, time, thread, string, markdown
from os.path import join, splitext, split, exists
def run_pipe(args, cwd=None):
return subprocess.Popen(args, stderr=subprocess.STDOUT, stdout=subprocess.PIPE, cwd=cwd)
def print_emulator_line(line):
if line:
s = line.strip()
if s!='':
if s.startswith("["):
print s
else:
print "[DEBUG] %s" % s
sys.stdout.flush()
def run_python(args, cwd=None):
args.insert(0, sys.executable)
return run(args, cwd=cwd)
def run(args, cwd=None):
proc = run_pipe(args, cwd)
rc = None
while True:
print_emulator_line(proc.stdout.readline())
rc = proc.poll()
if rc!=None: break
return rc
def run_ant(project_dir):
build_xml = os.path.join(project_dir, 'build.xml')
ant = 'ant'
if 'ANT_HOME' in os.environ:
ant = os.path.join(os.environ['ANT_HOME'], 'bin', 'ant')
if platform.system() == 'Windows':
ant += '.bat'
ant_args = [ant, '-f', build_xml]
if platform.system() == 'Windows':
ant_args = ['cmd.exe', '/C'] + ant_args
else:
# wrap with /bin/sh in Unix, in some cases the script itself isn't executable
ant_args = ['/bin/sh'] + ant_args
run(ant_args, cwd=project_dir)
ignoreFiles = ['.gitignore', '.cvsignore', '.DS_Store'];
ignoreDirs = ['.git','.svn','_svn','CVS'];
android_sdk = None
def copy_resources(source, target):
if not os.path.exists(os.path.expanduser(target)):
os.makedirs(os.path.expanduser(target))
for root, dirs, files in os.walk(source):
for name in ignoreDirs:
if name in dirs:
dirs.remove(name) # don't visit ignored directories
for file in files:
if file in ignoreFiles:
continue
from_ = os.path.join(root, file)
to_ = os.path.expanduser(from_.replace(source, target, 1))
to_directory = os.path.expanduser(split(to_)[0])
if not exists(to_directory):
os.makedirs(to_directory)
shutil.copyfile(from_, to_)
def is_ios(platform):
return platform == 'iphone' or platform == 'ipad' or platform == 'ios'
def is_android(platform):
return platform == 'android'
def stage(platform, project_dir, manifest, callback):
dont_delete = True
dir = tempfile.mkdtemp('ti','m')
print '[DEBUG] Staging module project at %s' % dir
try:
name = manifest.name
moduleid = manifest.moduleid
version = manifest.version
script = os.path.join(template_dir,'..','project.py')
# create a temporary proj
create_project_args = [script, name, moduleid, dir, platform]
if is_android(platform):
create_project_args.append(android_sdk.get_android_sdk())
run_python(create_project_args)
gen_project_dir = os.path.join(dir, name)
gen_resources_dir = os.path.join(gen_project_dir, 'Resources')
# copy in our example source
copy_resources(os.path.join(project_dir,'example'), gen_resources_dir)
# patch in our tiapp.xml
tiapp = os.path.join(gen_project_dir, 'tiapp.xml')
xml = open(tiapp).read()
tiappf = open(tiapp,'w')
xml = xml.replace('<guid/>','<guid></guid>')
xml = xml.replace('</guid>','</guid>\n<modules>\n<module version="%s">%s</module>\n</modules>\n' % (version,moduleid))
# generate a guid since this is currently done by developer
guid = str(uuid.uuid4())
xml = xml.replace('<guid></guid>','<guid>%s</guid>' % guid)
tiappf.write(xml)
tiappf.close()
module_dir = os.path.join(gen_project_dir,'modules',platform)
if not os.path.exists(module_dir):
os.makedirs(module_dir)
module_zip_name = '%s-%s-%s.zip' % (moduleid.lower(), platform, version)
module_zip = os.path.join(project_dir, 'dist', module_zip_name)
if is_ios(platform):
module_zip = os.path.join(project_dir, module_zip_name)
script = os.path.join(project_dir,'build.py')
run_python([script])
elif is_android(platform):
run_ant(project_dir)
shutil.copy(module_zip, gen_project_dir)
callback(gen_project_dir)
except:
dont_delete = True
traceback.print_exc(file=sys.stderr)
sys.exit(1)
finally:
if not dont_delete: shutil.rmtree(dir)
def docgen(module_dir, dest_dir):
if not os.path.exists(dest_dir):
print "Creating dir: %s" % dest_dir
os.makedirs(dest_dir)
doc_dir = os.path.join(module_dir, 'documentation')
if not os.path.exists(doc_dir):
print "Couldn't find documentation file at: %s" % doc_dir
return
for file in os.listdir(doc_dir):
if file in ignoreFiles or os.path.isdir(os.path.join(doc_dir, file)):
continue
md = open(os.path.join(doc_dir, file), 'r').read()
html = markdown.markdown(md)
filename = string.replace(file, '.md', '.html')
filepath = os.path.join(dest_dir, filename)
print 'Generating %s' % filepath
open(filepath, 'w+').write(html)
# a simplified .properties file parser
def read_properties(file):
properties = {}
for line in file.read().splitlines():
line = line.strip()
if len(line) > 0 and line[0] == '#': continue
if len(line) == 0 or '=' not in line: continue
key, value = line.split('=', 1)
properties[key.strip()] = value.strip().replace('\\\\', '\\')
return properties
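# Illustrative example (hypothetical build.properties content, not shipped with this script):
#   # comment lines and lines without '=' are skipped
#   android.platform = /opt/android-sdk/platforms/android-8
# read_properties() would return:
#   {'android.platform': '/opt/android-sdk/platforms/android-8'}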
def main(args):
global android_sdk
# command platform project_dir
command = args[1]
platform = args[2]
project_dir = os.path.expanduser(args[3])
manifest = Manifest(os.path.join(project_dir, 'manifest'))
error = False
if is_android(platform):
build_properties = read_properties(open(os.path.join(project_dir, 'build.properties')))
android_sdk_path = os.path.dirname(os.path.dirname(build_properties['android.platform']))
android_sdk = AndroidSDK(android_sdk_path)
if command == 'run':
def run_callback(gen_project_dir):
script = os.path.abspath(os.path.join(template_dir,'..',platform,'builder.py'))
script_args = [script, 'run', gen_project_dir]
if is_android(platform):
script_args.append(android_sdk.get_android_sdk())
rc = run_python(script_args)
# run the project
if rc==1:
if is_ios(platform):
error = os.path.join(gen_project_dir,'build','iphone','build','build.log')
print "[ERROR] Build Failed. See: %s" % os.path.abspath(error)
else:
print "[ERROR] Build Failed."
stage(platform, project_dir, manifest, run_callback)
elif command == 'run-emulator':
if is_android(platform):
def run_emulator_callback(gen_project_dir):
script = os.path.abspath(os.path.join(template_dir, '..', platform, 'builder.py'))
run_python([script, 'run-emulator', gen_project_dir, android_sdk.get_android_sdk()])
stage(platform, project_dir, manifest, run_emulator_callback)
elif command == 'docgen':
if is_android(platform):
dest_dir = args[4]
docgen(project_dir, dest_dir)
if error:
sys.exit(1)
else:
sys.exit(0)
if __name__ == "__main__":
main(sys.argv)
| arnaudsj/titanium_mobile | support/module/builder.py | Python | apache-2.0 | 7,110 | 0.03488 |
import numpy as np
import os
from mpEntropy import mpSystem
import matplotlib as mpl
from matplotlib.pyplot import cm
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
from scipy.stats import norm
from scipy.signal import savgol_filter
from scipy.optimize import curve_fit
# This is a workaround until scipy fixes the issue
import warnings
warnings.filterwarnings(action="ignore", module="scipy", message="^internal gelsd")
# Load sysVar
sysVar = mpSystem("../interact_0.ini", plotOnly=True)
# Create plot folder
pltfolder = "./epsplots/"
if not os.path.exists(pltfolder):
os.mkdir(pltfolder)
print("Plotting", end='')
mpl.use('Agg')
# minimum and maximum times to plot
min_time = 0
max_time = 3
inlay_min_time = 10
inlay_max_time = 100
inlay_log_min_time = 0
inlay_log_max_time = 3
# styles and stuff
avgstyle = 'dashed'
avgsize = 0.6
expectstyle = 'solid'
expectsize = 1
legend_size = 10
font_size = 10
# https://scipy.github.io/old-wiki/pages/Cookbook/Matplotlib/LaTeX_Examples.html
fig_width_pt = 246.0 # Get this from LaTeX using \showthe\columnwidth
inches_per_pt = 1.0 / 72.27 # Convert pt to inches
golden_mean = (np.sqrt(5) - 1.0) / 2.0 # Aesthetic ratio
fig_width = fig_width_pt * inches_per_pt # width in inches
fig_height = fig_width * golden_mean # height in inches
fig_size = [fig_width, fig_height]
# padding in units of fontsize
padding = 0.32
params = {
'axes.labelsize': 10,
'font.size': 10,
'legend.fontsize': 10,
'xtick.labelsize': 8,
'ytick.labelsize': 8,
'lines.linewidth': 1,
'figure.figsize': fig_size,
'legend.frameon': False,
'legend.loc': 'best',
'mathtext.default': 'rm' # see http://matplotlib.org/users/customizing.html
}
plt.rcParams['agg.path.chunksize'] = 0
plt.rcParams.update(params)
plt.rc('text', usetex=True)
plt.rc('font', **{'family': 'sans-serif', 'sans-serif': ['Arial']})
loavgpercent = sysVar.plotLoAvgPerc # percentage of time evolution to start averaging
loavgind = int(loavgpercent * sysVar.dataPoints) # index to start at when calculating average and stddev
loavgtime = np.round(loavgpercent * (sysVar.deltaT * sysVar.steps * sysVar.plotTimeScale), 2)
# stuff for averaging
if sysVar.boolPlotAverages:
print(' with averaging from Jt=%.2f' % loavgtime, end='')
fwidth = sysVar.plotSavgolFrame
ford = sysVar.plotSavgolOrder
ent_array = np.loadtxt('../data/entropy.txt')
# multiply step array with time scale
step_array = ent_array[:, 0] * sysVar.plotTimeScale
min_index = int(min_time / step_array[-1] * len(step_array))
max_index = int(max_time / step_array[-1] * len(step_array))
inlay_min_index = int(inlay_min_time / step_array[-1] * len(step_array))
inlay_max_index = int(inlay_max_time / step_array[-1] * len(step_array))
inlay_log_min_index = int(inlay_log_min_time / step_array[-1] * len(step_array))
inlay_log_max_index = int(inlay_log_max_time / step_array[-1] * len(step_array))
#### Complete system Entropy
if os.path.isfile('../data/total_entropy.txt'):
totent_array = np.loadtxt('../data/total_entropy.txt')
plt.plot(totent_array[min_index:max_index, 0] * sysVar.plotTimeScale, totent_array[min_index:max_index, 1] * 1e13,
linewidth=0.6, color='r')
plt.grid()
plt.xlabel(r'$J\,t$')
plt.ylabel(r'Total system entropy $/ 10^{-13}$')
plt.tight_layout(padding)
###
plt.savefig(pltfolder + 'entropy_total.eps', format='eps', dpi=1000)
plt.clf()
print('.', end='', flush=True)
### Subsystem Entropy
fldat = open(pltfolder + 'ent_fluctuation_N' + str(sysVar.N) + '.txt', 'w')
fldat.write('N_tot: %i\n' % sysVar.N)
avg = np.mean(ent_array[loavgind:, 1], dtype=np.float64)
stddev = np.std(ent_array[loavgind:, 1], dtype=np.float64)
fldat.write('ssent_average: %.16e\n' % avg)
fldat.write('ssent_stddev: %.16e\n' % stddev)
fldat.write('ssent_rel._fluctuation: %.16e\n' % (stddev / avg))
fldat.close()
plt.plot(step_array[min_index:max_index], ent_array[min_index:max_index, 1], color='r')
if sysVar.boolPlotAverages:
tavg = savgol_filter(ent_array[:, 1], fwidth, ford)
plt.plot(step_array[loavgind:], tavg[loavgind:], linewidth=avgsize, linestyle=avgstyle, color='black')
plt.xlabel(r'$J\,t$')
plt.ylabel(r'$S\textsubscript{sys}$')
plt.tight_layout(padding)
plt.savefig(pltfolder + 'entropy_subsystem.eps', format='eps', dpi=1000)
plt.clf()
# Subsystem entropy with logarithmic inlay
plt.plot(step_array[min_index:max_index], ent_array[min_index:max_index, 1], color='r')
if sysVar.boolPlotAverages:
tavg = savgol_filter(ent_array[:, 1], fwidth, ford)
plt.plot(step_array[loavgind:], tavg[loavgind:], linewidth=avgsize, linestyle=avgstyle, color='black')
plt.xlabel(r'$J\,t$')
plt.ylabel(r'$S\textsubscript{sys}$')
a = plt.axes([.5, .35, .4, .4])
plt.semilogy(step_array[inlay_log_min_index:inlay_log_max_index],
np.abs(avg - ent_array[inlay_log_min_index:inlay_log_max_index, 1]), color='r')
plt.ylabel(r'$|\,\overline{S}\textsubscript{sys} - S\textsubscript{sys}(t)|$')
plt.yticks([])
plt.savefig(pltfolder + 'entropy_subsystem_inlay_log.eps', format='eps', dpi=1000)
plt.clf()
# Subsystem entropy with inlay
plt.plot(step_array[min_index:max_index], ent_array[min_index:max_index, 1], color='r')
if sysVar.boolPlotAverages:
tavg = savgol_filter(ent_array[:, 1], fwidth, ford)
plt.plot(step_array[loavgind:], tavg[loavgind:], linewidth=avgsize, linestyle=avgstyle, color='black')
plt.xlabel(r'$J\,t$')
plt.ylabel(r'$S\textsubscript{sys}$')
a = plt.axes([.45, .35, .4, .4])
plt.plot(step_array[inlay_min_index:inlay_max_index], avg - ent_array[inlay_min_index:inlay_max_index, 1],
linewidth=0.2, color='r')
plt.ylabel(r'$\overline{S}\textsubscript{sys} - S\textsubscript{sys}(t)$')
a.yaxis.tick_right()
tmp_ticks = list(a.get_xticks())
tmp_ticks.pop(0)
if tmp_ticks[-1] >= inlay_max_time:
tmp_ticks.pop(-1)
a.set_xticks(tmp_ticks + [inlay_min_time])
plt.savefig(pltfolder + 'entropy_subsystem_inlay.eps', format='eps', dpi=1000)
plt.clf()
print('.', end='', flush=True)
# histogram of fluctuations
n, bins, patches = plt.hist(ent_array[loavgind:, 1] - avg, 51, normed=1, rwidth=0.8, align='mid')
(mu, sigma) = norm.fit(ent_array[loavgind:, 1] - avg)
y = mlab.normpdf(bins, mu, sigma)
l = plt.plot(bins, y, 'r--')
mu_magnitude = np.floor(np.log10(np.abs(mu)))
mu /= np.power(10, mu_magnitude)
sigma_magnitude = np.floor(np.log10(sigma))
sigma /= np.power(10, sigma_magnitude)
plt.figtext(0.965, 0.80,
'$\mu = %.2f \cdot 10^{%i}$\n$\sigma = %.2f \cdot 10^{%i}$' % (mu, mu_magnitude, sigma, sigma_magnitude),
ha='right', va='bottom', multialignment="left")
plt.xlabel(r'$\Delta S_{sub}$')
plt.ylabel(r'PD')
plt.tight_layout(padding)
plt.savefig(pltfolder + 'entropy_subsystem_fluctuations.eps', format='eps', dpi=1000)
plt.clf()
print('.', end='', flush=True)
print(" done!")
| marvinlenk/subsystem_entropy_epsplots | pyplot_eps/ent_eps.py | Python | bsd-2-clause | 6,836 | 0.00395 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# MySQL Connector/Python - MySQL driver written in Python.
# Copyright (c) 2009, 2012, Oracle and/or its affiliates. All rights reserved.
# MySQL Connector/Python is licensed under the terms of the GPLv2
# <http://www.gnu.org/licenses/old-licenses/gpl-2.0.html>, like most
# MySQL Connectors. There are special exceptions to the terms and
# conditions of the GPLv2 as it is applied to this software, see the
# FOSS License Exception
# <http://www.mysql.com/about/legal/licensing/foss-exception.html>.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import sys, os
import mysql.connector
"""
Example using MySQL Connector/Python showing:
* the usefulness of unicode, if it works correctly..
* dropping and creating a table
* inserting and selecting a row
"""
info = """
For this to work you need to make sure your terminal can output
unicode character correctly. Check if the encoding of your terminal
is set to UTF-8.
"""
def main(config):
output = []
db = mysql.connector.Connect(**config)
cursor = db.cursor()
# Show the unicode string we're going to use
unistr = u"\u00bfHabla espa\u00f1ol?"
output.append("Unicode string: %s" % unistr.encode('utf8'))
# Drop table if exists, and create it new
stmt_drop = "DROP TABLE IF EXISTS unicode"
cursor.execute(stmt_drop)
stmt_create = """
CREATE TABLE unicode (
id TINYINT UNSIGNED NOT NULL AUTO_INCREMENT,
str VARCHAR(50) DEFAULT '' NOT NULL,
PRIMARY KEY (id)
) CHARACTER SET 'utf8'"""
cursor.execute(stmt_create)
# Insert a row
stmt_insert = "INSERT INTO unicode (str) VALUES (%s)"
cursor.execute(stmt_insert, (unistr,))
# Select it again and show it
stmt_select = "SELECT str FROM unicode WHERE id = %s"
cursor.execute(stmt_select, (1,))
row = cursor.fetchone()
output.append("Unicode string coming from db: %s" % row[0].encode('utf8'))
# Cleaning up, dropping the table again
cursor.execute(stmt_drop)
cursor.close()
db.close()
return output
if __name__ == '__main__':
#
# Configure MySQL login and database to use in config.py
#
from config import Config
config = Config.dbinfo().copy()
print info
out = main(config)
print '\n'.join(out)
| mitchcapper/mythbox | resources/lib/mysql-connector-python/python2/examples/unicode.py | Python | gpl-2.0 | 2,921 | 0.003766 |
# coding=utf-8
"""
The SNMPRawCollector is designed for collecting data from SNMP-enabled devices,
using a set of specified OIDs.
#### Configuration
Below is an example configuration for the SNMPRawCollector. The collector
can collect data from any number of devices by adding configuration sections
under the *devices* header. By default the collector will collect every 60
seconds. This might be a bit excessive and put unnecessary load on the
devices being polled. You may wish to change this to every 300 seconds. However
you need to modify your graphite data retentions to handle this properly.
```
# Options for SNMPRawCollector
enabled = True
interval = 60
[devices]
# Start the device configuration
# Note: this name will be used in the metric path.
[[my-identification-for-this-host]]
host = localhost
port = 161
community = public
# Start the OID list for this device
# Note: the value part will be used in the metric path.
[[[oids]]]
1.3.6.1.4.1.2021.10.1.3.1 = cpu.load.1min
1.3.6.1.4.1.2021.10.1.3.2 = cpu.load.5min
1.3.6.1.4.1.2021.10.1.3.3 = cpu.load.15min
# If you want another host, you can. But you probably won't need it.
[[another-identification]]
host = router1.example.com
port = 161
community = public
[[[oids]]]
oid = metric.path
oid = metric.path
```
Note: If you modify the SNMPRawCollector configuration, you will need to
restart diamond.
#### Dependencies
* pysmnp (which depends on pyasn1 0.1.7 and pycrypto)
"""
import os
import sys
import time
sys.path.insert(0, os.path.join(os.path.dirname(os.path.dirname(__file__)),
'snmp'))
from snmp import SNMPCollector as parent_SNMPCollector
from diamond.metric import Metric
class SNMPRawCollector(parent_SNMPCollector):
def __init__(self, *args, **kwargs):
super(SNMPRawCollector, self).__init__(*args, **kwargs)
# list to save non-existing oid's per device, to avoid repetition of
# errors in logging. restart diamond/collector to flush this
self.skip_list = []
def get_default_config(self):
"""
Override SNMPCollector.get_default_config method to provide
default_config for the SNMPInterfaceCollector
"""
default_config = super(SNMPRawCollector,
self).get_default_config()
default_config.update({
'oids': {},
'path_prefix': 'servers',
'path_suffix': 'snmp',
})
return default_config
def _precision(self, value):
"""
Return the precision of the number
"""
value = str(value)
decimal = value.rfind('.')
if decimal == -1:
return 0
return len(value) - decimal - 1
def _skip(self, device, oid, reason=None):
self.skip_list.append((device, oid))
if reason is not None:
self.log.warn('Muted \'{0}\' on \'{1}\', because: {2}'.format(
oid, device, reason))
def _get_value_walk(self, device, oid, host, port, community):
data = self.walk(oid, host, port, community)
if data is None:
self._skip(device, oid, 'device down (#2)')
return
self.log.debug('Data received from WALK \'{0}\': [{1}]'.format(
device, data))
if len(data) != 1:
self._skip(
device,
oid,
'unexpected response, data has {0} entries'.format(
len(data)))
return
# because we only allow 1-key dicts, we can pick with absolute index
value = data.items()[0][1]
return value
def _get_value(self, device, oid, host, port, community):
data = self.get(oid, host, port, community)
if data is None:
self._skip(device, oid, 'device down (#1)')
return
self.log.debug('Data received from GET \'{0}\': [{1}]'.format(
device, data))
if len(data) == 0:
self._skip(device, oid, 'empty response, device down?')
return
if oid not in data:
# oid is not even in hierarchy, happens when using 9.9.9.9
# but not when using 1.9.9.9
self._skip(device, oid, 'no object at OID (#1)')
return
value = data[oid]
if value == 'No Such Object currently exists at this OID':
self._skip(device, oid, 'no object at OID (#2)')
return
if value == 'No Such Instance currently exists at this OID':
return self._get_value_walk(device, oid, host, port, community)
return value
def collect_snmp(self, device, host, port, community):
"""
Collect SNMP interface data from device
"""
self.log.debug(
'Collecting raw SNMP statistics from device \'{0}\''.format(device))
dev_config = self.config['devices'][device]
if 'oids' in dev_config:
for oid, metricName in dev_config['oids'].items():
if (device, oid) in self.skip_list:
self.log.debug(
'Skipping OID \'{0}\' ({1}) on device \'{2}\''.format(
oid, metricName, device))
continue
timestamp = time.time()
value = self._get_value(device, oid, host, port, community)
if value is None:
continue
self.log.debug(
'\'{0}\' ({1}) on device \'{2}\' - value=[{3}]'.format(
oid, metricName, device, value))
path = '.'.join([self.config['path_prefix'], device,
self.config['path_suffix'], metricName])
metric = Metric(path=path, value=value, timestamp=timestamp,
precision=self._precision(value),
metric_type='GAUGE')
self.publish_metric(metric)
| disqus/Diamond | src/collectors/snmpraw/snmpraw.py | Python | mit | 6,089 | 0.000493 |
import os
import sys
import shutil
import subprocess
BASEDIR,BASEFILE = os.path.split(os.path.abspath(__file__))
print(BASEDIR,BASEFILE,os.getcwd())
par_dir,cur_dir = os.path.split(BASEDIR)
src_dir = os.path.join(par_dir,'AnimeWatch-PyQt5')
deb_config_dir = os.path.join(BASEDIR,'DEBIAN')
control_file = os.path.join(deb_config_dir,'control')
lines = open(control_file,'r').readlines()
dest_dir = None
exec_file = os.path.join(BASEDIR,'anime-watch')
desk_file = os.path.join(BASEDIR,'AnimeWatch.desktop')
for i in lines:
i = i.strip()
if i.startswith('Version:'):
version_num = i.replace('Version:','',1).strip()
dest_dir = os.path.join(BASEDIR,'AnimeWatch-'+version_num)
break
if dest_dir:
    usr_share = os.path.join(dest_dir,'usr','share','applications')
    usr_bin = os.path.join(dest_dir,'usr','bin')
    usr_share_animewatch = os.path.join(dest_dir,'usr','share','AnimeWatch')
    if os.path.exists(dest_dir):
        shutil.rmtree(dest_dir)
    os.makedirs(dest_dir)
    os.makedirs(usr_share)
    os.makedirs(usr_bin)
shutil.copytree(deb_config_dir,os.path.join(dest_dir,'DEBIAN'))
shutil.copy(exec_file,usr_bin)
shutil.copy(desk_file,usr_share)
shutil.copytree(src_dir,usr_share_animewatch)
subprocess.call(['dpkg-deb','--build',dest_dir])
deb_pkg = os.path.basename(dest_dir)+'.deb'
print('deb package created successfully in current directory. Now install the package using command: \n\nsudo gdebi {0}\n\n'.format(deb_pkg))
else:
print('no version number in control file')
| kanishka-linux/AnimeWatch | AnimeWatch-Debian-PyQt5/create_deb.py | Python | gpl-3.0 | 1,480 | 0.032432 |
"""
Contains all classes and functions to deal with lists, dicts, generators and
iterators in general.
Array modifications
*******************
If the content of an array (``set``/``list``) is requested somewhere, the
current module will be checked for appearances of ``arr.append``,
``arr.insert``, etc. If the ``arr`` name points to an actual array, the
content will be added.
This can be really CPU intensive, as you can imagine. Because |jedi| has to
follow **every** ``append`` and check whether it's the right array. However this
works pretty well, because in *slow* cases, the recursion detector and other
settings will stop this process.
It is important to note that:
1. Array modifications work only in the current module.
2. Jedi only checks Array additions; ``list.pop``, etc. are ignored.
"""
from jedi import debug
from jedi import settings
from jedi import common
from jedi.common import unite, safe_property
from jedi._compatibility import unicode, zip_longest, is_py3
from jedi.evaluate import compiled
from jedi.evaluate import helpers
from jedi.evaluate import analysis
from jedi.evaluate import pep0484
from jedi.evaluate import context
from jedi.evaluate import precedence
from jedi.evaluate import recursion
from jedi.evaluate.cache import memoize_default
from jedi.evaluate.filters import DictFilter, AbstractNameDefinition, \
ParserTreeFilter
class AbstractSequence(context.Context):
builtin_methods = {}
api_type = 'instance'
def __init__(self, evaluator):
super(AbstractSequence, self).__init__(evaluator, evaluator.BUILTINS)
def get_filters(self, search_global, until_position=None, origin_scope=None):
raise NotImplementedError
@property
def name(self):
return compiled.CompiledContextName(self, self.array_type)
class BuiltinMethod(object):
"""``Generator.__next__`` ``dict.values`` methods and so on."""
def __init__(self, builtin_context, method, builtin_func):
self._builtin_context = builtin_context
self._method = method
self._builtin_func = builtin_func
def py__call__(self, params):
return self._method(self._builtin_context)
def __getattr__(self, name):
return getattr(self._builtin_func, name)
class SpecialMethodFilter(DictFilter):
"""
A filter for methods that are defined in this module on the corresponding
classes like Generator (for __next__, etc).
"""
class SpecialMethodName(AbstractNameDefinition):
api_type = 'function'
def __init__(self, parent_context, string_name, callable_, builtin_context):
self.parent_context = parent_context
self.string_name = string_name
self._callable = callable_
self._builtin_context = builtin_context
def infer(self):
filter = next(self._builtin_context.get_filters())
# We can take the first index, because on builtin methods there's
# always only going to be one name. The same is true for the
# inferred values.
builtin_func = next(iter(filter.get(self.string_name)[0].infer()))
return set([BuiltinMethod(self.parent_context, self._callable, builtin_func)])
def __init__(self, context, dct, builtin_context):
super(SpecialMethodFilter, self).__init__(dct)
self.context = context
self._builtin_context = builtin_context
"""
This context is what will be used to introspect the name, where as the
other context will be used to execute the function.
We distinguish, because we have to.
"""
def _convert(self, name, value):
return self.SpecialMethodName(self.context, name, value, self._builtin_context)
def has_builtin_methods(cls):
base_dct = {}
# Need to care properly about inheritance. Builtin Methods should not get
# lost, just because they are not mentioned in a class.
for base_cls in reversed(cls.__bases__):
try:
base_dct.update(base_cls.builtin_methods)
except AttributeError:
pass
cls.builtin_methods = base_dct
for func in cls.__dict__.values():
try:
cls.builtin_methods.update(func.registered_builtin_methods)
except AttributeError:
pass
return cls
def register_builtin_method(method_name, python_version_match=None):
def wrapper(func):
if python_version_match and python_version_match != 2 + int(is_py3):
# Some functions do only apply to certain versions.
return func
dct = func.__dict__.setdefault('registered_builtin_methods', {})
dct[method_name] = func
return func
return wrapper
@has_builtin_methods
class GeneratorMixin(object):
array_type = None
@register_builtin_method('send')
@register_builtin_method('next', python_version_match=2)
@register_builtin_method('__next__', python_version_match=3)
def py__next__(self):
# TODO add TypeError if params are given.
return unite(lazy_context.infer() for lazy_context in self.py__iter__())
def get_filters(self, search_global, until_position=None, origin_scope=None):
gen_obj = compiled.get_special_object(self.evaluator, 'GENERATOR_OBJECT')
yield SpecialMethodFilter(self, self.builtin_methods, gen_obj)
for filter in gen_obj.get_filters(search_global):
yield filter
def py__bool__(self):
return True
def py__class__(self):
gen_obj = compiled.get_special_object(self.evaluator, 'GENERATOR_OBJECT')
return gen_obj.py__class__()
@property
def name(self):
return compiled.CompiledContextName(self, 'generator')
class Generator(GeneratorMixin, context.Context):
"""Handling of `yield` functions."""
def __init__(self, evaluator, func_execution_context):
super(Generator, self).__init__(evaluator, parent_context=evaluator.BUILTINS)
self._func_execution_context = func_execution_context
def py__iter__(self):
return self._func_execution_context.get_yield_values()
def __repr__(self):
return "<%s of %s>" % (type(self).__name__, self._func_execution_context)
class CompForContext(context.TreeContext):
@classmethod
def from_comp_for(cls, parent_context, comp_for):
return cls(parent_context.evaluator, parent_context, comp_for)
def __init__(self, evaluator, parent_context, comp_for):
super(CompForContext, self).__init__(evaluator, parent_context)
self.tree_node = comp_for
def get_node(self):
return self.tree_node
def get_filters(self, search_global, until_position=None, origin_scope=None):
yield ParserTreeFilter(self.evaluator, self)
class Comprehension(AbstractSequence):
@staticmethod
def from_atom(evaluator, context, atom):
bracket = atom.children[0]
if bracket == '{':
if atom.children[1].children[1] == ':':
cls = DictComprehension
else:
cls = SetComprehension
elif bracket == '(':
cls = GeneratorComprehension
elif bracket == '[':
cls = ListComprehension
return cls(evaluator, context, atom)
def __init__(self, evaluator, defining_context, atom):
super(Comprehension, self).__init__(evaluator)
self._defining_context = defining_context
self._atom = atom
def _get_comprehension(self):
# The atom contains a testlist_comp
return self._atom.children[1]
def _get_comp_for(self):
# The atom contains a testlist_comp
return self._get_comprehension().children[1]
def _eval_node(self, index=0):
"""
The first part `x + 1` of the list comprehension:
[x + 1 for x in foo]
"""
return self._get_comprehension().children[index]
@memoize_default()
def _get_comp_for_context(self, parent_context, comp_for):
# TODO shouldn't this be part of create_context?
return CompForContext.from_comp_for(parent_context, comp_for)
def _nested(self, comp_fors, parent_context=None):
evaluator = self.evaluator
comp_for = comp_fors[0]
input_node = comp_for.children[3]
parent_context = parent_context or self._defining_context
input_types = parent_context.eval_node(input_node)
iterated = py__iter__(evaluator, input_types, input_node)
exprlist = comp_for.children[1]
for i, lazy_context in enumerate(iterated):
types = lazy_context.infer()
dct = unpack_tuple_to_dict(evaluator, types, exprlist)
context = self._get_comp_for_context(
parent_context,
comp_for,
)
with helpers.predefine_names(context, comp_for, dct):
try:
for result in self._nested(comp_fors[1:], context):
yield result
except IndexError:
iterated = context.eval_node(self._eval_node())
if self.array_type == 'dict':
yield iterated, context.eval_node(self._eval_node(2))
else:
yield iterated
@memoize_default(default=[])
@common.to_list
def _iterate(self):
comp_fors = tuple(self._get_comp_for().get_comp_fors())
for result in self._nested(comp_fors):
yield result
def py__iter__(self):
for set_ in self._iterate():
yield context.LazyKnownContexts(set_)
def __repr__(self):
return "<%s of %s>" % (type(self).__name__, self._atom)
class ArrayMixin(object):
def get_filters(self, search_global, until_position=None, origin_scope=None):
# `array.type` is a string with the type, e.g. 'list'.
compiled_obj = compiled.builtin_from_name(self.evaluator, self.array_type)
yield SpecialMethodFilter(self, self.builtin_methods, compiled_obj)
for typ in compiled_obj.execute_evaluated(self):
for filter in typ.get_filters():
yield filter
def py__bool__(self):
return None # We don't know the length, because of appends.
def py__class__(self):
return compiled.builtin_from_name(self.evaluator, self.array_type)
@safe_property
def parent(self):
return self.evaluator.BUILTINS
def dict_values(self):
return unite(self._defining_context.eval_node(v) for k, v in self._items())
class ListComprehension(ArrayMixin, Comprehension):
array_type = 'list'
def py__getitem__(self, index):
if isinstance(index, slice):
return set([self])
all_types = list(self.py__iter__())
return all_types[index].infer()
class SetComprehension(ArrayMixin, Comprehension):
array_type = 'set'
@has_builtin_methods
class DictComprehension(ArrayMixin, Comprehension):
array_type = 'dict'
def _get_comp_for(self):
return self._get_comprehension().children[3]
def py__iter__(self):
for keys, values in self._iterate():
yield context.LazyKnownContexts(keys)
def py__getitem__(self, index):
for keys, values in self._iterate():
for k in keys:
if isinstance(k, compiled.CompiledObject):
if k.obj == index:
return values
return self.dict_values()
def dict_values(self):
return unite(values for keys, values in self._iterate())
@register_builtin_method('values')
def _imitate_values(self):
lazy_context = context.LazyKnownContexts(self.dict_values())
return set([FakeSequence(self.evaluator, 'list', [lazy_context])])
    @register_builtin_method('items')
    def _imitate_items(self):
        lazy_contexts = [
            context.LazyKnownContext(FakeSequence(
                self.evaluator, 'tuple',
                (context.LazyKnownContexts(keys), context.LazyKnownContexts(values))
            )) for keys, values in self._iterate()
        ]
        return set([FakeSequence(self.evaluator, 'list', lazy_contexts)])
class GeneratorComprehension(GeneratorMixin, Comprehension):
pass
class SequenceLiteralContext(ArrayMixin, AbstractSequence):
mapping = {'(': 'tuple',
'[': 'list',
'{': 'set'}
def __init__(self, evaluator, defining_context, atom):
super(SequenceLiteralContext, self).__init__(evaluator)
self.atom = atom
self._defining_context = defining_context
if self.atom.type in ('testlist_star_expr', 'testlist'):
self.array_type = 'tuple'
else:
self.array_type = SequenceLiteralContext.mapping[atom.children[0]]
"""The builtin name of the array (list, set, tuple or dict)."""
def py__getitem__(self, index):
"""Here the index is an int/str. Raises IndexError/KeyError."""
if self.array_type == 'dict':
for key, value in self._items():
for k in self._defining_context.eval_node(key):
if isinstance(k, compiled.CompiledObject) \
and index == k.obj:
return self._defining_context.eval_node(value)
raise KeyError('No key found in dictionary %s.' % self)
# Can raise an IndexError
if isinstance(index, slice):
return set([self])
else:
return self._defining_context.eval_node(self._items()[index])
def py__iter__(self):
"""
While values returns the possible values for any array field, this
function returns the value for a certain index.
"""
if self.array_type == 'dict':
# Get keys.
types = set()
for k, _ in self._items():
types |= self._defining_context.eval_node(k)
# We don't know which dict index comes first, therefore always
# yield all the types.
for _ in types:
yield context.LazyKnownContexts(types)
else:
for node in self._items():
yield context.LazyTreeContext(self._defining_context, node)
for addition in check_array_additions(self._defining_context, self):
yield addition
def _values(self):
"""Returns a list of a list of node."""
if self.array_type == 'dict':
return unite(v for k, v in self._items())
else:
return self._items()
def _items(self):
c = self.atom.children
if self.atom.type in ('testlist_star_expr', 'testlist'):
return c[::2]
array_node = c[1]
if array_node in (']', '}', ')'):
return [] # Direct closing bracket, doesn't contain items.
if array_node.type == 'testlist_comp':
return array_node.children[::2]
elif array_node.type == 'dictorsetmaker':
kv = []
iterator = iter(array_node.children)
for key in iterator:
op = next(iterator, None)
if op is None or op == ',':
kv.append(key) # A set.
else:
assert op == ':' # A dict.
kv.append((key, next(iterator)))
next(iterator, None) # Possible comma.
return kv
else:
return [array_node]
def exact_key_items(self):
"""
Returns a generator of tuples like dict.items(), where the key is
resolved (as a string) and the values are still lazy contexts.
"""
for key_node, value in self._items():
for key in self._defining_context.eval_node(key_node):
if precedence.is_string(key):
yield key.obj, context.LazyTreeContext(self._defining_context, value)
def __repr__(self):
return "<%s of %s>" % (self.__class__.__name__, self.atom)
@has_builtin_methods
class DictLiteralContext(SequenceLiteralContext):
array_type = 'dict'
def __init__(self, evaluator, defining_context, atom):
super(SequenceLiteralContext, self).__init__(evaluator)
self._defining_context = defining_context
self.atom = atom
@register_builtin_method('values')
def _imitate_values(self):
lazy_context = context.LazyKnownContexts(self.dict_values())
return set([FakeSequence(self.evaluator, 'list', [lazy_context])])
@register_builtin_method('items')
def _imitate_items(self):
lazy_contexts = [
context.LazyKnownContext(FakeSequence(
self.evaluator, 'tuple',
(context.LazyTreeContext(self._defining_context, key_node),
context.LazyTreeContext(self._defining_context, value_node))
)) for key_node, value_node in self._items()
]
return set([FakeSequence(self.evaluator, 'list', lazy_contexts)])
class _FakeArray(SequenceLiteralContext):
def __init__(self, evaluator, container, type):
super(SequenceLiteralContext, self).__init__(evaluator)
self.array_type = type
self.atom = container
# TODO is this class really needed?
class ImplicitTuple(_FakeArray):
def __init__(self, evaluator, testlist):
super(ImplicitTuple, self).__init__(evaluator, testlist, 'tuple')
raise NotImplementedError
self._testlist = testlist
def _items(self):
return self._testlist.children[::2]
class FakeSequence(_FakeArray):
def __init__(self, evaluator, array_type, lazy_context_list):
"""
type should be one of "tuple", "list"
"""
super(FakeSequence, self).__init__(evaluator, None, array_type)
self._lazy_context_list = lazy_context_list
def _items(self):
raise DeprecationWarning
return self._context_list
def py__getitem__(self, index):
return set(self._lazy_context_list[index].infer())
def py__iter__(self):
return self._lazy_context_list
def __repr__(self):
return "<%s of %s>" % (type(self).__name__, self._lazy_context_list)
class FakeDict(_FakeArray):
def __init__(self, evaluator, dct):
super(FakeDict, self).__init__(evaluator, dct, 'dict')
self._dct = dct
def py__iter__(self):
for key in self._dct:
yield context.LazyKnownContext(compiled.create(self.evaluator, key))
def py__getitem__(self, index):
return self._dct[index].infer()
def dict_values(self):
return unite(lazy_context.infer() for lazy_context in self._dct.values())
def _items(self):
raise DeprecationWarning
for key, values in self._dct.items():
# TODO this is not proper. The values could be multiple values?!
yield key, values[0]
def exact_key_items(self):
return self._dct.items()
class MergedArray(_FakeArray):
def __init__(self, evaluator, arrays):
super(MergedArray, self).__init__(evaluator, arrays, arrays[-1].array_type)
self._arrays = arrays
def py__iter__(self):
for array in self._arrays:
for lazy_context in array.py__iter__():
yield lazy_context
def py__getitem__(self, index):
return unite(lazy_context.infer() for lazy_context in self.py__iter__())
def _items(self):
for array in self._arrays:
for a in array._items():
yield a
def __len__(self):
return sum(len(a) for a in self._arrays)
def unpack_tuple_to_dict(evaluator, types, exprlist):
"""
Unpacking tuple assignments in for statements and expr_stmts.
"""
if exprlist.type == 'name':
return {exprlist.value: types}
elif exprlist.type == 'atom' and exprlist.children[0] in '([':
return unpack_tuple_to_dict(evaluator, types, exprlist.children[1])
elif exprlist.type in ('testlist', 'testlist_comp', 'exprlist',
'testlist_star_expr'):
dct = {}
parts = iter(exprlist.children[::2])
n = 0
for lazy_context in py__iter__(evaluator, types, exprlist):
n += 1
try:
part = next(parts)
except StopIteration:
# TODO this context is probably not right.
analysis.add(next(iter(types)), 'value-error-too-many-values', part,
message="ValueError: too many values to unpack (expected %s)" % n)
else:
dct.update(unpack_tuple_to_dict(evaluator, lazy_context.infer(), part))
has_parts = next(parts, None)
if types and has_parts is not None:
# TODO this context is probably not right.
analysis.add(next(iter(types)), 'value-error-too-few-values', has_parts,
message="ValueError: need more than %s values to unpack" % n)
return dct
elif exprlist.type == 'power' or exprlist.type == 'atom_expr':
# Something like ``arr[x], var = ...``.
# This is something that is not yet supported, would also be difficult
# to write into a dict.
return {}
elif exprlist.type == 'star_expr': # `a, *b, c = x` type unpackings
# Currently we're not supporting them.
return {}
raise NotImplementedError
def py__iter__(evaluator, types, node=None):
debug.dbg('py__iter__')
type_iters = []
for typ in types:
try:
iter_method = typ.py__iter__
except AttributeError:
if node is not None:
# TODO this context is probably not right.
analysis.add(typ, 'type-error-not-iterable', node,
message="TypeError: '%s' object is not iterable" % typ)
else:
type_iters.append(iter_method())
for lazy_contexts in zip_longest(*type_iters):
yield context.get_merged_lazy_context(
[l for l in lazy_contexts if l is not None]
)
def py__iter__types(evaluator, types, node=None):
"""
Calls `py__iter__`, but ignores the ordering in the end and just returns
all types that it contains.
"""
return unite(lazy_context.infer() for lazy_context in py__iter__(evaluator, types, node))
def py__getitem__(evaluator, context, types, trailer):
from jedi.evaluate.representation import ClassContext
from jedi.evaluate.instance import TreeInstance
result = set()
trailer_op, node, trailer_cl = trailer.children
assert trailer_op == "["
assert trailer_cl == "]"
# special case: PEP0484 typing module, see
# https://github.com/davidhalter/jedi/issues/663
for typ in list(types):
if isinstance(typ, (ClassContext, TreeInstance)):
typing_module_types = pep0484.py__getitem__(context, typ, node)
if typing_module_types is not None:
types.remove(typ)
result |= typing_module_types
if not types:
# all consumed by special cases
return result
for index in create_index_types(evaluator, context, node):
if isinstance(index, (compiled.CompiledObject, Slice)):
index = index.obj
if type(index) not in (float, int, str, unicode, slice):
            # If the index is not clearly defined, we have to get all the
            # possibilities.
for typ in list(types):
if isinstance(typ, AbstractSequence) and typ.array_type == 'dict':
types.remove(typ)
result |= typ.dict_values()
return result | py__iter__types(evaluator, types)
for typ in types:
# The actual getitem call.
try:
getitem = typ.py__getitem__
except AttributeError:
# TODO this context is probably not right.
analysis.add(context, 'type-error-not-subscriptable', trailer_op,
message="TypeError: '%s' object is not subscriptable" % typ)
else:
try:
result |= getitem(index)
except IndexError:
result |= py__iter__types(evaluator, set([typ]))
except KeyError:
# Must be a dict. Lists don't raise KeyErrors.
result |= typ.dict_values()
return result
def check_array_additions(context, sequence):
""" Just a mapper function for the internal _check_array_additions """
if sequence.array_type not in ('list', 'set'):
# TODO also check for dict updates
return set()
return _check_array_additions(context, sequence)
@memoize_default(default=set())
@debug.increase_indent
def _check_array_additions(context, sequence):
"""
    Checks if an `Array` has "add" (append, insert, extend) statements:
>>> a = [""]
>>> a.append(1)
"""
from jedi.evaluate import param
debug.dbg('Dynamic array search for %s' % sequence, color='MAGENTA')
module_context = context.get_root_context()
if not settings.dynamic_array_additions or isinstance(module_context, compiled.CompiledObject):
debug.dbg('Dynamic array search aborted.', color='MAGENTA')
return set()
def find_additions(context, arglist, add_name):
params = list(param.TreeArguments(context.evaluator, context, arglist).unpack())
result = set()
if add_name in ['insert']:
params = params[1:]
if add_name in ['append', 'add', 'insert']:
for key, lazy_context in params:
result.add(lazy_context)
elif add_name in ['extend', 'update']:
for key, lazy_context in params:
result |= set(py__iter__(context.evaluator, lazy_context.infer()))
return result
temp_param_add, settings.dynamic_params_for_other_modules = \
settings.dynamic_params_for_other_modules, False
is_list = sequence.name.string_name == 'list'
search_names = (['append', 'extend', 'insert'] if is_list else ['add', 'update'])
added_types = set()
for add_name in search_names:
try:
possible_names = module_context.tree_node.used_names[add_name]
except KeyError:
continue
else:
for name in possible_names:
context_node = context.tree_node
if not (context_node.start_pos < name.start_pos < context_node.end_pos):
continue
trailer = name.parent
power = trailer.parent
trailer_pos = power.children.index(trailer)
try:
execution_trailer = power.children[trailer_pos + 1]
except IndexError:
continue
else:
if execution_trailer.type != 'trailer' \
or execution_trailer.children[0] != '(' \
or execution_trailer.children[1] == ')':
continue
random_context = context.create_context(name)
with recursion.execution_allowed(context.evaluator, power) as allowed:
if allowed:
found = helpers.evaluate_call_of_leaf(
random_context,
name,
cut_own_trailer=True
)
if sequence in found:
# The arrays match. Now add the results
added_types |= find_additions(
random_context,
execution_trailer.children[1],
add_name
)
# reset settings
settings.dynamic_params_for_other_modules = temp_param_add
debug.dbg('Dynamic array result %s' % added_types, color='MAGENTA')
return added_types
def get_dynamic_array_instance(instance):
"""Used for set() and list() instances."""
if not settings.dynamic_array_additions:
return instance.var_args
ai = _ArrayInstance(instance)
from jedi.evaluate import param
return param.ValuesArguments([[ai]])
class _ArrayInstance(object):
"""
Used for the usage of set() and list().
This is definitely a hack, but a good one :-)
It makes it possible to use set/list conversions.
In contrast to Array, ListComprehension and all other iterable types, this
is something that is only used inside `evaluate/compiled/fake/builtins.py`
and therefore doesn't need filters, `py__bool__` and so on, because
we don't use these operations in `builtins.py`.
"""
def __init__(self, instance):
self.instance = instance
self.var_args = instance.var_args
def py__iter__(self):
var_args = self.var_args
try:
_, lazy_context = next(var_args.unpack())
except StopIteration:
pass
else:
for lazy in py__iter__(self.instance.evaluator, lazy_context.infer()):
yield lazy
from jedi.evaluate import param
if isinstance(var_args, param.TreeArguments):
additions = _check_array_additions(var_args.context, self.instance)
for addition in additions:
yield addition
class Slice(context.Context):
def __init__(self, context, start, stop, step):
super(Slice, self).__init__(
context.evaluator,
parent_context=context.evaluator.BUILTINS
)
self._context = context
# all of them are either a Precedence or None.
self._start = start
self._stop = stop
self._step = step
@property
def obj(self):
"""
Imitate CompiledObject.obj behavior and return a ``builtin.slice()``
object.
"""
def get(element):
if element is None:
return None
result = self._context.eval_node(element)
if len(result) != 1:
# For simplicity, we want slices to be clear defined with just
# one type. Otherwise we will return an empty slice object.
raise IndexError
try:
return list(result)[0].obj
except AttributeError:
return None
try:
return slice(get(self._start), get(self._stop), get(self._step))
except IndexError:
return slice(None, None, None)
def create_index_types(evaluator, context, index):
"""
Handles slices in subscript nodes.
"""
if index == ':':
# Like array[:]
return set([Slice(context, None, None, None)])
elif index.type == 'subscript': # subscript is a slice operation.
# Like array[:3]
result = []
for el in index.children:
if el == ':':
if not result:
result.append(None)
elif el.type == 'sliceop':
if len(el.children) == 2:
result.append(el.children[1])
else:
result.append(el)
result += [None] * (3 - len(result))
return set([Slice(context, *result)])
# No slices
return context.eval_node(index)
| tequa/ammisoft | ammimain/WinPython-64bit-2.7.13.1Zero/python-2.7.13.amd64/Lib/site-packages/jedi/evaluate/iterable.py | Python | bsd-3-clause | 31,484 | 0.00127 |
class MimeError(Exception):
pass
class DecodingError(MimeError):
    """Thrown when there is a decoding error."""
    pass
class EncodingError(MimeError):
    """Thrown when there is an encoding error."""
    pass
| alex/flanker | flanker/mime/message/errors.py | Python | apache-2.0 | 223 | 0 |
'''
Copyright (C) 2016 Turi
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
'''
import logging
import pprint
class ConsumerMock:
def __init__(self):
self._num_flushes = 0
def flush(self):
self._num_flushes += 1
class MetricMock:
def __init__(self, event_limit=-1):
self._consumer = ConsumerMock()
self._calls = []
self._pp = pprint.PrettyPrinter(indent=2)
self._event_limit = event_limit
self.logger = logging.getLogger(__name__)
def track(self, distinct_id, event_name, properties={}, meta={}):
        if self._event_limit < 0 or len(self._calls) < self._event_limit:
self._calls.append( {'method':'track',
'distinct_id':distinct_id,
'event_name':event_name,
'properties':properties,
'meta':meta,
})
def submit(self, name, value, type, source, attributes):
self._calls.append({'method':'submit',
'name':name,
'value':value,
'type':type,
'source':source,
'attributes':attributes})
def dump_calls(self):
#self._pp.pprint(self._calls)
self.logger.info(self._calls)
def dump(self):
self.logger.info("Number of flushes: %g" % self._consumer._num_flushes)
self.dump_calls()
| ylow/SFrame | oss_src/unity/python/sframe/util/metric_mock.py | Python | bsd-3-clause | 1,457 | 0.019218 |
__author__ = 'k-sthan(II)'
def greeting(msg):
print(msg) | kvs6rj/cs3240-labdemo | helper.py | Python | mit | 61 | 0.032787 |
#-*- encoding: utf-8 -*-
"""
Copyright (c) 2010 Camille "nephthys" Bouiller <[email protected]>
InsideTags is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as
published by the Free Software Foundation, either version 3 of the
License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from django.db.models import Q
from django.core.cache import cache
from django.http import HttpRequest
from django.utils.cache import get_cache_key
from django.utils.safestring import mark_safe
from django.conf import settings
import re, urllib, unicodedata
def expire_page(path):
request = HttpRequest()
request.path = path
key = get_cache_key(request)
if cache.has_key(key):
cache.delete(key)
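# Usage sketch (illustrative path, not from this project): pass the same URL path
# that Django's cache middleware stored, e.g. expire_page('/2010/06/some-post/'),
# and the cached response for that path is dropped if present.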
def not_combining(char):
return unicodedata.category(char) != 'Mn'
def strip_accents(value):
import unicodedata
value = unicodedata.normalize('NFKD', value).encode('ascii','ignore')
return value
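# For illustration: strip_accents(u'Libert\xe9 guid\xe9e') == 'Liberte guidee',
# i.e. NFKD decomposition followed by dropping the non-ASCII combining marks.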
def normalize_query(query_string, findterms=re.compile(r'"([^"]+)"|(\S+)').findall,
normspace=re.compile(r'\s{2,}').sub):
'''
    Splits the query string into individual keywords, getting rid of unnecessary
    spaces and grouping quoted words together.
Example:
>>> normalize_query(' some random words "with quotes " and spaces')
['some', 'random', 'words', 'with quotes', 'and', 'spaces']
Source : http://www.julienphalip.com/blog/2008/08/16/adding-search-django-site-snap/
'''
return [normspace(' ', (t[0] or t[1]).strip()) for t in findterms(query_string)]
def get_query(query_string, search_fields):
'''
Returns a query, that is a combination of Q objects. That combination
aims to search keywords within a model by testing the given search fields.
Source : http://www.julienphalip.com/blog/2008/08/16/adding-search-django-site-snap/
'''
query = None # Query to search for every search term
terms = normalize_query(query_string)
for term in terms:
or_query = None # Query to search for a given term in each field
for field_name in search_fields:
q = Q(**{"%s__icontains" % field_name: term})
if or_query is None:
or_query = q
else:
or_query = or_query | q
if query is None:
query = or_query
else:
query = query & or_query
return query
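# Illustration only (assumes a model with 'title' and 'body' fields, which is not
# part of this module): get_query('django "cache key"', ['title', 'body']) builds
# roughly
#   (Q(title__icontains='django') | Q(body__icontains='django'))
#   & (Q(title__icontains='cache key') | Q(body__icontains='cache key'))
# which can then be passed to SomeModel.objects.filter(query).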
def url_encode(url):
dict = urllib.urlencode({'key': url})
return dict[4:]
def url_decode(url):
return urllib.unquote_plus(url)
BITLY_LOGIN = getattr(settings, 'BITLY_LOGIN', None)
BITLY_APIKEY = getattr(settings, 'BITLY_APIKEY', None)
TWITTER_LOGIN = getattr(settings, 'TWITTER_LOGIN', None)
TWITTER_PASS = getattr(settings, 'TWITTER_PASSWORD', None)
def shorten_url(long_url, login_user, api_key):
    values = {
        'version': '2.0.1',
        'longUrl': long_url,
        'login': login_user,
        'apiKey': api_key
    }
    params = urllib.urlencode(values)
    request = urllib.urlopen('http://api.bit.ly/shorten?%s' % params)
    response = request.read()
    request.close()
    response_dict = eval(response)
    short_url = long_url
    try:
        short_url = response_dict['results'][long_url]['shortUrl']
    except KeyError:
        print response_dict
    return short_url
def post_to_twitter(url, title, tags):
if not BITLY_LOGIN or not BITLY_APIKEY or not TWITTER_LOGIN or not TWITTER_PASS:
return
import twitter
url = shorten_url(url, BITLY_LOGIN, BITLY_APIKEY)
tweet = '%s %s' % (title, url)
hashtags = ''
if tags:
tags = tags.replace(',', '')
new_tags = list()
for tag in tags.split():
new_tags.append('#%s' % tag)
hashtags = ' '.join(new_tags)
if len(tweet) > 140:
        title = title[:140 - 4 - len(url)]
tweet = '%s %s' % (title, url)
for tag in hashtags.split():
if (len(tweet) + len(tag) + 1) <= 140:
tweet += ' %s' % tag
api = twitter.Api(username=TWITTER_LOGIN, password=TWITTER_PASS)
api.PostUpdates(tweet)
return url
def twitterfy(text):
'''
Parse links, @replies and #hashtags
Source : http://teebes.com/blog/17/simple-python-twitter-rss-feed-parser
'''
text = re.sub(r'(http://(\w|\.|/|\?|=|%|&)+)', \
lambda x: '<a href="%s">%s</a>' % (x.group().strip(), x.group().strip()), text)
text = re.sub(r'@(\w+)', lambda x: '<a href="http://twitter.com/%s">%s</a>' \
% (x.group()[1:], x.group()), text)
text = re.sub(r'#(\w+)', lambda x: '<a href="http://twitter.com/search?q=%%23%s">%s</a>' \
% (x.group()[1:], x.group()), text)
return mark_safe(text) | nephthys/insidetags | functions.py | Python | agpl-3.0 | 5,173 | 0.010632 |
import pytest
from unittest.mock import patch
from borg_summon import config_parser
from .util import mock_globbing, mock_multiple_opens
def test_merge():
d1 = {
'a': 'a',
'b': {
'c': 'c',
'd': [1, 2, 3],
'e': [1, 2, 3],
},
'c': {
'd': 3,
},
'd': 3,
}
d2 = {
'b': {
'c': 'C',
'd': [3, 4, 5],
'e': 0,
},
'c': 0,
'd': 'd',
'g': 'g',
}
res = {
'a': 'a',
'b': {
'c': 'C',
'd': [1, 2, 3, 3, 4, 5],
'e': 0,
},
'c': 0,
'd': 'd',
'g': 'g',
}
config_parser.merge(d1, d2)
assert str(d1) == str(res)
def test_cyclic_include():
mock_globbing()
m_open = mock_multiple_opens([
'include = ["b.toml"]',
'include = ["c.toml"]',
'include = ["a.toml"]',
'include = ["b.toml"]',
])
with patch('borg_summon.config_parser.open', m_open, create=True):
with pytest.raises(config_parser.CyclicIncludeError) as excinfo:
config_parser.get_from_file('a.toml')
assert 'includes itself' in str(excinfo.value)
@patch('logging.warning')
def test_multiple_include(warning):
mock_globbing()
m_open = mock_multiple_opens([
'include = ["b.toml", "c.toml"]',
'include = ["d.toml"]',
'log_level = "info"',
'include = ["d.toml"]',
'log_level = "info"',
])
with patch('borg_summon.config_parser.open', m_open, create=True):
config_parser.get_from_file('a.toml')
assert warning.call_count == 1
assert 'included multiple times' in str(warning.mock_calls[0])
| grensjo/borg-summon | test/test_config_parser.py | Python | mit | 1,894 | 0.000528 |
#!/usr/bin/env python2.6
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2008,2009,2010,2011,2013 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for testing the del disk command."""
import unittest
if __name__ == "__main__":
import utils
utils.import_depends()
from brokertest import TestBrokerCommand
class TestDelDisk(TestBrokerCommand):
def testdelut3c1n3sda(self):
self.noouttest(["del", "disk", "--machine", "ut3c1n3",
"--controller", "scsi", "--size", "68"])
def testdelut3c1n3sdb(self):
self.noouttest(["del", "disk", "--machine", "ut3c1n3",
"--disk", "c0d0"])
def testverifydelut3c1n3sda(self):
command = "show machine --machine ut3c1n3"
out = self.commandtest(command.split(" "))
self.matchclean(out, "Disk: sda 68 GB scsi", command)
def testverifydelut3c1n3sdb(self):
command = "show machine --machine ut3c1n3"
out = self.commandtest(command.split(" "))
self.matchclean(out, "Disk: c0d0", command)
# This should now list the 34 GB disk that was added previously...
def testverifycatut3c1n3disk(self):
command = "cat --machine ut3c1n3"
out = self.commandtest(command.split(" "))
self.matchclean(out, "harddisks", command)
def testfaildelunknowntype(self):
command = ["del", "disk", "--machine", "ut3c1n3",
"--type", "type-does-not-exist"]
out = self.badrequesttest(command)
self.matchoutput(out,
"type-does-not-exist is not a valid controller type",
command)
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(TestDelDisk)
unittest.TextTestRunner(verbosity=2).run(suite)
| stdweird/aquilon | tests/broker/test_del_disk.py | Python | apache-2.0 | 2,362 | 0.00127 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-06-25 18:45
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('courses', '0001_initial'),
]
operations = [
migrations.AlterModelOptions(
name='course',
options={'ordering': ['name'], 'verbose_name': 'Curso', 'verbose_name_plural': 'Cursos'},
),
]
| paulopinda/simplemooc | simplemooc/courses/migrations/0002_auto_20160625_1845.py | Python | gpl-2.0 | 454 | 0.002203 |
import re
import urlparse
import datetime
from django.template.defaultfilters import truncatewords
from billy.core import mdb as db, feeds_db, settings
from .base import Document
from .metadata import Metadata
class FeedEntry(Document):
collection = feeds_db.entries
def __init__(self, *args, **kw):
super(FeedEntry, self).__init__(*args, **kw)
def build(self, billy_db=db):
'''Mutate the feed entry with hyperlinked entities. Add tagging
data and other template context values, including source.
'''
self_legislator = self.legislator
entity_types = {'L': 'legislator',
'C': 'committee',
'B': 'bill'}
entry = self
summary = truncatewords(entry['summary'], 50)
entity_strings = entry['entity_strings']
entity_ids = entry['entity_ids']
_entity_strings = []
_entity_ids = []
_entity_urls = []
_done = []
if entity_strings:
data = zip(entity_strings, entity_ids)
data = sorted(data, key=lambda t: len(t[0]), reverse=True)
hyperlinked_spans = []
for entity_string, _id in data:
if entity_string in _done:
continue
else:
_done.append(entity_string)
_entity_strings.append(entity_string)
_entity_ids.append(_id)
# Get this entity's url.
collection_name = entity_types[_id[2]] + 's'
collection = getattr(billy_db, collection_name)
if collection_name == 'legislators':
cursor = collection.find({'_all_ids': _id})
assert cursor.count() == 1
instance = cursor.next()
else:
instance = collection.find_one(_id)
url = instance.get_absolute_url()
_entity_urls.append(url)
# This is tricky. Need to hyperlink the entity without mangling
# other previously hyperlinked strings, like Fiona Ma and
# Mark Leno.
matches = re.finditer(entity_string, summary)
if _id != self_legislator.id:
# For other entities, add a hyperlink.
replacer = lambda m: '<a href="%s">%s</a>' % (
url, entity_string)
else:
# If this id refers to the related legislator, bold it.
replacer = lambda m: '<strong>%s</strong>' % entity_string
for match in matches:
# Only hyperlink if no previous hyperlink has been added
# in the same span.
if any((start <= n < stop) for n in match.span()
for (start, stop) in hyperlinked_spans):
continue
summary = re.sub(entity_string, replacer, summary)
hyperlinked_spans.append(match.span())
        # For entity_strings, use modelinstance.display_name strings.
_entity_display_names = []
for _id in _entity_ids:
collection_name = entity_types[_id[2]] + 's'
collection = getattr(billy_db, collection_name)
if collection_name == 'legislators':
cursor = collection.find({'_all_ids': _id})
assert cursor.count() == 1
instance = cursor.next()
else:
instance = collection.find_one(_id)
string = instance.display_name()
_entity_display_names.append(string)
entity_data = zip(_entity_strings, _entity_display_names,
_entity_ids, _entity_urls)
_entity_data = []
seen_display_names = []
for string, display_name, _id, url in entity_data:
if display_name not in seen_display_names:
_entity_data.append((string, display_name, _id, url))
seen_display_names.append(display_name)
entry['summary'] = summary
entry['entity_data'] = _entity_data
entry['id'] = entry['_id']
urldata = urlparse.urlparse(entry['link'])
        entry['source'] = urldata.scheme + '://' + urldata.netloc
entry['host'] = urldata.netloc
# Prevent obfuscation of `published` method in template rendering.
if 'published' in entry:
del entry['published']
return ''
def display(self):
return self['summary']
def published(self):
if 'published_parsed' in self:
published_parsed = self['published_parsed']
if published_parsed is not None:
return datetime.datetime.fromtimestamp(
self['published_parsed'])
# Try alternative format.
published = self['published']
            try:
                return datetime.datetime.strptime(
                    published, '%b %d %H:%M:%S %Y')
            except ValueError:
                pass
elif 'updated_parsed' in self:
# Fall back to `updated` date.
return datetime.datetime.fromtimestamp(self['updated_parsed'])
else:
# Let this field be blank.
return
@property
def metadata(self):
return Metadata.get_object(self[settings.LEVEL_FIELD])
| mileswwatkins/billy | billy/models/feeds.py | Python | bsd-3-clause | 5,523 | 0.000362 |
# -*- coding: utf-8 -*-
import os.path as op
import gzip
import io
import struct
from datetime import datetime
from xml.etree import ElementTree
import numpy as np
import scipy.sparse
from aston.resources import cache
from aston.trace.Trace import AstonSeries, AstonFrame
from aston.tracefile.TraceFile import TraceFile, ScanListFile
from aston.spectra.Scan import Scan
class AgilentMS(TraceFile):
ext = 'MS'
mgc = '0132'
traces = ['#ms']
def total_trace(self, twin=None):
#TODO: use twin?
f = open(self.filename, 'rb')
# get number of scans to read in
f.seek(0x5)
if f.read(4) == 'GC':
f.seek(0x142)
else:
f.seek(0x118)
nscans = struct.unpack('>H', f.read(2))[0]
# find the starting location of the data
f.seek(0x10A)
f.seek(2 * struct.unpack('>H', f.read(2))[0] - 2)
tme = np.zeros(nscans)
tic = np.zeros(nscans)
for i in range(nscans):
npos = f.tell() + 2 * struct.unpack('>H', f.read(2))[0]
tme[i] = struct.unpack('>I', f.read(4))[0] / 60000.
f.seek(npos - 4)
tic[i] = struct.unpack('>I', f.read(4))[0]
f.seek(npos)
f.close()
return AstonSeries(tic, tme, name='TIC')
@property
@cache(maxsize=1)
def data(self):
f = open(self.filename, 'rb')
# get number of scans to read in
# note that GC and LC chemstation store this in slightly different
# places
f.seek(0x5)
if f.read(4) == 'GC':
f.seek(0x142)
else:
f.seek(0x118)
nscans = struct.unpack('>H', f.read(2))[0]
f.seek(0x10A)
f.seek(2 * struct.unpack('>H', f.read(2))[0] - 2)
dstart = f.tell()
# determine total number of measurements in file
tot_pts = 0
rowst = np.empty(nscans + 1, dtype=int)
rowst[0] = 0
for scn in range(nscans):
# get the position of the next scan
npos = f.tell() + 2 * struct.unpack('>H', f.read(2))[0]
# keep a running total of how many measurements
tot_pts += (npos - f.tell() - 26) / 4
rowst[scn + 1] = tot_pts
# move forward
f.seek(npos)
# go back to the beginning and load all the other data
f.seek(dstart)
ions = []
i_lkup = {}
cols = np.empty(tot_pts, dtype=int)
vals = np.empty(tot_pts, dtype=np.int32)
times = np.empty(nscans)
for scn in range(nscans):
npos = f.tell() + 2 * struct.unpack('>H', f.read(2))[0]
            # the sampling rate is evidently 60 kHz on all Agilent's MS's
times[scn] = struct.unpack('>I', f.read(4))[0] / 60000.
f.seek(f.tell() + 12)
npts = rowst[scn + 1] - rowst[scn]
mzs = struct.unpack('>' + npts * 'HH', f.read(npts * 4))
# there's some bug in the numpy implementation that makes this fail
# after the first time
#mzs = np.fromfile(f, dtype='>H', count=npts * 2)
nions = set(mzs[0::2]).difference(i_lkup)
i_lkup.update({ion: i + len(ions) for i, ion in enumerate(nions)})
ions += nions
cols[rowst[scn]:rowst[scn + 1]] = \
[i_lkup[i] for i in mzs[0::2]]
vals[rowst[scn]:rowst[scn + 1]] = mzs[1::2]
f.seek(npos)
f.close()
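        # Each 16-bit abundance value is packed as a 14-bit mantissa in the low
        # bits plus a 2-bit exponent in the top bits; the next line expands it
        # to mantissa * 8 ** exponent before building the sparse matrix.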
vals = ((vals & 16383) * 8 ** (vals >> 14)).astype(float)
data = scipy.sparse.csr_matrix((vals, cols, rowst), \
shape=(nscans, len(ions)), dtype=float)
ions = np.array(ions) / 20.
return AstonFrame(data, times, ions)
@property
@cache(maxsize=1)
def old_data(self):
f = open(self.filename, 'rb')
# get number of scans to read in
# note that GC and LC chemstation store this in slightly different
# places
f.seek(0x5)
if f.read(4) == 'GC':
f.seek(0x142)
else:
f.seek(0x118)
nscans = struct.unpack('>H', f.read(2))[0]
# find the starting location of the data
f.seek(0x10A)
f.seek(2 * struct.unpack('>H', f.read(2))[0] - 2)
# make a list of all of the ions and also read in times
ions = set()
times = np.empty(nscans)
scan_locs = np.empty(nscans, dtype=int)
scan_pts = np.empty(nscans, dtype=int)
for scn in range(nscans):
npos = f.tell() + 2 * struct.unpack('>H', f.read(2))[0]
            # the sampling rate is evidently 60 kHz on all Agilent's MS's
times[scn] = struct.unpack('>I', f.read(4))[0] / 60000.
f.seek(f.tell() + 6)
npts = struct.unpack('>H', f.read(2))[0]
# jump to the data and save relevant parameters for later
f.seek(f.tell() + 4)
scan_locs[scn] = f.tell()
scan_pts[scn] = npts
#TODO: use numpy.fromfile?
nions = np.fromfile(f, dtype='>H', count=npts * 2)[0::2]
if scn < 2:
print(npts)
print(nions)
#nions = struct.unpack('>' + npts * 'HH', f.read(npts * 4))[0::2]
ions.update(nions)
f.seek(npos)
ions = np.array(sorted(list(ions)))
data = np.empty((len(times), len(ions)), dtype=float)
for scn in range(nscans):
f.seek(scan_locs[scn])
#TODO: use numpy.fromfile?
mzs = np.fromfile(f, dtype='>H', count=scan_pts[scn] * 2)
#mzs = np.array(struct.unpack('>' + npts * 'HH', f.read(npts * 4)))
if len(mzs) == 0:
continue
ilocs = np.searchsorted(ions, mzs[0::2])
abn = (mzs[1::2] & 16383) * 8 ** (mzs[1::2] >> 14)
data[scn][ilocs] = abn
f.close()
ions /= 20.
return AstonFrame(data, times, ions)
@property
def info(self):
d = super(AgilentMS, self).info
f = open(self.filename, 'rb')
f.seek(0x18)
d['name'] = f.read(struct.unpack('>B', f.read(1))[0]).decode().strip()
f.seek(0x94)
d['operator'] = f.read(struct.unpack('>B', f.read(1))[0]).decode()
f.seek(0xE4)
d['method'] = \
f.read(struct.unpack('>B', f.read(1))[0]).decode().strip()
f.seek(0xB2)
rawdate = f.read(struct.unpack('>B', f.read(1))[0]).decode()
try:
d['date'] = datetime.strptime(rawdate, \
"%d %b %y %H:%M %p").isoformat(' ')
except ValueError:
pass # date is not in correct format to parse?
#TODO: vial number in here too?
f.close()
#TODO: fill this out
## read info from the acqmeth.txt file
#fname = op.join(op.dirname(self.filename), 'acqmeth.txt')
return d
class AgilentMSMSScan(ScanListFile):
ext = 'BIN'
mgc = '0101'
traces = ['#ms']
# TODO: __init__ method that adds mrm trace names to traces
def _scan_iter(self, keylist):
f = open(self.filename, 'rb')
r = ElementTree.parse(op.splitext(self.filename)[0] + '.xsd').getroot()
xml_to_struct = {'xs:int': 'i', 'xs:long': 'q', 'xs:short': 'h', \
'xs:byte': 'b', 'xs:double': 'd', 'xs:float': 'f'}
rfrmt = {}
for n in r.getchildren():
name = n.get('name')
for sn in n.getchildren()[0].getchildren():
if rfrmt.get(name, None) is None:
rfrmt[name] = []
sname = sn.get('name')
stype = sn.get('type')
rfrmt[name].append((sname, xml_to_struct.get(stype, stype)))
def resolve(lookup, recname):
names = [i[0] for i in lookup[recname]]
frmts = [i[1] for i in lookup[recname]]
flatnames = []
flatfrmts = ''
for n, f in zip(names, frmts):
if len(f) != 1:
n, f = resolve(lookup, f)
flatnames += n
else:
flatnames.append(n)
flatfrmts += f
return flatnames, flatfrmts
fnames, ffrmts = resolve(rfrmt, 'ScanRecordType')
rec_str = '<' + ffrmts
sz = struct.calcsize(rec_str)
f.seek(0x58)
start_offset = struct.unpack('<i', f.read(4))[0]
f.seek(start_offset)
loc = [fnames.index(k) for k in keylist]
while True:
try:
data = struct.unpack(rec_str, f.read(sz))
except struct.error:
break
yield (data[l] for l in loc)
f.close()
def total_trace(self, twin=None):
if twin is None:
twin = (-np.inf, np.inf)
tme = []
tic = []
for t, z in self._scan_iter(['ScanTime', 'TIC']):
if t < twin[0]:
continue
elif t > twin[1]:
break
tme.append(t)
tic.append(z)
return AstonSeries(np.array(tic), np.array(tme), name='TIC')
#TODO: set .twin(twin) bounds on this
def scans(self, twin=None):
if twin is None:
twin = (-np.inf, np.inf)
#super hack-y way to disable checksum and length checking
gzip.GzipFile._read_eof = lambda _: None
# standard prefix for every zip chunk
gzprefix = b'\x1f\x8b\x08\x00\x00\x00\x00\x00\x04\x00'
def uncompress(d):
return gzip.GzipFile(fileobj=io.BytesIO(gzprefix + d)).read()
f = open(op.join(op.split(self.filename)[0], 'MSProfile.bin'), 'rb')
flds = ['ScanTime', 'SpectrumFormatID', 'SpectrumOffset', \
'ByteCount', 'PointCount', 'MinX', 'MaxX']
for t, fmt, off, bc, pc, minx, maxx in self._scan_iter(flds):
if t < twin[0]:
continue
if t > twin[1]:
break
f.seek(off)
if fmt == 1:
# this record is compressed with gz
profdata = uncompress(f.read(bc))
pd = np.array(struct.unpack('dd' + pc * 'i', profdata)[2:])
elif fmt == 2:
profdata = f.read(bc)
pd = np.array(struct.unpack('dd' + pc * 'f', profdata)[2:])
else:
raise NotImplementedError('Unknown Agilent MH Scan format')
#TODO: probably not a good approximation?
ions = np.linspace(minx, maxx, len(pd))
yield Scan(ions, pd, name=t)
f.close()
def mrm_trace(self, parent=None, daughter=None, tol=0.5, twin=None):
if twin is None:
twin = (-np.inf, np.inf)
tme, ic = [], []
for t, off, bc, pc, minx, maxx, d_mz, p_mz, z in self._scan_iter( \
['ScanTime', 'SpectrumOffset', 'ByteCount', 'PointCount', \
'MinX', 'MaxX', 'BasePeakMZ', 'MzOfInterest', 'TIC']):
if t < twin[0]:
continue
elif t > twin[1]:
break
if parent is not None:
if np.abs(parent - p_mz) > tol:
continue
if daughter is not None:
if np.abs(daughter - d_mz) > tol:
continue
tme.append(t)
ic.append(z)
return AstonSeries(np.array(ic), np.array(tme), \
name=str(str(parent) + '→' + str(daughter)))
| molliewebb/aston | aston/tracefile/AgilentMS.py | Python | gpl-3.0 | 11,532 | 0.002168 |
"""
Test parsing of complex date and times
"""
import unittest, time, datetime
import parsedatetime as pdt
class test(unittest.TestCase):
@pdt.tests.assertEqualWithComparator
def assertExpectedResult(self, result, check, **kwargs):
return pdt.tests.compareResultByTimeTuplesAndFlags(result, check, **kwargs)
def setUp(self):
self.cal = pdt.Calendar()
self.yr, self.mth, self.dy, self.hr, self.mn, self.sec, self.wd, self.yd, self.isdst = time.localtime()
def testDates(self):
start = datetime.datetime(self.yr, self.mth, self.dy, self.hr, self.mn, self.sec).timetuple()
target = datetime.datetime(2006, 8, 25, 17, 0, 0).timetuple()
self.assertExpectedResult(self.cal.parse('08/25/2006 5pm', start), (target, 3))
self.assertExpectedResult(self.cal.parse('5pm on 08.25.2006', start), (target, 3))
self.assertExpectedResult(self.cal.parse('5pm August 25, 2006', start), (target, 3))
self.assertExpectedResult(self.cal.parse('5pm August 25th, 2006', start), (target, 3))
self.assertExpectedResult(self.cal.parse('5pm 25 August, 2006', start), (target, 3))
self.assertExpectedResult(self.cal.parse('5pm 25th August, 2006', start), (target, 3))
self.assertExpectedResult(self.cal.parse('Aug 25, 2006 5pm', start), (target, 3))
self.assertExpectedResult(self.cal.parse('Aug 25th, 2006 5pm', start), (target, 3))
self.assertExpectedResult(self.cal.parse('25 Aug, 2006 5pm', start), (target, 3))
self.assertExpectedResult(self.cal.parse('25th Aug 2006, 5pm', start), (target, 3))
if self.mth > 8 or (self.mth == 8 and self.dy > 5):
target = datetime.datetime(self.yr + 1, 8, 5, 17, 0, 0).timetuple()
else:
target = datetime.datetime(self.yr, 8, 5, 17, 0, 0).timetuple()
self.assertExpectedResult(self.cal.parse('8/5 at 5pm', start), (target, 3))
self.assertExpectedResult(self.cal.parse('5pm 8.5', start), (target, 3))
self.assertExpectedResult(self.cal.parse('08/05 5pm', start), (target, 3))
self.assertExpectedResult(self.cal.parse('August 5 5pm', start), (target, 3))
self.assertExpectedResult(self.cal.parse('5pm Aug 05', start), (target, 3))
self.assertExpectedResult(self.cal.parse('Aug 05 5pm', start), (target, 3))
self.assertExpectedResult(self.cal.parse('Aug 05th 5pm', start), (target, 3))
self.assertExpectedResult(self.cal.parse('5 August 5pm', start), (target, 3))
self.assertExpectedResult(self.cal.parse('5th August 5pm', start), (target, 3))
self.assertExpectedResult(self.cal.parse('5pm 05 Aug', start), (target, 3))
self.assertExpectedResult(self.cal.parse('05 Aug 5pm', start), (target, 3))
self.assertExpectedResult(self.cal.parse('05th Aug 5pm', start), (target, 3))
self.assertExpectedResult(self.cal.parse('August 5th 5pm', start), (target, 3))
if self.mth > 8 or (self.mth == 8 and self.dy > 5):
target = datetime.datetime(self.yr + 1, 8, 5, 12, 0, 0).timetuple()
else:
target = datetime.datetime(self.yr, 8, 5, 12, 0, 0).timetuple()
self.assertExpectedResult(self.cal.parse('August 5th 12pm', start), (target, 3))
self.assertExpectedResult(self.cal.parse('August 5th 12:00', start), (target, 3))
if __name__ == "__main__":
unittest.main()
| r3tard/BartusBot | lib/parsedatetime/tests/TestComplexDateTimes.py | Python | apache-2.0 | 3,487 | 0.009464 |
# -*- coding: utf-8 -*-
#
# This file is part of spectrumctl. See spectrumctl.py for a description.
#
# Copyright (C) 2009, 2010 Mathias Ertl
#
# Spectrumctl is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 3 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, see <http://www.gnu.org/licenses/>.
__all__ = [ 'spectrum', 'spectrumconfigparser', 'env', 'ExistsError', 'config_interface', 'spectrum_group' ]
| hanzz/spectrum | spectrumctl/spectrum/__init__.py | Python | gpl-2.0 | 883 | 0.00453 |
'''
Created on 19 nov. 2015
@author: Bertrand Verdu
'''
if __name__ == '__main__':
pass | bverdu/onDemand | onDemand/plugins/zigbee/light.py | Python | agpl-3.0 | 95 | 0.021053 |
import os
from setuptools import find_packages, setup
from pip.req import parse_requirements
with open(os.path.join(os.path.dirname(__file__), 'README.md')) as readme:
README = readme.read()
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
# parse_requirements() returns generator of pip.req.InstallRequirement objects
install_reqs = parse_requirements('requirements.txt', session=False)
# reqs is a list of requirements
reqs = [str(ir.req) for ir in install_reqs]
setup(
name='leverage',
version='0.0.1',
packages=find_packages(),
include_package_data=True,
install_requires=reqs,
    license='AGPLv3 License',  # example license
description='',
long_description=README,
url='',
author='',
author_email='',
classifiers=[
'Environment :: Web Environment',
'Framework :: Django',
'Framework :: Django :: 1.9', # replace "X.Y" as appropriate
'Intended Audience :: Developers',
'License :: OSI Approved :: GNU Affero General Public License v3', # example license
'Operating System :: OS Independent',
'Programming Language :: Python',
# Replace these appropriately if you are stuck on Python 2.
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
],
)
| eamoncaddigan/Leverage | setup.py | Python | agpl-3.0 | 1,490 | 0.000671 |
#!/usr/bin/env python3
import json
import os
import unittest
import requests
AGNOS_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)))
MANIFEST = os.path.join(AGNOS_DIR, "agnos.json")
class TestAgnosUpdater(unittest.TestCase):
def test_manifest(self):
with open(MANIFEST) as f:
m = json.load(f)
for img in m:
r = requests.head(img['url'])
r.raise_for_status()
self.assertEqual(r.headers['Content-Type'], "application/x-xz")
if not img['sparse']:
assert img['hash'] == img['hash_raw']
if __name__ == "__main__":
unittest.main()
| commaai/openpilot | selfdrive/hardware/tici/test_agnos_updater.py | Python | mit | 595 | 0.011765 |
# coding=utf-8
import functools
import re
import stat
import sys
if sys.version_info >= (3,0):
import io
StringIO = io.StringIO
else:
import cStringIO
StringIO = cStringIO.StringIO
import csbuild
from . import log
try:
from PyQt5 import QtCore, QtGui, QtWidgets
QMainWindow = QtWidgets.QMainWindow
QApplication = QtWidgets.QApplication
QtGui.QAbstractItemView = QtWidgets.QAbstractItemView
QtGui.QAction = QtWidgets.QAction
QtGui.QApplication = QtWidgets.QApplication
QtGui.QHBoxLayout = QtWidgets.QHBoxLayout
QtGui.QHeaderView = QtWidgets.QHeaderView
QtGui.QLabel = QtWidgets.QLabel
QtGui.QLineEdit = QtWidgets.QLineEdit
QtGui.QMainWindow = QtWidgets.QMainWindow
QtGui.QMenu = QtWidgets.QMenu
QtGui.QMessageBox = QtWidgets.QMessageBox
QtGui.QPlainTextEdit = QtWidgets.QPlainTextEdit
QtGui.QProgressBar = QtWidgets.QProgressBar
QtGui.QPushButton = QtWidgets.QPushButton
QtGui.QSpacerItem = QtWidgets.QSpacerItem
QtGui.QSizePolicy = QtWidgets.QSizePolicy
QtGui.QSlider = QtWidgets.QSlider
QtGui.QSplitter = QtWidgets.QSplitter
QtGui.QStatusBar = QtWidgets.QStatusBar
QtGui.QStyledItemDelegate = QtWidgets.QStyledItemDelegate
QtGui.QTextEdit = QtWidgets.QTextEdit
QtGui.QTreeWidget = QtWidgets.QTreeWidget
QtGui.QTreeWidgetItem = QtWidgets.QTreeWidgetItem
QtGui.QTabWidget = QtWidgets.QTabWidget
QtGui.QToolTip = QtWidgets.QToolTip
QtGui.QVBoxLayout = QtWidgets.QVBoxLayout
QtGui.QWidget = QtWidgets.QWidget
log.LOG_INFO("Using Qt5")
USING_PYQT5 = True
except:
try:
from PyQt4 import QtCore, QtGui
QMainWindow = QtGui.QMainWindow
QApplication = QtGui.QApplication
log.LOG_INFO("Using Qt4")
USING_PYQT5 = False
except:
log.LOG_ERROR("Either PyQt4 or PyQt5 must be installed on your system to load the CSBuild GUI")
csbuild.Exit( 1 )
import os
import threading
import time
import math
import signal
from . import _shared_globals
class TreeWidgetItem(QtGui.QTreeWidgetItem):
def __init__(self, *args, **kwargs):
QtGui.QTreeWidgetItem.__init__(self, *args, **kwargs)
self.numericColumns = set()
def setColumnNumeric(self, col):
self.numericColumns.add(col)
def __lt__(self, other):
if self.parent():
return False
sortCol = self.treeWidget().sortColumn()
numericColumns = self.treeWidget().headerItem().numericColumns
try:
if sortCol in numericColumns:
myNumber = float(self.text(sortCol))
otherNumber = float(other.text(sortCol))
return myNumber > otherNumber
except:
pass
myText = str(self.text(sortCol))
otherText = str(other.text(sortCol))
return myText > otherText
class TreeWidgetWithBarGraph(QtGui.QTreeWidgetItem):
def __init__(self, parent, renderParent, isFile):
QtGui.QTreeWidgetItem.__init__(self, parent)
self.numericColumns = set()
self.startTime = -1
self.buildEnd = -1
self.linkQueueStart = -1
self.linkStart = -1
self.endTime = -1
self.isFile = isFile
self.m_childrenShowing = False
self.renderParent = renderParent
self.lastUpdate = 0
def setChildrenShowing(self, showing):
self.m_childrenShowing = showing
def childrenShowing(self):
return self.m_childrenShowing
def setStartTime(self, startTime):
self.startTime = startTime
self.lastUpdate = time.time()
def setBuildEnd(self, buildEnd):
self.buildEnd = buildEnd
def setLinkStart(self, linkStart):
self.linkStart = linkStart
def setLinkQueueStart(self, linkQueueStart):
self.linkQueueStart = linkQueueStart
def setEndTime(self, endTime):
self.endTime = endTime
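	# The bars drawn below visualize one timeline row: for a source file a single
	# orange compile bar (start -> build end); for a project a blue build bar,
	# teal/cyan segments for link-queue wait and queue-to-link, and a green bar
	# for the final link, each scaled at 30 pixels per second from the global
	# start time. (Color/phase mapping inferred from the member names set above.)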
def draw(self, painter):
rect = self.renderParent.visualItemRect(self)
def drawBar(color, startTime, endTime):
if startTime != -1:
if endTime == -1:
endTime = self.lastUpdate
topLeft = rect.topLeft()
if topLeft.y() < 0:
return
bottomRight = rect.bottomRight()
xoffset = 24
if self.isFile:
xoffset += 20
topLeft.setX(topLeft.x() + (250-xoffset) + math.floor((startTime - _shared_globals.starttime) * 30))
topLeft.setY(topLeft.y())
bottomRight.setX(topLeft.x() + math.ceil((endTime - startTime) * 30))
bottomRight.setY(topLeft.y() + rect.height() - 2)
drawRect = QtCore.QRect(topLeft, bottomRight)
brush = painter.brush()
painter.setBrush(QtGui.QColor(color))
painter.drawRect(drawRect)
painter.setBrush(brush)
if self.isFile:
drawBar("#FF4000", self.startTime, self.buildEnd)
else:
drawBar("#0040FF", self.startTime, self.buildEnd)
drawBar("#008080", self.buildEnd, self.linkQueueStart)
drawBar("#00C0C0", self.linkQueueStart, self.linkStart)
drawBar("#00E080", self.linkStart, self.endTime)
class SyntaxHighlighter( QtGui.QSyntaxHighlighter ):
class HighlightRule( object ):
def __init__(self, pattern, argument):
self.pattern = pattern
self.format = argument
def __init__(self, *args):
QtGui.QSyntaxHighlighter.__init__(self, *args)
self.highlightRules = []
self.commentStart = re.compile("/\\*")
self.commentEnd = re.compile("\\*/")
self.keywordFormat = QtGui.QTextCharFormat()
self.commentFormat = QtGui.QTextCharFormat()
self.stringFormat = QtGui.QTextCharFormat()
self.functionFormat = QtGui.QTextCharFormat()
self.keywordFormat.setForeground(QtGui.QColor("#800000"))
self.keywordFormat.setFontWeight(QtGui.QFont.Bold)
for pattern in [
"\\b__alignof\\b",
"\\b__asm\\b",
"\\b__assume\\b",
"\\b__based\\b",
"\\b__box\\b",
"\\b__cdecl\\b",
"\\b__declspec\\b",
"\\b__delegate\\b",
"\\b__event\\b",
"\\b__except\\b",
"\\b__fastcall\\b",
"\\b__finally\\b",
"\\b__forceinline\\b",
"\\b__gc\\b",
"\\b__hook\\b",
"\\b__identifier\\b",
"\\b__if_exists\\b",
"\\b__if_not_exists\\b",
"\\b__inline\\b",
"\\b__int16\\b",
"\\b__int32\\b",
"\\b__int64\\b",
"\\b__int8\\b",
"\\b__interface\\b",
"\\b__leave\\b",
"\\b__m128\\b",
"\\b__m128d\\b",
"\\b__m128i\\b",
"\\b__m64\\b",
"\\b__multiple_inheritance\\b",
"\\b__nogc\\b",
"\\b__noop\\b",
"\\b__pin\\b",
"\\b__property\\b",
"\\b__raise\\b",
"\\b__restrict\\b",
"\\b__single_inheritance\\b",
"\\b__stdcall\\b",
"\\b__super\\b",
"\\b__thiscall\\b",
"\\b__try\\b",
"\\b__try_cast\\b",
"\\b__unaligned\\b",
"\\b__uuidof\\b",
"\\b__value\\b",
"\\b__virtual_inheritance\\b",
"\\b__w64\\b",
"\\b__wchar_t\\b",
"\\babstract\\b",
"\\barray\\b",
"\\balignas\\b",
"\\balignof\\b",
"\\band\\b",
"\\band_eq\\b",
"\\basm\\b",
"\\bauto\\b",
"\\bbitand\\b",
"\\bbitor\\b",
"\\bbool\\b",
"\\bbreak\\b",
"\\bcase\\b",
"\\bcatch\\b",
"\\bchar\\b",
"\\bchar16_t\\b",
"\\bchar32_t\\b",
"\\bclass\\b",
"\\bcompl\\b",
"\\bconst\\b",
"\\bconst_cast\\b",
"\\bconstexpr\\b",
"\\bcontinue\\b",
"\\bdecltype\\b",
"\\bdefault\\b",
"\\bdelegate\\b",
"\\bdelete\\b",
"\\bdeprecated\\b",
"\\bdllexport\\b",
"\\bdllimport\\b",
"\\bdo\\b",
"\\bdouble\\b",
"\\bdynamic_cast\\b",
"\\belse\\b",
"\\benum\\b",
"\\bevent\\b",
"\\bexplicit\\b",
"\\bexport\\b",
"\\bextern\\b",
"\\bfalse\\b",
"\\bfinal\\b",
"\\bfinally\\b",
"\\bfloat\\b",
"\\bfor\\b",
"\\bfor each\\b",
"\\bfriend\\b",
"\\bfriend_as\\b",
"\\bgcnew\\b",
"\\bgeneric\\b",
"\\bgoto\\b",
"\\bif\\b",
"\\bin\\b",
"\\binitonly\\b",
"\\binline\\b",
"\\bint\\b",
"\\bint16_t\\b",
"\\bint32_t\\b",
"\\bint64_t\\b",
"\\bint8_t\\b",
"\\binterface\\b",
"\\binterior_ptr\\b",
"\\bliteral\\b",
"\\blong\\b",
"\\bmutable\\b",
"\\bnaked\\b",
"\\bnamespace\\b",
"\\bnew\\b",
"\\bnoexcept\\b",
"\\bnoinline\\b",
"\\bnoreturn\\b",
"\\bnot\\b",
"\\bnot_eq\\b",
"\\bnothrow\\b",
"\\bnovtable\\b",
"\\bNULL\\b",
"\\bnullptr\\b",
"\\bnullptr_t\\b",
"\\boperator\\b",
"\\bor\\b",
"\\bor_eq\\b",
"\\boverride\\b",
"\\bproperty\\b",
"\\bprivate\\b",
"\\bprotected\\b",
"\\bpublic\\b",
"\\braise\\b",
"\\bref\\b",
"\\bregister\\b",
"\\breinterpret_cast\\b",
"\\brestrict\\b",
"\\breturn\\b",
"\\bsafecast\\b",
"\\bsealed\\b",
"\\bselectany\\b",
"\\bshort\\b",
"\\bsignals\\b",
"\\bsigned\\b",
"\\bsize_t\\b",
"\\bsizeof\\b",
"\\bslots\\b",
"\\bstatic\\b",
"\\bstatic_assert\\b",
"\\bstatic_cast\\b",
"\\bstruct\\b",
"\\bswitch\\b",
"\\btemplate\\b",
"\\btypedef\\b",
"\\btypename\\b",
"\\bthis\\b",
"\\bthread\\b",
"\\bthread_local\\b",
"\\bthrow\\b",
"\\btrue\\b",
"\\btry\\b",
"\\btypeid\\b",
"\\buint16_t\\b",
"\\buint32_t\\b",
"\\buint64_t\\b",
"\\buint8_t\\b",
"\\bunion\\b",
"\\bunsigned\\b",
"\\busing\\b",
"\\buuid\\b",
"\\bvalue\\b",
"\\bvirtual\\b",
"\\bvoid\\b",
"\\bvolatile\\b",
"\\bwchar_t\\b",
"\\bwhile\\b",
"\\bxor\\b",
"\\bxor_eq\\b",
]:
self.highlightRules.append(SyntaxHighlighter.HighlightRule(re.compile(pattern), self.keywordFormat))
#self.functionFormat.setForeground(QtCore.Qt.darkMagenta)
#self.highlightRules.append(SyntaxHighlighter.HighlightRule(re.compile("\\b[A-Za-z0-9_]+(?=\\()"), self.functionFormat))
self.numberFormat = QtGui.QTextCharFormat()
self.numberFormat.setForeground(QtGui.QColor("#008c00"))
self.highlightRules.append(SyntaxHighlighter.HighlightRule(re.compile("\\b\d+\\b"), self.numberFormat))
self.symbolFormat = QtGui.QTextCharFormat()
self.symbolFormat.setForeground(QtGui.QColor("#808030"))
self.highlightRules.append(SyntaxHighlighter.HighlightRule(re.compile(r"[\[\]\+\=\-\*\/\(\)\{\}\;\,\.\<\>\?\&\^\%\!\~\|]"), self.symbolFormat))
self.commentFormat.setForeground(QtGui.QColor("#696969"))
self.highlightRules.append(SyntaxHighlighter.HighlightRule(re.compile("//[^\n]*"), self.commentFormat))
self.preprocessorFormat = QtGui.QTextCharFormat()
self.preprocessorFormat.setForeground(QtGui.QColor("#004a43"))
self.highlightRules.append(SyntaxHighlighter.HighlightRule(re.compile("^\s*#.*$"), self.preprocessorFormat))
self.stringFormat.setForeground(QtCore.Qt.darkCyan)
self.highlightRules.append(SyntaxHighlighter.HighlightRule(re.compile("\".*?\""), self.stringFormat))
def highlightBlock(self, line):
for rule in self.highlightRules:
match = rule.pattern.search(line)
while match:
start, end = match.span()
length = end - start
self.setFormat(start, length, rule.format)
match = rule.pattern.search(line, end)
self.setCurrentBlockState(0)
startIndex = 0
if self.previousBlockState() != 1:
match = self.commentStart.search(line)
if match:
startIndex = match.start()
else:
startIndex = -1
while startIndex >= 0:
endIndex = -1
match = self.commentEnd.search(line, startIndex)
if match:
endIndex = match.end()
length = -1
if endIndex == -1:
self.setCurrentBlockState(1)
length = len(line) - startIndex
else:
length = endIndex - startIndex
self.setFormat(startIndex, length, self.commentFormat)
match = self.commentStart.search(line, startIndex + length)
if match:
startIndex = match.start()
else:
startIndex = -1
class LineNumberArea( QtGui.QWidget ):
def __init__(self, editor):
QtGui.QWidget.__init__(self, editor)
self.editor = editor
self.buttonDown = False
def sizeHint(self):
return QtCore.QSize(self.editor.lineNumberAreaWidth(), 0)
def paintEvent(self, event):
self.editor.lineNumberAreaPaintEvent(event)
def mouseMoveEvent(self, event):
if self.buttonDown:
self.editor.sideBarMousePress(event)
def mousePressEvent(self, event):
if event.button() == QtCore.Qt.LeftButton:
self.buttonDown = True
self.editor.sideBarMousePress(event)
def mouseReleaseEvent(self, event):
if event.button() == QtCore.Qt.LeftButton:
self.buttonDown = False
class CodeEditor( QtGui.QPlainTextEdit ):
def __init__(self, parent, parentEditor, project, directory = None):
QtGui.QPlainTextEdit.__init__(self, parent)
self.parentEditor = parentEditor
self.project = project
font = QtGui.QFont()
font.setFamily("monospace")
font.setFixedPitch(True)
font.setPointSize(10)
metrics = QtGui.QFontMetrics(font)
self.setTabStopWidth(4 * metrics.width(' '))
self.setFont(font)
self.sideBar = LineNumberArea(self)
self.cursorPositionChanged.connect(self.highlightCurrentLine)
self.blockCountChanged.connect(self.updateLineNumberAreaWidth)
self.updateRequest.connect(self.updateLineNumberArea)
self.updateLineNumberAreaWidth(0)
self.highlightCurrentLine()
def lineNumberAreaPaintEvent(self, event):
painter = QtGui.QPainter(self.sideBar)
painter.fillRect(event.rect(), QtCore.Qt.lightGray)
block = self.firstVisibleBlock()
blockNum = block.blockNumber()
top = int(self.blockBoundingGeometry(block).translated(self.contentOffset()).top())
bottom = top + int(self.blockBoundingRect(block).height())
while block.isValid() and top <= event.rect().bottom():
			if block.isVisible() and bottom >= event.rect().top():
number = str(blockNum + 1)
painter.setPen(QtCore.Qt.black)
painter.drawText(0, top, self.sideBar.width(), self.fontMetrics().height(), QtCore.Qt.AlignRight, number)
block = block.next()
top = bottom
bottom = top + int(self.blockBoundingRect(block).height())
blockNum += 1
def lineNumberAreaWidth(self):
digits = 1
maxDigits = max(1, self.blockCount())
while maxDigits >= 10:
maxDigits /= 10
digits += 1
space = 3 + self.fontMetrics().width("9") * digits
return space
def resizeEvent(self, event):
QtGui.QPlainTextEdit.resizeEvent(self, event)
cr = self.contentsRect()
self.sideBar.setGeometry(QtCore.QRect(cr.left(), cr.top(), self.lineNumberAreaWidth(), cr.height()))
def updateLineNumberAreaWidth(self, blockCount):
self.setViewportMargins(self.lineNumberAreaWidth(), 0, 0, 0)
def highlightCurrentLine(self):
extraSelections = []
lineColor = "#DDEDEC"
selection = QtGui.QTextEdit.ExtraSelection()
selection.format.setBackground(QtGui.QColor(lineColor))
selection.format.setProperty(QtGui.QTextFormat.FullWidthSelection, True)
selection.cursor = self.textCursor()
selection.cursor.clearSelection()
extraSelections.append(selection)
self.setExtraSelections(extraSelections)
def updateLineNumberArea(self, rect, num):
if num:
self.sideBar.scroll(0, num)
else:
self.sideBar.update(0, rect.y(), self.sideBar.width(), rect.height())
if rect.contains(self.viewport().rect()):
self.updateLineNumberAreaWidth(0)
def sideBarMousePress(self, event):
pass
class CodeProfileDisplay(CodeEditor):
def __init__(self, parent, parentEditor, project, directory):
self.visualizationWidth = 15
CodeEditor.__init__(self, parent, parentEditor, project)
self.directory = directory
self.setReadOnly(True)
self.vals = []
self.highVal = 0.0
self.setMouseTracking(True)
self.selections = []
self.mousePos = None
self.mouseGlobalPos = None
self.maxVal = 0.0
self.settingValue = False
def keyPressEvent(self, event):
if not self.mousePos:
return
if event.key() == QtCore.Qt.Key_Control:
mouseEvent = QtGui.QMouseEvent(
QtCore.QEvent.MouseMove,
self.mousePos,
self.mouseGlobalPos,
QtCore.Qt.NoButton,
QtCore.Qt.NoButton,
QtGui.QApplication.keyboardModifiers()
)
self.mouseMoveEvent(mouseEvent)
def keyReleaseEvent(self, event):
if not self.mousePos:
return
if event.key() == QtCore.Qt.Key_Control:
mouseEvent = QtGui.QMouseEvent(
QtCore.QEvent.MouseMove,
self.mousePos,
self.mouseGlobalPos,
QtCore.Qt.NoButton,
QtCore.Qt.NoButton,
QtGui.QApplication.keyboardModifiers()
)
self.mouseMoveEvent(mouseEvent)
def mouseMoveEvent(self, event):
cursor = self.cursorForPosition(event.pos())
block = cursor.block()
line = str(block.text())
RMatch = re.search( r"#\s*include\s*[<\"](.*?)[\">]", line )
if RMatch:
extraSelections = list(self.selections)
selection = QtGui.QTextEdit.ExtraSelection()
selection.format.setFontUnderline(True)
modifiers = QtGui.QApplication.keyboardModifiers()
if modifiers == QtCore.Qt.ControlModifier:
selection.format.setForeground(QtGui.QColor("#0000FF"))
selection.format.setFontWeight(QtGui.QFont.Bold)
QApplication.setOverrideCursor(QtCore.Qt.PointingHandCursor)
QtGui.QToolTip.showText(event.globalPos(), "", self)
else:
QtGui.QToolTip.showText(event.globalPos(), "Ctrl+click to open profile view for {}".format(RMatch.group(1)), self)
QApplication.restoreOverrideCursor()
selection.cursor = QtGui.QTextCursor(self.document())
selection.cursor.movePosition(QtGui.QTextCursor.Down, QtGui.QTextCursor.MoveAnchor, block.blockNumber())
selection.cursor.movePosition(QtGui.QTextCursor.Right, QtGui.QTextCursor.MoveAnchor, RMatch.start())
selection.cursor.clearSelection()
selection.cursor.movePosition(QtGui.QTextCursor.Right, QtGui.QTextCursor.KeepAnchor, RMatch.end() - RMatch.start())
extraSelections.append(selection)
self.setExtraSelections(extraSelections)
self.mousePos = event.pos()
self.mouseGlobalPos = event.globalPos()
else:
QtGui.QToolTip.showText(event.globalPos(), "", self)
self.setExtraSelections(self.selections)
QApplication.restoreOverrideCursor()
self.mousePos = None
self.mouseGlobalPos = None
def highlightCurrentLine(self):
pass
def sideBarMousePress(self, event):
if event.pos().x() <= self.visualizationWidth:
totalLines = self.blockCount()
pct = float(event.pos().y()) / self.sideBar.rect().height()
cursor = self.textCursor()
block = cursor.block()
blockNo = block.blockNumber()
desiredBlockNo = int(totalLines * pct)
if blockNo > desiredBlockNo:
cursor.movePosition(QtGui.QTextCursor.Up, QtGui.QTextCursor.MoveAnchor, blockNo - desiredBlockNo)
else:
cursor.movePosition(QtGui.QTextCursor.Down, QtGui.QTextCursor.MoveAnchor, desiredBlockNo - blockNo)
self.setTextCursor(cursor)
self.centerCursor()
def mousePressEvent(self, event):
if event.button() == QtCore.Qt.LeftButton and event.modifiers() == QtCore.Qt.ControlModifier:
cursor = self.cursorForPosition(event.pos())
block = cursor.block()
line = str(block.text())
RMatch = re.search( r"#\s*include\s*[<\"](.*?)[\">]", line )
if RMatch:
includeFile = RMatch.group(1)
project = self.project
#First try: Absolute path relative to base file directory.
absPath = os.path.abspath(os.path.join(self.directory, includeFile))
if not os.access(absPath, os.F_OK):
#Second try: Look in the project's include directories.
for directory in project.includeDirs:
absPath = os.path.abspath(os.path.join(directory, includeFile))
if os.access(absPath, os.F_OK):
break
if not os.access(absPath, os.F_OK):
#Third try, brute force it against the filemap our parent has for us.
base = os.path.basename(includeFile)
if base in self.parentEditor.filemap:
options = self.parentEditor.filemap[base]
if len(options) == 1:
absPath = list(options)[0]
else:
log.LOG_ERROR("TODO: Multiple options exist for header {}: {}".format(includeFile, options))
return
else:
return
with open(absPath, "r") as f:
data = f.read().split("\n")
io = StringIO()
absPath = os.path.normcase(absPath)
baseFile = self.parentEditor.sourceFile
lineNo = 1
for line in data:
lineTime = 0.0
if lineNo in project.times[baseFile][absPath]:
lineTime = project.times[baseFile][absPath][lineNo]
io.write("{: 9.6f}\t\t{}\n".format(lineTime, line))
lineNo += 1
data = io.getvalue()
io.close()
window = EditorWindow(baseFile, 0, 0, CodeProfileDisplay, self, project=project, directory=os.path.dirname(absPath), data=data, filemap=self.parentEditor.filemap, baseFile=os.path.basename(absPath))
window.show()
def setPlainText(self, text):
CodeEditor.setPlainText(self, text)
text = text.split("\n")
class VisMode:
Mean = 1
HighVal = 3
Constant = 4
mode = VisMode.Mean
skipIncludes = True
maxVal = 0.0
for line in text:
if not line.strip():
continue
val = float(line.split('\t')[0])
maxVal = max(maxVal, val)
self.maxVal = maxVal
self.parentEditor.slider.setMaximum(self.toLog(maxVal))
self.parentEditor.slider.setMinimum(1)
if mode == VisMode.Mean:
highVal = 0.0
num = 0
for line in text:
if not line.strip():
continue
if skipIncludes:
RMatch = re.search( r"#\s*include\s*[<\"](.*?)[\">]", line )
if RMatch:
continue
val = float(line.split('\t')[0])
highVal += val
num += 1
if num == 0:
return
highVal /= num
highVal *= 2
if not highVal:
for line in text:
if not line.strip():
continue
val = float(line.split('\t')[0])
highVal += val
num += 1
if num == 0:
return
highVal /= num
highVal *= 2
elif mode == VisMode.HighVal:
highVal = 0.0
for line in text:
if not line.strip():
continue
if skipIncludes:
RMatch = re.search( r"#\s*include\s*[<\"](.*?)[\">]", line )
if RMatch:
continue
val = float(line.split('\t')[0])
highVal = max(highVal, val)
if not highVal:
for line in text:
if not line.strip():
continue
val = float(line.split('\t')[0])
highVal = max(highVal, val)
elif mode == VisMode.Constant:
highVal = 0.01
if not highVal:
return
self.highVal = highVal
self.settingValue = True
self.parentEditor.slider.setValue(self.toLog(highVal))
self.settingValue = False
self.parentEditor.textBox.setText("{:f}".format(highVal))
self.highlightProblemAreas(text)
def toLog(self, val):
normalized = float(val)/self.maxVal
return int(round(math.sqrt(normalized) * 1000))
def fromLog(self, val):
if val == 0:
return 0
val = float(val)/1000.0
return val * val * self.maxVal
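	# toLog/fromLog map the highlight threshold onto the 0-1000 slider with a
	# square-root scale so small times get more slider travel. Worked example
	# (assuming maxVal == 2.0 seconds): fromLog(500) = (0.5 ** 2) * 2.0 = 0.5s,
	# and toLog(0.5) = round(sqrt(0.25) * 1000) = 500, so the two invert each
	# other up to rounding.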
def sliderMoved(self, value):
if self.settingValue:
return
self.highVal = self.fromLog(value)
self.parentEditor.textBox.setText("{:f}".format(self.highVal))
if not self.parentEditor.slider.isSliderDown():
text = str(self.toPlainText())
self.highlightProblemAreas(text.split("\n"))
def textEdited(self):
try:
val = float(self.parentEditor.textBox.text())
except:
self.parentEditor.textBox.setText("{:f}".format(self.highVal))
else:
if val <= 0.0:
self.parentEditor.textBox.setText("{:f}".format(self.highVal))
return
if val > self.maxVal:
val = self.maxVal
self.parentEditor.textBox.setText("{:f}".format(val))
self.highVal = val
self.settingValue = True
self.parentEditor.slider.setValue(self.toLog(self.highVal))
self.settingValue = False
text = str(self.toPlainText())
self.highlightProblemAreas(text.split("\n"))
def sliderReleased(self):
self.highVal = self.fromLog(self.parentEditor.slider.value())
text = str(self.toPlainText())
self.highlightProblemAreas(text.split("\n"))
def highlightProblemAreas(self, text):
extraSelections = []
self.vals = []
lineNo = 0
for line in text:
if not line.strip():
continue
val = float(line.split('\t')[0])
if val > self.highVal:
val = self.highVal
selection = QtGui.QTextEdit.ExtraSelection()
			gbVals = 255 - int(math.ceil(255 * (val/self.highVal)))
selection.format.setBackground(QtGui.QColor(255, gbVals, gbVals))
selection.format.setProperty(QtGui.QTextFormat.FullWidthSelection, True)
selection.cursor = QtGui.QTextCursor(self.document())
selection.cursor.movePosition(QtGui.QTextCursor.Down, QtGui.QTextCursor.MoveAnchor, lineNo)
selection.cursor.clearSelection()
extraSelections.append(selection)
lineNo += 1
self.vals.append(val)
self.selections = extraSelections
self.setExtraSelections(extraSelections)
def lineNumberAreaWidth(self):
return self.visualizationWidth + CodeEditor.lineNumberAreaWidth(self)
def lineNumberAreaPaintEvent(self, event):
painter = QtGui.QPainter(self.sideBar)
painter.fillRect(event.rect(), QtCore.Qt.lightGray)
width = self.visualizationWidth
visualHeight = self.sideBar.rect().height()
height = min(visualHeight, len(self.vals))
image = QtGui.QImage(width, height, QtGui.QImage.Format_RGB32)
image.fill(QtGui.qRgb(255, 255, 255))
lineNo = 0
for val in self.vals:
y = int(lineNo * (float(height) / float(len(self.vals))))
color = QtGui.QColor(image.pixel(0, y))
gbVal = min(255 - int(math.ceil((val / self.highVal) * 255)), color.blue())
onColor = QtGui.qRgb(255, gbVal, gbVal)
for x in range(width):
image.setPixel(x, y, onColor)
lineNo += 1
block = self.firstVisibleBlock()
blockNum = block.blockNumber()
top = int(self.blockBoundingGeometry(block).translated(self.contentOffset()).top())
bottom = top + int(self.blockBoundingRect(block).height())
topLeft = self.sideBar.rect().topLeft()
bottomRight = self.sideBar.rect().bottomRight()
bottomRight.setX(self.visualizationWidth)
rect = QtCore.QRect(topLeft, bottomRight)
painter.drawImage(rect, image, image.rect())
image2 = QtGui.QImage(self.sideBar.rect().width(), self.sideBar.rect().height(), QtGui.QImage.Format_ARGB32)
firstNum = -1
lastNum = -1
while block.isValid() and top <= self.rect().bottom():
if block.isVisible() and bottom >= self.rect().top():
if firstNum == -1:
firstNum = blockNum
lastNum = blockNum + 1
block = block.next()
top = bottom
bottom = top + int(self.blockBoundingRect(block).height())
blockNum += 1
mult = float(self.sideBar.rect().height())/float(len(self.vals))
fillColor = QtGui.qRgba(192, 192, 192, 64)
onColor = QtGui.qRgba(64, 64, 64, 127)
offColor = QtGui.qRgba(127, 127, 127, 127)
image2.fill(offColor)
startPixel = int(math.floor(firstNum * mult))
endPixel = min(int(math.ceil(lastNum * mult)) - 1, self.sideBar.rect().height() - 1)
for i in range(startPixel, endPixel):
for j in range(self.sideBar.rect().width()):
image2.setPixel(j, i, fillColor)
image2.setPixel(0, i, onColor)
image2.setPixel(1, i, onColor)
image2.setPixel(self.sideBar.width()-2, i, onColor)
image2.setPixel(self.sideBar.width()-1, i, onColor)
for i in range(self.sideBar.rect().width()):
image2.setPixel(i, startPixel, onColor)
image2.setPixel(i, endPixel, onColor)
image2.setPixel(i, startPixel + 1, onColor)
image2.setPixel(i, endPixel - 1, onColor)
painter.drawImage(rect, image2, image2.rect())
block = self.firstVisibleBlock()
blockNum = block.blockNumber()
top = int(self.blockBoundingGeometry(block).translated(self.contentOffset()).top())
bottom = top + int(self.blockBoundingRect(block).height())
while block.isValid() and top <= event.rect().bottom():
if block.isVisible() and bottom >= event.rect().top():
number = str(blockNum + 1)
painter.setPen(QtCore.Qt.black)
painter.drawText(0, top, self.sideBar.width(), self.fontMetrics().height(), QtCore.Qt.AlignRight, number)
block = block.next()
top = bottom
bottom = top + int(self.blockBoundingRect(block).height())
blockNum += 1
class GridLineDelegate(QtGui.QStyledItemDelegate):
def __init__(self, parent, *args, **kwargs):
self.parent = parent
QtGui.QStyledItemDelegate.__init__(self, *args, **kwargs)
self.highCol = 0
self.lastRow = 0
def paint(self, painter, option, index):
QtGui.QStyledItemDelegate.paint(self, painter, option, index)
item = self.parent.itemFromIndex(index)
pen = QtGui.QPen()
pen.setWidth(1)
painter.setPen(pen)
if isinstance(item, TreeWidgetWithBarGraph):
painter.drawRect(option.rect)
painter.drawLine(option.rect.bottomLeft(), option.rect.bottomRight())
if index.row() <= self.lastRow:
self.highCol = index.column()
item.draw(painter)
elif index.column() == self.highCol:
item.draw(painter)
self.lastRow = index.row()
class EditorWindow( QMainWindow ):
def __init__(self, sourceFile, line, column, EditorType, parent, project = None, directory = None, baseFile = None, data = None, filemap = None, *args, **kwargs):
QMainWindow.__init__(self, parent, *args, **kwargs)
self.resize(1275, 600)
self.project = project
self.centralWidget = QtGui.QWidget(self)
self.centralWidget.setObjectName("centralWidget")
self.outerLayout = QtGui.QVBoxLayout(self.centralWidget)
self.editor = EditorType(self.centralWidget, self, project, directory)
self.editor.setStyleSheet(
"""
QPlainTextEdit
{
color: black;
background-color: white;
}
"""
)
self.filemap = filemap
self.highlighter = SyntaxHighlighter(self.editor.document())
self.editor.setLineWrapMode(QtGui.QPlainTextEdit.NoWrap)
self.statusBar = QtGui.QStatusBar()
self.setStatusBar(self.statusBar)
self.outerLayout.addWidget(self.editor)
self.highlighting = False
self.sourceFile = sourceFile
self.innerLayout = QtGui.QHBoxLayout()
if EditorType == CodeEditor:
self.isCodeEditor = True
horizontalSpacer = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.innerLayout.addItem(horizontalSpacer)
self.makeWriteable = QtGui.QPushButton(self.centralWidget)
self.makeWriteable.setText("Make Writeable")
self.makeWriteable.pressed.connect(self.MakeWriteable)
self.innerLayout.addWidget(self.makeWriteable)
if os.access(sourceFile, os.W_OK):
self.makeWriteable.hide()
else:
self.editor.setReadOnly(True)
self.saveButton = QtGui.QPushButton(self.centralWidget)
self.saveButton.setText("Save")
self.saveButton.pressed.connect(self.save)
self.innerLayout.addWidget(self.saveButton)
self.outerLayout.addLayout(self.innerLayout)
self.saveAction = QtGui.QAction(self)
self.saveAction.setShortcut( QtCore.Qt.CTRL | QtCore.Qt.Key_S )
self.saveAction.triggered.connect(self.save)
self.addAction(self.saveAction)
else:
self.isCodeEditor = False
label = QtGui.QLabel(self.centralWidget)
label.setText("Highlight values approaching:")
self.innerLayout.addWidget(label)
self.slider = QtGui.QSlider(QtCore.Qt.Horizontal, self.centralWidget)
self.innerLayout.addWidget(self.slider)
self.slider.valueChanged.connect(self.editor.sliderMoved)
self.slider.sliderReleased.connect(self.editor.sliderReleased)
self.textBox = QtGui.QLineEdit(self.centralWidget)
self.textBox.setMaximumWidth(160)
self.innerLayout.addWidget(self.textBox)
self.textBox.editingFinished.connect(self.editor.textEdited)
self.outerLayout.addLayout(self.innerLayout)
if data:
self.editor.setPlainText(data)
else:
with open(sourceFile, "r") as f:
self.editor.setPlainText(f.read())
self.setCentralWidget(self.centralWidget)
if baseFile:
self.setWindowTitle("Profile view: {}".format(baseFile))
else:
self.setWindowTitle(sourceFile)
def ScrollTo(self, line, column):
if line or column:
cursor = self.editor.textCursor()
cursor.setPosition(0)
if line:
line = int(line)
cursor.movePosition( QtGui.QTextCursor.Down, QtGui.QTextCursor.MoveAnchor, line - 1 )
if column:
column = int(column)
cursor.movePosition( QtGui.QTextCursor.NextCharacter, QtGui.QTextCursor.MoveAnchor, column - 1 )
self.editor.setTextCursor(cursor)
def MakeWriteable(self):
stats = os.stat(self.sourceFile)
mode = stats.st_mode
try:
os.chmod( self.sourceFile, mode | stat.S_IWRITE )
except:
self.statusBar.showMessage("Could not open file for writing. Permission error?.", 5000)
else:
self.makeWriteable.hide()
self.editor.setReadOnly(False)
self.statusBar.showMessage("File opened for writing.", 5000)
def save(self):
with open(self.sourceFile, "w") as f:
f.write(self.editor.toPlainText())
self.statusBar.showMessage("Saved.", 5000)
def closeEvent(self, event):
if self.isCodeEditor:
del self.parent().openWindows[self.sourceFile]
QMainWindow.closeEvent(self, event)
class MainWindow( QMainWindow ):
def __init__(self, *args, **kwargs):
self.exitRequested = False
QMainWindow.__init__(self, *args, **kwargs)
self.setObjectName("MainWindow")
self.resize(1275, 600)
self.centralWidget = QtGui.QWidget(self)
self.centralWidget.setObjectName("centralWidget")
self.outerLayout = QtGui.QVBoxLayout(self.centralWidget)
self.mainLayout = QtGui.QHBoxLayout()
self.m_splitter = QtGui.QSplitter(self.centralWidget)
self.m_splitter.setOrientation(QtCore.Qt.Vertical)
self.innerWidget = QtGui.QWidget(self.centralWidget)
self.innerLayout = QtGui.QVBoxLayout(self.innerWidget)
self.verticalLayout = QtGui.QVBoxLayout()
self.verticalLayout.setObjectName("verticalLayout")
self.m_buildSummaryLabel = QtGui.QLabel(self.innerWidget)
self.m_buildSummaryLabel.setObjectName("m_buildSummaryLabel")
font = QtGui.QFont()
font.setPointSize( 16 )
self.m_buildSummaryLabel.setFont(font)
self.verticalLayout.addWidget(self.m_buildSummaryLabel)
self.horizontalLayout = QtGui.QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
self.m_successfulBuildsLabel = QtGui.QLabel(self.innerWidget)
self.m_successfulBuildsLabel.setObjectName("m_successfulBuildsLabel")
self.horizontalLayout.addWidget(self.m_successfulBuildsLabel)
self.m_failedBuildsLabel = QtGui.QLabel(self.innerWidget)
self.m_failedBuildsLabel.setObjectName("m_failedBuildsLabel")
self.horizontalLayout.addWidget(self.m_failedBuildsLabel)
self.m_warningLabel = QtGui.QLabel(self.innerWidget)
        self.m_warningLabel.setObjectName("m_warningLabel")
self.horizontalLayout.addWidget(self.m_warningLabel)
self.m_errorLabel = QtGui.QLabel(self.innerWidget)
        self.m_errorLabel.setObjectName("m_errorLabel")
self.horizontalLayout.addWidget(self.m_errorLabel)
horizontalSpacer_2 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout.addItem(horizontalSpacer_2)
self.m_filesCompletedLabel = QtGui.QLabel(self.centralWidget)
self.m_filesCompletedLabel.setObjectName("m_filesCompletedLabel")
self.horizontalLayout.addWidget(self.m_filesCompletedLabel)
self.verticalLayout.addLayout(self.horizontalLayout)
self.m_mainProgressBar = QtGui.QProgressBar(self.centralWidget)
self.m_mainProgressBar.setObjectName("m_mainProgressBar")
self.m_mainProgressBar.setValue(0)
self.verticalLayout.addWidget(self.m_mainProgressBar)
self.topPane = QtGui.QTabWidget(self.innerWidget)
self.buildWidget = QtGui.QWidget(self.innerWidget)
verticalLayout = QtGui.QVBoxLayout(self.buildWidget)
self.m_buildTree = QtGui.QTreeWidget(self.buildWidget)
self.m_buildTree.setColumnCount(12)
self.m_buildTree.setUniformRowHeights(True)
self.m_treeHeader = TreeWidgetItem()
self.m_buildTree.setHeaderItem(self.m_treeHeader)
self.m_buildTree.setObjectName("m_buildTree")
self.m_buildTree.setAlternatingRowColors(True)
self.m_buildTree.setUniformRowHeights(True)
self.m_buildTree.setSortingEnabled(True)
self.m_buildTree.setAnimated(True)
self.m_buildTree.header().setStretchLastSection(True)
self.m_buildTree.currentItemChanged.connect(self.SelectionChanged)
self.m_buildTree.itemExpanded.connect(self.UpdateProjects)
self.m_buildTree.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
self.m_buildTree.customContextMenuRequested.connect(self.buildTreeContextMenu)
verticalLayout.addWidget(self.m_buildTree)
self.topPane.addTab(self.buildWidget, "Build Progress")
self.timelinePage = QtGui.QWidget(self.centralWidget)
verticalLayout = QtGui.QVBoxLayout(self.timelinePage)
self.timelineWidget = QtGui.QTreeWidget(self.timelinePage)
self.m_timelineHeader = TreeWidgetItem()
self.timelineWidget.setHeaderItem(self.m_timelineHeader)
self.timelineWidget.setFocusPolicy(QtCore.Qt.NoFocus)
self.timelineWidget.setEditTriggers(QtGui.QAbstractItemView.NoEditTriggers)
self.timelineWidget.setProperty("showDropIndicator", False)
#self.timelineWidget.setSelectionMode(QtGui.QAbstractItemView.NoSelection)
self.timelineWidget.setAlternatingRowColors(True)
#self.timelineWidget.setVerticalScrollMode(QtGui.QAbstractItemView.ScrollPerPixel)
#self.timelineWidget.setAnimated(True)
self.timelineWidget.header().setDefaultSectionSize(30)
self.timelineWidget.header().setStretchLastSection(False)
if USING_PYQT5:
self.timelineWidget.header().setSectionResizeMode(QtWidgets.QHeaderView.Fixed)
else:
self.timelineWidget.header().setResizeMode(QtGui.QHeaderView.Fixed)
self.timelineWidget.itemExpanded.connect(self.TimelineItemExpended)
self.timelineWidget.itemCollapsed.connect(self.TimelineItemExpended)
self.timelineWidget.setItemDelegate(GridLineDelegate(self.timelineWidget))
self.timelineWidget.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
self.timelineWidget.customContextMenuRequested.connect(self.timelineContextMenu)
verticalLayout.addWidget(self.timelineWidget)
self.topPane.addTab(self.timelinePage, "Build Timeline")
self.verticalLayout.addWidget(self.topPane)
self.innerLayout.addLayout(self.verticalLayout)
self.m_pushButton = QtGui.QPushButton(self.buildWidget)
        self.m_pushButton.setObjectName("m_pushButton")
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.m_pushButton.sizePolicy().hasHeightForWidth())
self.m_pushButton.setSizePolicy(sizePolicy)
self.m_pushButton.setMaximumSize(QtCore.QSize(16777215, 20))
self.m_pushButton.setCheckable(True)
self.m_pushButton.toggled.connect(self.ButtonClicked)
self.innerLayout.addWidget(self.m_pushButton)
self.m_splitter.addWidget(self.innerWidget)
self.innerWidget2 = QtGui.QTabWidget(self.centralWidget)
self.textPage = QtGui.QWidget(self.innerWidget2)
self.innerLayout2 = QtGui.QVBoxLayout(self.textPage)
self.m_textEdit = QtGui.QTextEdit(self.textPage)
self.m_textEdit.setObjectName("textEdit")
self.m_textEdit.setReadOnly(True)
self.m_textEdit.setFontFamily("monospace")
self.innerLayout2.addWidget(self.m_textEdit)
self.commandPage = QtGui.QWidget(self.innerWidget2)
self.innerLayout2 = QtGui.QVBoxLayout(self.commandPage)
self.m_commandEdit = QtGui.QTextEdit(self.commandPage)
self.m_commandEdit.setObjectName("commandEdit")
self.m_commandEdit.setReadOnly(True)
self.m_commandEdit.setFontFamily("monospace")
self.innerLayout2.addWidget(self.m_commandEdit)
self.errorsPage = QtGui.QWidget(self.innerWidget2)
self.innerLayout3 = QtGui.QVBoxLayout(self.errorsPage)
self.m_errorTree = QtGui.QTreeWidget(self.errorsPage)
self.m_errorTree.setColumnCount(5)
self.m_errorTree.setUniformRowHeights(True)
self.m_treeHeader2 = TreeWidgetItem()
self.m_errorTree.setHeaderItem(self.m_treeHeader2)
self.m_errorTree.setObjectName("m_errorTree")
self.m_errorTree.setAlternatingRowColors(True)
self.m_errorTree.setUniformRowHeights(True)
self.m_errorTree.setSortingEnabled(True)
self.m_errorTree.setAnimated(True)
self.m_errorTree.header().setStretchLastSection(True)
self.m_errorTree.itemDoubleClicked.connect(self.OpenFileForEdit)
self.innerLayout3.addWidget(self.m_errorTree)
self.innerWidget2.addTab(self.errorsPage, "Errors/Warnings")
self.innerWidget2.addTab(self.textPage, "Text Output")
self.innerWidget2.addTab(self.commandPage, "Command Line")
self.m_splitter.addWidget(self.innerWidget2)
self.m_splitter.setSizes( [ 1, 0 ] )
self.m_splitter.setCollapsible( 0, False )
self.m_splitter.splitterMoved.connect(self.SplitterMoved)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.m_splitter.sizePolicy().hasHeightForWidth())
self.m_splitter.setSizePolicy(sizePolicy)
self.mainLayout.addWidget(self.m_splitter)
self.outerLayout.addLayout(self.mainLayout)
#self.horizontalLayout_2 = QtGui.QHBoxLayout()
#self.horizontalLayout_2.setObjectName("horizontalLayout_2")
#horizontalSpacer = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
#self.horizontalLayout_2.addItem(horizontalSpacer)
self.m_timeLeftLabel = QtGui.QLabel(self.centralWidget)
#self.m_timeLeftLabel.setObjectName("m_timeLeftLabel")
#self.horizontalLayout_2.addWidget(self.m_timeLeftLabel)
self.m_timeLeftLabel.hide()
#self.outerLayout.addLayout(self.horizontalLayout_2)
self.setCentralWidget(self.centralWidget)
self.retranslateUi()
self.timer = QtCore.QTimer()
self.timer.timeout.connect(self.onTick)
self.timer.start(100)
QtCore.QMetaObject.connectSlotsByName(self)
self.readyToClose = False
self.exiting = False
self.marqueeValue = 0
self.marqueeInverted = True
self.successfulBuilds = set()
self.failedBuilds = set()
self.m_ignoreButton = False
self.pulseColor = 0
self.pulseUp = False
self.animatingBars = {}
self.projectToItem = {}
self.itemToProject = {}
self.warningErrorCount = 0
self.openWindows = {}
self.tick = 0
self.selectedItem = None
def buildTreeContextMenu(self, point):
if not _shared_globals.profile:
return
if not self.readyToClose:
return
item = self.m_buildTree.itemAt(point)
parent = item.parent()
if not parent:
return
if parent.parent():
return
self.selectedItem = item
menu = QtGui.QMenu(self)
action = QtGui.QAction("View profile data", self)
action.triggered.connect(self.buildTreeViewProfile)
menu.addAction(action)
menu.popup(self.m_buildTree.viewport().mapToGlobal(point))
def timelineContextMenu(self, point):
if not _shared_globals.profile:
return
if not self.readyToClose:
return
item = self.timelineWidget.itemAt(point)
parent = item.parent()
if not parent:
return
if parent.parent():
return
self.selectedItem = item
menu = QtGui.QMenu(self)
action = QtGui.QAction("View profile data", self)
action.triggered.connect(self.timelineViewProfile)
menu.addAction(action)
menu.popup(self.timelineWidget.viewport().mapToGlobal(point))
def launchProfileView(self, project, filename):
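        # Build an annotated copy of the file in which each line is prefixed with the
        # time the profiler attributed to it (taken from project.times), then show that
        # text in an EditorWindow using the CodeProfileDisplay editor.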
baseFile = os.path.basename(filename)
directory = os.path.dirname(filename)
with open(filename, "r") as f:
data = f.read().split("\n")
io = StringIO()
lineNo = 1
for line in data:
lineTime = 0.0
if lineNo in project.times[filename][filename]:
lineTime = project.times[filename][filename][lineNo]
io.write("{: 9.6f}\t\t{}\n".format(lineTime, line))
lineNo += 1
data = io.getvalue()
io.close()
filemap = {}
for otherfile in project.times[filename]:
baseName = os.path.basename(otherfile)
if baseName not in filemap:
filemap[baseName] = {otherfile}
else:
filemap[baseName].add(otherfile)
window = EditorWindow(filename, 0, 0, CodeProfileDisplay, self, baseFile=baseFile, project=project, directory=directory, data=data, filemap=filemap)
window.show()
def buildTreeViewProfile(self):
if not self.selectedItem:
return
item = self.selectedItem
filename = os.path.normcase(str(item.toolTip(3)))
project = self.itemToProject[str(item.parent().text(0))]
self.launchProfileView(project, filename)
def timelineViewProfile(self):
if not self.selectedItem:
return
item = self.selectedItem
filename = os.path.normcase(str(item.toolTip(0)))
        idx = self.timelineWidget.indexOfTopLevelItem(item.parent())
project = _shared_globals.sortedProjects[idx]
self.launchProfileView(project, filename)
def ButtonClicked(self, toggled):
if self.m_ignoreButton:
return
if toggled:
self.m_splitter.setSizes( [ 1275, max( self.width() - 1275, 600 ) ] )
self.m_errorTree.setColumnWidth( 0, 50 )
self.m_errorTree.setColumnWidth( 1, max(250, self.m_errorTree.width() - 350) )
self.m_errorTree.setColumnWidth( 2, 200 )
self.m_errorTree.setColumnWidth( 3, 50 )
self.m_errorTree.setColumnWidth( 4, 50 )
self._setOutputButtonTextWithDownArrows()
else:
self.m_splitter.setSizes( [ 1, 0 ] )
self._setOutputButtonTextWithUpArrows()
def OpenFileForEdit(self, item, column):
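        # Double-clicking an error/warning row lands here: open the referenced file at
        # the reported line/column, re-using an already-open editor window for that
        # file if there is one, and skipping binary build outputs (.o, .so, .exe, ...).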
file = str(item.toolTip(2))
line = item.text(3)
col = item.text(4)
if not file or not os.access(file, os.F_OK):
return
if file in self.openWindows:
window = self.openWindows[file]
window.setWindowState(QtCore.Qt.WindowActive)
window.activateWindow()
window.raise_()
window.ScrollTo(line, col)
return
if(
#TODO: Somehow get extension from the active toolchain?
not file.endswith(".o")
and not file.endswith(".so")
and not file.endswith(".a")
and not file.endswith(".exe")
and not file.endswith(".dll")
and not file.endswith(".lib")
and not file.endswith(".obj")
):
window = EditorWindow(file, line, col, CodeEditor, self)
window.show()
window.ScrollTo(line, col)
self.openWindows[file] = window
def resizeEvent(self, event):
QMainWindow.resizeEvent(self, event)
textBoxSize = self.m_splitter.sizes()[1]
if textBoxSize != 0:
self.m_splitter.setSizes( [ 1275, max( self.width() - 1275, 600 ) ] )
self.m_errorTree.setColumnWidth( 0, 50 )
self.m_errorTree.setColumnWidth( 1, max(250, self.m_errorTree.width() - 350) )
self.m_errorTree.setColumnWidth( 2, 200 )
self.m_errorTree.setColumnWidth( 3, 50 )
self.m_errorTree.setColumnWidth( 4, 50 )
def SplitterMoved(self, index, pos):
textBoxSize = self.m_splitter.sizes()[1]
if textBoxSize == 0:
if self.m_pushButton.isChecked():
self.m_ignoreButton = True
self.m_pushButton.setChecked(False)
self.m_ignoreButton = False
self._setOutputButtonTextWithUpArrows()
else:
if not self.m_pushButton.isChecked():
self.m_ignoreButton = True
self.m_pushButton.setChecked(True)
self.m_ignoreButton = False
self.m_errorTree.setColumnWidth( 0, 50 )
self.m_errorTree.setColumnWidth( 1, max(250, self.m_errorTree.width() - 350) )
self.m_errorTree.setColumnWidth( 2, 200 )
self.m_errorTree.setColumnWidth( 3, 50 )
self.m_errorTree.setColumnWidth( 4, 50 )
self._setOutputButtonTextWithDownArrows()
def SelectionChanged(self, current, previous):
if self.m_textEdit.isVisible():
if current is None:
outStr = ""
for project in _shared_globals.sortedProjects:
outStr += ("=" * 40) + "\n\n"
outStr += project.name
outStr += ("=" * 40) + "\n\n"
with project.mutex:
for filename in project.compileOutput:
outStr += filename
errors = ""
output = ""
if filename in project.compileErrors:
errors = project.compileErrors[filename]
output = project.compileOutput[filename]
if errors or output:
outStr += "\n" + ("-" * len(filename)) + "\n\n"
outStr += "\n" + ("-" * 40) + "\n\n"
if errors:
outStr += "ERROR OUTPUT:\n\n" + errors + "\n\n"
if output:
outStr += "OUTPUT:\n\n" + output + "\n\n"
if project.linkErrors:
outStr += "LINK ERRORS:\n\n" + project.linkErrors + "\n\n"
if project.linkOutput:
outStr += "LINK OUTPUT:\n\n" + project.linkOutput + "\n\n"
outStr += "\n\n"
if outStr != self.m_textEdit.toPlainText():
self.m_textEdit.setText(outStr)
else:
for project in _shared_globals.sortedProjects:
widget = self.projectToItem[project]
if not widget:
continue
if widget == current:
outStr = ""
with project.mutex:
for filename in project.compileOutput:
errors = ""
output = ""
if filename in project.compileErrors:
errors = project.compileErrors[filename]
output = project.compileOutput[filename]
if errors or output:
outStr += filename
outStr += "\n" + ("=" * 40) + "\n\n"
if errors:
outStr += "ERROR OUTPUT:\n\n" + errors + "\n\n"
if output:
outStr += "OUTPUT:\n\n" + output + "\n\n"
if project.linkErrors:
outStr += "LINK ERRORS:\n\n" + project.linkErrors + "\n\n"
if project.linkOutput:
outStr += "LINK OUTPUT:\n\n" + project.linkOutput + "\n\n"
if outStr != self.m_textEdit.toPlainText():
self.m_textEdit.setText(outStr)
elif widget.isExpanded():
def HandleChild( idx, file ):
file = os.path.normcase(file)
childWidget = widget.child(idx)
if childWidget == current:
outStr = ""
errors = ""
output = ""
with project.mutex:
if file in project.compileErrors:
errors = project.compileErrors[file]
if file in project.compileOutput:
output = project.compileOutput[file]
if errors or output:
outStr += file
outStr += "\n" + ("=" * 40) + "\n\n"
if errors:
outStr += "ERROR OUTPUT:\n\n" + errors + "\n\n"
if output:
outStr += "OUTPUT:\n\n" + output + "\n\n"
if outStr != self.m_textEdit.toPlainText():
self.m_textEdit.setText(outStr)
idx = 0
if project.needsPrecompileCpp:
HandleChild( idx, project.cppHeaderFile )
idx += 1
if project.needsPrecompileC:
HandleChild( idx, project.cHeaderFile )
idx += 1
used_chunks = set()
for source in project.allsources:
inThisBuild = False
if source not in project._finalChunkSet:
chunk = project.get_chunk( source )
if not chunk:
continue
extension = "." + source.rsplit(".", 1)[1]
if extension in project.cExtensions:
extension = ".c"
else:
extension = ".cpp"
chunk = os.path.join( project.csbuildDir, "{}{}".format( chunk, extension ) )
if chunk in used_chunks:
continue
if chunk in project._finalChunkSet:
inThisBuild = True
source = chunk
used_chunks.add(chunk)
else:
inThisBuild = True
if inThisBuild:
HandleChild( idx, source )
idx += 1
elif self.m_commandEdit.isVisible():
if current is not None:
for project in _shared_globals.sortedProjects:
widget = self.projectToItem[project]
if not widget:
continue
if widget == current:
self.m_commandEdit.setText(project.linkCommand)
elif widget.isExpanded():
def HandleChild( idx, file ):
file = os.path.normcase(file)
childWidget = widget.child(idx)
if childWidget == current:
if file in project.compileCommands:
self.m_commandEdit.setText(project.compileCommands[file])
else:
self.m_commandEdit.setText("")
idx = 0
if project.needsPrecompileCpp:
HandleChild( idx, project.cppHeaderFile )
idx += 1
if project.needsPrecompileC:
HandleChild( idx, project.cHeaderFile )
idx += 1
used_chunks = set()
for source in project.allsources:
inThisBuild = False
if source not in project._finalChunkSet:
chunk = project.get_chunk( source )
if not chunk:
continue
extension = "." + source.rsplit(".", 1)[1]
if extension in project.cExtensions:
extension = ".c"
else:
extension = ".cpp"
chunk = os.path.join( project.csbuildDir, "{}{}".format( chunk, extension ) )
if chunk in used_chunks:
continue
if chunk in project._finalChunkSet:
inThisBuild = True
source = chunk
used_chunks.add(chunk)
else:
inThisBuild = True
if inThisBuild:
HandleChild( idx, source )
idx += 1
else:
if current != previous:
while self.m_errorTree.takeTopLevelItem(0):
pass
def HandleError(datas):
if datas is None:
return
for data in datas:
exists = False
for i in range(self.m_errorTree.topLevelItemCount()):
tempWidget = self.m_errorTree.topLevelItem(i)
if(
tempWidget.text(1) == data.text
and tempWidget.text(2) == os.path.basename( data.file )
and (
( tempWidget.text(3) == "" and data.line == -1 )
or ( tempWidget.text(3) == str(data.line) )
)
and (
( tempWidget.text(4) == "" and data.column == -1 )
or ( tempWidget.text(4) == str(data.column) )
)
):
#don't re-add data that already exists.
exists = True
break
if exists:
continue
font = QtGui.QFont()
font.setFamily("monospace")
newItem = TreeWidgetItem()
if data.level == _shared_globals.OutputLevel.WARNING:
newItem.setText(0, "W")
brush = QtGui.QBrush( QtCore.Qt.darkYellow )
newItem.setForeground(0, brush )
#newItem.setForeground(1, brush )
#newItem.setForeground(2, brush )
#newItem.setForeground(3, brush )
#newItem.setForeground(4, brush )
elif data.level == _shared_globals.OutputLevel.ERROR:
newItem.setText(0, "E")
brush = QtGui.QBrush( QtCore.Qt.red )
newItem.setForeground(0, brush )
#newItem.setForeground(1, brush )
#newItem.setForeground(2, brush )
#newItem.setForeground(3, brush )
#newItem.setForeground(4, brush )
font.setBold(True)
elif data.level == _shared_globals.OutputLevel.NOTE:
newItem.setText(0, "N")
else:
newItem.setText(0, "?")
newItem.setText(1, data.text)
newItem.setToolTip(1, data.text)
if data.file:
newItem.setText(2, os.path.basename(data.file))
newItem.setToolTip(2, os.path.abspath(data.file))
if data.line != -1:
newItem.setText(3, str(data.line))
if data.column != -1:
newItem.setText(4, str(data.column))
newItem.setFont(0, font)
newItem.setFont(1, font)
newItem.setFont(2, font)
newItem.setFont(3, font)
newItem.setFont(4, font)
for detail in data.details:
font = QtGui.QFont()
font.setItalic(True)
font.setFamily("monospace")
childItem = TreeWidgetItem(newItem)
childItem.setDisabled(True)
if detail.level == _shared_globals.OutputLevel.NOTE:
font.setBold(True)
childItem.setText(1, detail.text)
childItem.setToolTip(1, detail.text)
if detail.file:
childItem.setText(2, os.path.basename(detail.file))
childItem.setToolTip(2, os.path.abspath(detail.file))
if detail.line != -1:
childItem.setText(3, str(detail.line))
if detail.column != -1:
childItem.setText(4, str(detail.column))
childItem.setFont(0, font)
childItem.setFont(1, font)
childItem.setFont(2, font)
childItem.setFont(3, font)
childItem.setFont(4, font)
newItem.addChild(childItem)
self.m_errorTree.addTopLevelItem(newItem)
self.m_errorTree.setSortingEnabled(False)
if current is None:
for project in _shared_globals.sortedProjects:
with project.mutex:
for filename in project.parsedErrors:
HandleError(project.parsedErrors[filename])
HandleError(project.parsedLinkErrors)
else:
for project in _shared_globals.sortedProjects:
widget = self.projectToItem[project]
if not widget:
continue
if widget == current:
with project.mutex:
for filename in project.parsedErrors:
HandleError(project.parsedErrors[filename])
HandleError(project.parsedLinkErrors)
elif widget.isExpanded():
def HandleChild( idx, file ):
file = os.path.normcase(file)
childWidget = widget.child(idx)
if childWidget == current:
with project.mutex:
if file in project.parsedErrors:
HandleError(project.parsedErrors[file])
idx = 0
if project.needsPrecompileCpp:
HandleChild( idx, project.cppHeaderFile )
idx += 1
if project.needsPrecompileC:
HandleChild( idx, project.cHeaderFile )
idx += 1
used_chunks = set()
for source in project.allsources:
inThisBuild = False
if source not in project._finalChunkSet:
chunk = project.get_chunk( source )
if not chunk:
continue
extension = "." + source.rsplit(".", 1)[1]
if extension in project.cExtensions:
extension = ".c"
else:
extension = ".cpp"
chunk = os.path.join( project.csbuildDir, "{}{}".format( chunk, extension ) )
if chunk in used_chunks:
continue
if chunk in project._finalChunkSet:
inThisBuild = True
source = chunk
used_chunks.add(chunk)
else:
inThisBuild = True
if inThisBuild:
HandleChild( idx, source )
idx += 1
self.m_errorTree.setSortingEnabled(True)
def TimelineItemExpended(self, item):
self.UpdateTimeline(False)
def UpdateTimeline(self, addTime = False):
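        # When addTime is True, grow the timeline header by one column per elapsed
        # second of build time; in either case refresh the start/end markers for each
        # project row (and for each file row while the project is expanded).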
needsUpdate = False
if addTime:
font = QtGui.QFont()
font.setPointSize(5)
curtime = time.time( ) - _shared_globals.starttime
mult = 1
curtime *= mult
cols = self.m_timelineHeader.columnCount()
colsNeeded = int(math.ceil(curtime)) + 1
if colsNeeded > cols:
scrollBar = self.timelineWidget.horizontalScrollBar()
max = scrollBar.maximum()
needsUpdate = True
for i in range(colsNeeded - cols):
idx = cols + i - 1
self.m_timelineHeader.setFont(idx + 1, font)
if idx % (10*mult) == 0:
minutes = int(math.floor( idx / (60*mult) ))
seconds = int(round( idx % (60*mult) ))
self.m_timelineHeader.setText(idx+1, "{}:{:02}".format(minutes, seconds/mult))
else:
self.m_timelineHeader.setText(idx+1, "")
if scrollBar.value() == max:
scrollBar.setValue(scrollBar.maximum())
else:
needsUpdate = True
if not needsUpdate:
return
idx = 0
for project in _shared_globals.sortedProjects:
item = self.timelineWidget.topLevelItem(idx)
if project.startTime != 0:
item.setStartTime(project.startTime)
if project.buildEnd != 0:
item.setBuildEnd(project.buildEnd)
if project.linkQueueStart != 0:
item.setLinkQueueStart(project.linkQueueStart)
if project.linkStart != 0:
item.setLinkStart(project.linkStart)
if project.endTime != 0:
item.setEndTime(project.endTime)
if item.isExpanded() or item.childrenShowing():
item.setChildrenShowing(item.isExpanded())
def HandleChildTimeline( idx2, file ):
childWidget = item.child(idx2)
file = os.path.normcase(file)
project.mutex.acquire( )
try:
startTime = project.fileStart[file]
except:
startTime = 0
try:
endTime = project.fileEnd[file]
except:
endTime = 0
project.mutex.release( )
if startTime != 0:
childWidget.setStartTime(startTime)
if endTime != 0:
childWidget.setBuildEnd(endTime)
idx2 = 0
if project.needsPrecompileCpp:
HandleChildTimeline( idx2, project.cppHeaderFile )
idx2 += 1
if project.needsPrecompileC:
HandleChildTimeline( idx2, project.cHeaderFile )
idx2 += 1
used_chunks = set()
for source in project.allsources:
inThisBuild = False
if source not in project._finalChunkSet:
chunk = project.get_chunk( source )
if not chunk:
continue
extension = "." + source.rsplit(".", 1)[1]
if extension in project.cExtensions:
extension = ".c"
else:
extension = ".cpp"
chunk = os.path.join( project.csbuildDir, "{}{}".format( chunk, extension ) )
if chunk in used_chunks:
continue
if chunk in project._finalChunkSet:
inThisBuild = True
source = chunk
used_chunks.add(chunk)
else:
inThisBuild = True
if inThisBuild:
HandleChildTimeline( idx2, source )
idx2 += 1
idx += 1
def UpdateProjects(self, expandedItem = None):
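        # Refresh the build tree. When triggered by an itemExpanded signal only that
        # project is redrawn; otherwise the full project list is scanned and redrawn.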
updatedProjects = []
if expandedItem is not None:
text = str( expandedItem.text(0) )
if text and text in self.itemToProject:
updatedProjects = [ self.itemToProject[text] ]
else:
for project in _shared_globals.sortedProjects:
with project.mutex:
if project.updated or project:
updatedProjects.append(project)
project.updated = False
class SharedLocals(object):
foundAnError = bool(self.warningErrorCount != 0)
def drawProgressBar( progressBar, widget, state, startTime, endTime, percent, forFile, warnings, errors ):
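            # Update a single tree row: progress-bar value and colour, status text,
            # warning/error counts, and the start/finish/duration columns, based on
            # the build state of the project or file it represents.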
if warnings > 0:
brush = QtGui.QBrush( QtCore.Qt.darkYellow )
font = QtGui.QFont()
font.setBold(True)
widget.setForeground( 7, brush )
widget.setFont( 7, font )
if errors > 0:
brush = QtGui.QBrush( QtCore.Qt.red )
font = QtGui.QFont()
font.setBold(True)
widget.setForeground( 8, brush )
widget.setFont( 8, font )
if ( warnings > 0 or errors > 0 ) and not SharedLocals.foundAnError:
self.m_buildTree.setCurrentItem(widget)
if not self.m_pushButton.isChecked():
self.m_pushButton.setChecked(True)
SharedLocals.foundAnError = True
if _shared_globals.ProjectState.BUILDING <= state < _shared_globals.ProjectState.FAILED:
if not forFile or state != _shared_globals.ProjectState.BUILDING:
if state == _shared_globals.ProjectState.BUILDING:
if percent < 1:
percent = 1
value = progressBar.value()
quarter = max( 4.0, (percent - value) / 4.0 )
if value < percent - quarter:
progressBar.setValue( value + quarter )
else:
progressBar.setValue( percent )
else:
progressBar.setValue( percent )
progressBar.setTextVisible(True)
if widget.text(1) != str(percent):
widget.setText(1, str(percent))
else:
if widget.text(1) != "0":
widget.setText(1, "0")
progressBar.setFormat( "%p%" )
if state >= _shared_globals.ProjectState.BUILDING:
widget.setText(7, str(warnings))
widget.setText(8, str(errors))
widget.setText(9, time.asctime(time.localtime(startTime)))
if state == _shared_globals.ProjectState.BUILDING:
self.animatingBars[progressBar] = ( widget, state, startTime, endTime, percent, forFile, warnings, errors )
widget.setText(2, "Building")
if forFile:
progressBar.setStyleSheet(
"""
QProgressBar::chunk
{{
background-color: #FF{:02x}00;
}}
QProgressBar
{{
border: 1px solid black;
border-radius: 3px;
padding: 0px;
text-align: center;
}}
""".format(self.pulseColor+127)
)
progressBar.setValue( 100 )
progressBar.setTextVisible(False)
else:
progressBar.setStyleSheet(
"""
QProgressBar::chunk
{{
background-color: #00{:02x}FF;
}}
QProgressBar
{{
border: 1px solid black;
border-radius: 3px;
padding: 0px;
text-align: center;
}}
""".format(self.pulseColor)
)
elif state == _shared_globals.ProjectState.LINK_QUEUED:
if progressBar in self.animatingBars:
del self.animatingBars[progressBar]
widget.setText(2,"Link/Queue")
progressBar.setStyleSheet(
"""
QProgressBar::chunk
{
background-color: #00C0C0;
}
QProgressBar
{
border: 1px solid black;
border-radius: 3px;
background: #505050;
padding: 0px;
text-align: center;
}
"""
)
elif state == _shared_globals.ProjectState.WAITING_FOR_LINK:
if progressBar in self.animatingBars:
del self.animatingBars[progressBar]
widget.setText(2,"Link/Wait")
progressBar.setStyleSheet(
"""
QProgressBar::chunk
{
background-color: #008080;
}
QProgressBar
{
border: 1px solid black;
border-radius: 3px;
background: #505050;
padding: 0px;
text-align: center;
}
"""
)
elif state == _shared_globals.ProjectState.LINKING:
self.animatingBars[progressBar] = ( widget, state, startTime, endTime, percent, forFile, warnings, errors )
widget.setText(2, "Linking")
progressBar.setStyleSheet(
"""
QProgressBar::chunk
{{
background-color: #00E0{:02x};
}}
QProgressBar
{{
border: 1px solid black;
border-radius: 3px;
background: #505050;
padding: 0px;
text-align: center;
color: black;
}}
""".format(self.pulseColor + 64)
)
elif state == _shared_globals.ProjectState.FINISHED:
if progressBar in self.animatingBars:
del self.animatingBars[progressBar]
widget.setText(2, "Done!")
progressBar.setStyleSheet(
"""
QProgressBar::chunk
{{
background-color: #{};
}}
QProgressBar
{{
border: 1px solid black;
border-radius: 3px;
background: #505050;
padding: 0px;
text-align: center;
color: black;
}}
""".format( "ADFFD0" if forFile else "00FF80" )
)
widget.setText(10, time.asctime(time.localtime(endTime)))
timeDiff = endTime - startTime
minutes = math.floor( timeDiff / 60 )
seconds = math.floor( timeDiff % 60 )
widget.setText(11, "{0:2}:{1:02}".format( int(minutes), int(seconds) ) )
elif state == _shared_globals.ProjectState.FAILED or state == _shared_globals.ProjectState.LINK_FAILED:
if progressBar in self.animatingBars:
del self.animatingBars[progressBar]
progressBar.setTextVisible(True)
progressBar.setStyleSheet(
"""
QProgressBar::chunk
{
background-color: #800000;
}
QProgressBar
{
border: 1px solid black;
border-radius: 3px;
background: #505050;
padding: 0px;
text-align: center;
}
"""
)
progressBar.setValue(100)
if state == _shared_globals.ProjectState.FAILED:
widget.setText(2, "Failed!")
progressBar.setFormat("FAILED!")
else:
widget.setText(2, "Link Failed!")
progressBar.setFormat("LINK FAILED!")
widget.setText(10, time.asctime(time.localtime(endTime)))
timeDiff = endTime - startTime
minutes = math.floor( timeDiff / 60 )
seconds = math.floor( timeDiff % 60 )
widget.setText(11, "{0:2}:{1:02}".format( int(minutes), int(seconds) ) )
elif state == _shared_globals.ProjectState.UP_TO_DATE:
self.SetProgressBarUpToDate( progressBar, widget, endTime, startTime, forFile )
elif state == _shared_globals.ProjectState.ABORTED:
if progressBar in self.animatingBars:
del self.animatingBars[progressBar]
widget.setText(2, "Aborted!")
progressBar.setTextVisible(True)
progressBar.setStyleSheet(
"""
QProgressBar::chunk
{
background-color: #800040;
}
QProgressBar
{
border: 1px solid black;
border-radius: 3px;
background: #505050;
padding: 0px;
text-align: center;
}
"""
)
progressBar.setValue(100)
if forFile:
progressBar.setFormat("ABORTED! (PCH Failed!)")
else:
progressBar.setFormat("ABORTED! (Dependency Failed!)")
if updatedProjects:
self.m_buildTree.setSortingEnabled(False)
if self.pulseColor == 0 or self.pulseColor == 128:
self.pulseUp = not self.pulseUp
if self.pulseUp:
self.pulseColor += 32
else:
self.pulseColor -= 32
if self.pulseColor > 128:
self.pulseColor = 128
if self.pulseColor < 0:
self.pulseColor = 0
selectedWidget = self.m_buildTree.currentItem()
for project in updatedProjects:
widget = self.projectToItem[project]
if not widget:
continue
if selectedWidget == widget:
self.SelectionChanged(selectedWidget, selectedWidget)
progressBar = self.m_buildTree.itemWidget(widget, 1)
project.mutex.acquire( )
complete = project.compilationCompleted
project.mutex.release( )
total = len( project._finalChunkSet ) + int(
project.needsPrecompileC ) + int(
project.needsPrecompileCpp )
percent = 100 if total == 0 else ( float(complete) / float(total) ) * 100
if percent == 100 and project.state < _shared_globals.ProjectState.FINISHED:
percent = 99
drawProgressBar( progressBar, widget, project.state, project.startTime, project.endTime, percent, False, project.warnings, project.errors )
if project.state == _shared_globals.ProjectState.FINISHED or project.state == _shared_globals.ProjectState.UP_TO_DATE:
self.successfulBuilds.add(project.key)
elif(
project.state == _shared_globals.ProjectState.FAILED
or project.state == _shared_globals.ProjectState.LINK_FAILED
or project.state == _shared_globals.ProjectState.ABORTED
):
self.failedBuilds.add(project.key)
if widget.isExpanded():
def HandleChildProgressBar( idx, file ):
childWidget = widget.child(idx)
progressBar = self.m_buildTree.itemWidget(childWidget, 1)
file = os.path.normcase(file)
project.mutex.acquire( )
try:
state = project.fileStatus[file]
except:
state = _shared_globals.ProjectState.PENDING
try:
startTime = project.fileStart[file]
except:
startTime = 0
try:
endTime = project.fileEnd[file]
except:
endTime = 0
warnings = 0
errors = 0
if file in project.warningsByFile:
warnings = project.warningsByFile[file]
if file in project.errorsByFile:
errors = project.errorsByFile[file]
project.mutex.release( )
drawProgressBar( progressBar, childWidget, state, startTime, endTime, 0 if state <= _shared_globals.ProjectState.BUILDING else 100, True, warnings, errors )
if selectedWidget == childWidget:
self.SelectionChanged(selectedWidget, selectedWidget)
idx = 0
if project.needsPrecompileCpp:
HandleChildProgressBar( idx, project.cppHeaderFile )
idx += 1
if project.needsPrecompileC:
HandleChildProgressBar( idx, project.cHeaderFile )
idx += 1
used_chunks = set()
for source in project.allsources:
inThisBuild = False
if source not in project._finalChunkSet:
chunk = project.get_chunk( source )
if not chunk:
continue
extension = "." + source.rsplit(".", 1)[1]
if extension in project.cExtensions:
extension = ".c"
else:
extension = ".cpp"
chunk = os.path.join( project.csbuildDir, "{}{}".format( chunk, extension ) )
if chunk in used_chunks:
continue
if chunk in project._finalChunkSet:
inThisBuild = True
source = chunk
used_chunks.add(chunk)
else:
inThisBuild = True
if inThisBuild:
HandleChildProgressBar( idx, source )
idx += 1
self.m_buildTree.setSortingEnabled(True)
successcount = len(self.successfulBuilds)
failcount = len(self.failedBuilds)
self.m_successfulBuildsLabel.setText("Successful Builds: {}".format(successcount))
self.m_failedBuildsLabel.setText("Failed Builds: {}".format(failcount))
if failcount > 0:
font = QtGui.QFont()
font.setBold(True)
self.m_failedBuildsLabel.setFont( font )
palette = QtGui.QPalette()
palette.setColor( self.m_errorLabel.foregroundRole(), QtCore.Qt.red )
self.m_failedBuildsLabel.setPalette(palette)
if successcount + failcount == len(_shared_globals.sortedProjects):
if _shared_globals.profile and not self.readyToClose:
window = QtGui.QMainWindow(self)
window.centralWidget = QtGui.QWidget(window)
window.setCentralWidget(window.centralWidget)
layout = QtGui.QHBoxLayout(window.centralWidget)
window.editor = QtGui.QPlainTextEdit(window.centralWidget)
font = QtGui.QFont()
font.setFamily("monospace")
window.editor.setFont(font)
window.editor.setLineWrapMode(QtGui.QPlainTextEdit.NoWrap)
layout.addWidget(window.editor)
summedTimes = {}
for project in _shared_globals.sortedProjects:
for filename in project.summedTimes:
if filename in summedTimes:
summedTimes[filename] += project.summedTimes[filename]
else:
summedTimes[filename] = project.summedTimes[filename]
builder = StringIO()
for item in sorted(summedTimes.items(), key=lambda tup: tup[1], reverse=True):
builder.write("{:f}\t::{}\n".format(item[1], item[0]))
window.editor.setPlainText(builder.getvalue())
window.setWindowTitle("Profile Summary")
window.resize(1275,600)
window.show()
self.readyToClose = True
if _shared_globals.autoCloseGui and failcount == 0:
self.exiting = True
self.close()
if self.animatingBars:
for bar in self.animatingBars:
data = self.animatingBars[bar]
drawProgressBar( bar, *data )
def retranslateUi(self):
self.setWindowTitle("CSBuild {}".format(csbuild.__version__.strip()))
self.m_buildSummaryLabel.setText("Build Started at 00:00... (00:00)")
self.m_successfulBuildsLabel.setText("Successful Builds: 0")
self.m_failedBuildsLabel.setText("Failed Builds: 0")
self.m_warningLabel.setText("Warnings: 0")
self.m_errorLabel.setText("Errors: 0")
self.m_treeHeader.setText(0, "#")
self.m_treeHeader.setText(1, "Progress")
self.m_treeHeader.setText(2, "Status")
self.m_treeHeader.setText(3, "Name")
self.m_treeHeader.setText(4, "Target")
self.m_treeHeader.setText(5, "Arch")
self.m_treeHeader.setText(6, "Toolchain")
self.m_treeHeader.setText(7, "W")
self.m_treeHeader.setText(8, "E")
self.m_treeHeader.setText(9, "Build Started")
self.m_treeHeader.setText(10, "Build Finished")
self.m_treeHeader.setText(11, "Time")
self.m_treeHeader.setColumnNumeric(0)
self.m_treeHeader.setColumnNumeric(1)
self.m_treeHeader.setColumnNumeric(6)
self.m_treeHeader.setColumnNumeric(7)
self.m_buildTree.setColumnWidth( 0, 50 )
self.m_buildTree.setColumnWidth( 1, 250 )
self.m_buildTree.setColumnWidth( 2, 75 )
self.m_buildTree.setColumnWidth( 3, 125 )
self.m_buildTree.setColumnWidth( 4, 75 )
self.m_buildTree.setColumnWidth( 5, 75 )
self.m_buildTree.setColumnWidth( 6, 75 )
self.m_buildTree.setColumnWidth( 7, 25 )
self.m_buildTree.setColumnWidth( 8, 25 )
self.m_buildTree.setColumnWidth( 9, 175 )
self.m_buildTree.setColumnWidth( 10, 175 )
self.m_buildTree.setColumnWidth( 11, 50 )
self.m_timelineHeader.setText(0, "Name")
self.timelineWidget.setColumnWidth(0,250)
self.m_treeHeader2.setText(0, "Type")
self.m_treeHeader2.setText(1, "Output")
self.m_treeHeader2.setText(2, "File")
self.m_treeHeader2.setText(3, "Line")
self.m_treeHeader2.setText(4, "Col")
self.m_treeHeader2.setColumnNumeric(3)
self.m_treeHeader2.setColumnNumeric(4)
self.m_errorTree.setColumnWidth( 0, 50 )
self.m_errorTree.setColumnWidth( 1, max(250, self.m_errorTree.width() - 350) )
self.m_errorTree.setColumnWidth( 2, 200 )
self.m_errorTree.setColumnWidth( 3, 50 )
self.m_errorTree.setColumnWidth( 4, 50 )
self.m_filesCompletedLabel.setText("0/0 files compiled")
self.m_timeLeftLabel.setText("Est. Time Left: 0:00")
self._setOutputButtonTextWithUpArrows()
def _setOutputButtonTextWithUpArrows(self):
        outputButtonText = "▴ Output ▴"
if sys.version_info < (3,0):
outputButtonText = outputButtonText.decode("utf-8")
self.m_pushButton.setText(outputButtonText)
def _setOutputButtonTextWithDownArrows(self):
        outputButtonText = "▾ Output ▾"
if sys.version_info < (3,0):
outputButtonText = outputButtonText.decode("utf-8")
self.m_pushButton.setText(outputButtonText)
def onTick(self):
self.UpdateProjects()
self.UpdateTimeline(True)
self.tick += 1
totalCompletedCompiles = 0
for project in _shared_globals.sortedProjects:
totalCompletedCompiles += project.compilationCompleted
perc = 100 if _shared_globals.total_compiles == 0 else float(totalCompletedCompiles)/float(_shared_globals.total_compiles) * 100
if perc == 100 and not self.readyToClose:
perc = 99
self.m_mainProgressBar.setValue( perc )
self.m_filesCompletedLabel.setText("{}/{} files compiled".format(totalCompletedCompiles, _shared_globals.total_compiles))
curtime = time.time( )
timeDiff = curtime - _shared_globals.starttime
minutes = math.floor( timeDiff / 60 )
seconds = math.floor( timeDiff % 60 )
self.m_buildSummaryLabel.setText("Build Started {0}... ({1}:{2:02})".format( time.asctime(time.localtime(_shared_globals.starttime)), int(minutes), int(seconds) ))
with _shared_globals.sgmutex:
warningcount = _shared_globals.warningcount
errorcount = _shared_globals.errorcount
self.m_warningLabel.setText("Warnings: {}".format(warningcount))
self.m_errorLabel.setText("Errors: {}".format(errorcount))
if warningcount > 0:
font = QtGui.QFont()
font.setBold(True)
self.m_warningLabel.setFont( font )
palette = QtGui.QPalette()
palette.setColor( self.m_warningLabel.foregroundRole(), QtCore.Qt.darkYellow )
self.m_warningLabel.setPalette(palette)
if errorcount > 0:
font = QtGui.QFont()
font.setBold(True)
self.m_errorLabel.setFont( font )
palette = QtGui.QPalette()
palette.setColor( self.m_errorLabel.foregroundRole(), QtCore.Qt.red )
self.m_errorLabel.setPalette(palette)
self.warningErrorCount = warningcount + errorcount
if self.exitRequested:
self.timer.stop()
self.close()
elif self.readyToClose:
self.timer.stop()
def closeEvent(self, event):
if self.exitRequested:
QMainWindow.closeEvent(self, event)
return
if not self.readyToClose:
answer = QtGui.QMessageBox.question(
self,
"Really close?",
"A compile is still in progress. Closing will cancel it. Are you sure you want to close?",
QtGui.QMessageBox.Yes | QtGui.QMessageBox.No,
QtGui.QMessageBox.No
)
if answer == QtGui.QMessageBox.Yes:
QMainWindow.closeEvent(self, event)
self.timer.stop()
csbuild.Exit(0)
else:
event.ignore()
else:
QMainWindow.closeEvent(self, event)
self.timer.stop()
def SetProgressBarUpToDate( self, progressBar, widget, endTime, startTime, forFile ):
if progressBar in self.animatingBars:
del self.animatingBars[progressBar]
widget.setText(2, "Up-to-date!")
progressBar.setTextVisible(True)
progressBar.setStyleSheet(
"""
QProgressBar::chunk
{{
background-color: #{};
}}
QProgressBar
{{
border: 1px solid black;
border-radius: 3px;
background: #505050;
padding: 0px;
text-align: center;
color: black;
}}
""".format( "ADFFD0" if forFile else "00FF80" )
)
progressBar.setValue(100)
progressBar.setFormat("Up-to-date!")
if endTime != 0 and startTime != 0:
widget.setText(10, time.asctime(time.localtime(endTime)))
timeDiff = endTime - startTime
minutes = math.floor( timeDiff / 60 )
seconds = math.floor( timeDiff % 60 )
widget.setText(11, "{0:2}:{1:02}".format( int(minutes), int(seconds) ) )
class GuiThread( threading.Thread ):
"""Multithreaded build system, launches a new thread to run the compiler in.
Uses a threading.BoundedSemaphore object to keep the number of threads equal to the number of processors on the
machine.
"""
def __init__( self ):
"""Initialize the object. Also handles above-mentioned bug with dummy threads."""
threading.Thread.__init__( self )
self.app = None
self.window = None
#Prevent certain versions of python from choking on dummy threads.
if not hasattr( threading.Thread, "_Thread__block" ):
threading.Thread._Thread__block = _shared_globals.dummy_block( )
def run( self ):
self.app = QApplication([])
global lock
lock.release()
window = MainWindow()
self.window = window
window.m_buildTree.setSortingEnabled(False)
row = 0
for project in _shared_globals.sortedProjects:
row += 1
widgetItem = TreeWidgetItem()
window.m_buildTree.addTopLevelItem(widgetItem)
widgetItem.setText(0, str(row))
widgetItem.setText(1, "1000")
widgetItem.setText(2, "Pending...")
widgetItem.setText(3, project.name)
widgetItem.setToolTip(3, project.name)
widgetItem.setText(4, project.targetName)
widgetItem.setToolTip(4, project.targetName)
widgetItem.setText(5, project.outputArchitecture)
widgetItem.setToolTip(5, project.outputArchitecture)
widgetItem.setText(6, project.activeToolchainName)
widgetItem.setToolTip(6, project.activeToolchainName)
widgetItem.setText(7, "0")
widgetItem.setText(8, "0")
widgetItem2 = TreeWidgetWithBarGraph(window.timelineWidget, window.timelineWidget, False)
window.timelineWidget.addTopLevelItem(widgetItem2)
widgetItem2.setText(0, "{} ({} {}/{})".format(project.name, project.targetName, project.outputArchitecture, project.activeToolchainName ))
window.projectToItem[project] = widgetItem
window.itemToProject[str(row)] = project
def AddProgressBar( widgetItem):
progressBar = QtGui.QProgressBar()
progressBar.setStyleSheet(
"""
QProgressBar::chunk
{
background-color: #808080;
}
QProgressBar
{
background-color: #808080;
border: 1px solid black;
border-radius: 3px;
padding: 0px;
text-align: center;
}
"""
)
progressBar.setFormat("Pending...")
progressBar.setValue(0)
window.m_buildTree.setItemWidget( widgetItem, 1, progressBar )
AddProgressBar( widgetItem )
idx = 0
font = QtGui.QFont()
font.setItalic(True)
if project.needsPrecompileCpp:
idx += 1
childItem = TreeWidgetItem( widgetItem )
childItem.setText(0, "{}.{}".format(row, idx))
childItem.setText(1, "1000")
childItem.setText(2, "Pending...")
childItem.setText(3, os.path.basename(project.cppHeaderFile))
childItem.setToolTip(3, project.cppHeaderFile)
childItem.setText(4, project.targetName)
childItem.setToolTip(4, project.targetName)
childItem.setText(5, project.outputArchitecture)
childItem.setToolTip(5, project.outputArchitecture)
childItem.setText(6, project.activeToolchainName)
childItem.setToolTip(6, project.activeToolchainName)
childItem.setText(7, "0")
childItem.setText(8, "0")
childItem.setFont(0, font)
childItem.setFont(1, font)
childItem.setFont(2, font)
childItem.setFont(3, font)
childItem.setFont(4, font)
childItem.setFont(5, font)
childItem.setFont(6, font)
childItem.setFont(7, font)
childItem.setFont(8, font)
childItem.setFont(9, font)
childItem.setFont(10, font)
AddProgressBar( childItem )
widgetItem.addChild(childItem)
timelineChild = TreeWidgetWithBarGraph(widgetItem2, window.timelineWidget, True)
timelineChild.setText(0, os.path.basename(project.cppHeaderFile))
timelineChild.setToolTip(0, project.cppHeaderFile)
widgetItem2.addChild(timelineChild)
for header in project.cppPchContents:
subChildItem = TreeWidgetItem( childItem )
subChildItem.setText( 0, os.path.basename(header) )
subChildItem.setFirstColumnSpanned(True)
subChildItem.setToolTip( 0, header )
childItem.addChild(subChildItem)
timelineSubChild = TreeWidgetItem(timelineChild)
timelineSubChild.setText( 0, os.path.basename(header) )
timelineSubChild.setFirstColumnSpanned(True)
timelineSubChild.setToolTip( 0, header )
timelineChild.addChild(timelineSubChild)
if project.needsPrecompileC:
idx += 1
childItem = TreeWidgetItem( widgetItem )
childItem.setText(0, "{}.{}".format(row, idx))
childItem.setText(1, "1000")
childItem.setText(2, "Pending...")
childItem.setText(3, os.path.basename(project.cHeaderFile))
childItem.setToolTip(3, project.cHeaderFile)
childItem.setText(4, project.targetName)
childItem.setToolTip(4, project.targetName)
childItem.setText(5, project.outputArchitecture)
childItem.setToolTip(5, project.outputArchitecture)
childItem.setText(6, project.activeToolchainName)
childItem.setToolTip(6, project.activeToolchainName)
childItem.setText(7, "0")
childItem.setText(8, "0")
childItem.setFont(0, font)
childItem.setFont(1, font)
childItem.setFont(2, font)
childItem.setFont(3, font)
childItem.setFont(4, font)
childItem.setFont(5, font)
childItem.setFont(6, font)
childItem.setFont(7, font)
childItem.setFont(8, font)
childItem.setFont(9, font)
childItem.setFont(10, font)
AddProgressBar( childItem )
widgetItem.addChild(childItem)
timelineChild = TreeWidgetItem(widgetItem2)
timelineChild.setText(0, os.path.basename(project.cHeaderFile))
timelineChild.setToolTip(0, project.cHeaderFile)
widgetItem2.addChild(timelineChild)
for header in project.cPchContents:
subChildItem = TreeWidgetItem( childItem )
subChildItem.setText( 0, os.path.basename(header) )
subChildItem.setFirstColumnSpanned(True)
subChildItem.setToolTip( 0, header )
childItem.addChild(subChildItem)
timelineSubChild = TreeWidgetItem(timelineChild)
timelineSubChild.setText( 0, os.path.basename(header) )
timelineSubChild.setFirstColumnSpanned(True)
timelineSubChild.setToolTip( 0, header )
timelineChild.addChild(timelineSubChild)
used_chunks = set()
for source in project.allsources:
inThisBuild = False
if source not in project._finalChunkSet:
chunk = project.get_chunk( source )
if not chunk:
continue
extension = "." + source.rsplit(".", 1)[1]
if extension in project.cExtensions:
extension = ".c"
else:
extension = ".cpp"
chunk = os.path.join( project.csbuildDir, "{}{}".format( chunk, extension ) )
if chunk in used_chunks:
continue
if chunk in project._finalChunkSet:
inThisBuild = True
source = chunk
used_chunks.add(chunk)
else:
inThisBuild = True
idx += 1
childItem = TreeWidgetItem( widgetItem )
childItem.setText(0, "{}.{}".format(row, idx))
if inThisBuild:
childItem.setText(1, "1000")
childItem.setText(2, "Pending...")
else:
childItem.setText(1, "100")
#"Up-to-date!" text gets set by window.SetProgressBarUpToDate
name = os.path.basename(source)
if source in project.splitChunks:
name = "[Split Chunk] {}".format(name)
childItem.setText(3, name)
childItem.setToolTip(3, source)
childItem.setText(4, project.targetName)
childItem.setToolTip(4, project.targetName)
childItem.setText(5, project.outputArchitecture)
childItem.setToolTip(5, project.outputArchitecture)
childItem.setText(6, project.activeToolchainName)
childItem.setToolTip(6, project.activeToolchainName)
childItem.setText(7, "0")
childItem.setText(8, "0")
childItem.setFont(0, font)
childItem.setFont(1, font)
childItem.setFont(2, font)
childItem.setFont(3, font)
childItem.setFont(4, font)
childItem.setFont(5, font)
childItem.setFont(6, font)
childItem.setFont(7, font)
childItem.setFont(8, font)
childItem.setFont(9, font)
childItem.setFont(10, font)
AddProgressBar( childItem )
if not inThisBuild:
window.SetProgressBarUpToDate( window.m_buildTree.itemWidget(childItem, 1), childItem, 0, 0, True )
widgetItem.addChild(childItem)
timelineChild = TreeWidgetWithBarGraph(widgetItem2, window.timelineWidget, True)
timelineChild.setText(0, os.path.basename(source))
timelineChild.setToolTip(0, source)
widgetItem2.addChild(timelineChild)
if source in project.chunksByFile:
for piece in project.chunksByFile[source]:
subChildItem = TreeWidgetItem( childItem )
subChildItem.setText( 0, os.path.basename( piece ) )
subChildItem.setFirstColumnSpanned(True)
subChildItem.setToolTip( 0, piece )
childItem.addChild(subChildItem)
timelineSubChild = TreeWidgetItem(timelineChild)
timelineSubChild.setText( 0, os.path.basename(piece) )
timelineSubChild.setFirstColumnSpanned(True)
timelineSubChild.setToolTip( 0, piece )
timelineChild.addChild(timelineSubChild)
window.m_buildTree.setSortingEnabled(True)
window.show()
self.app.exec_()
def stop(self):
self.window.exitRequested = True
_thread = None
lock = threading.Lock()
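# The lock acts as a simple handshake: run() below acquires it twice, so it blocks
# until GuiThread.run() has created the QApplication and released the lock once.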
def run():
global _thread
_thread = GuiThread()
_thread.start()
lock.acquire()
lock.acquire()
def stop():
global _thread
if _thread and threading.current_thread() != _thread:
_thread.stop()
def join():
global _thread
if _thread and threading.current_thread() != _thread:
_thread.join()
| brandonmbare/csbuild | csbuild/_gui.py | Python | mit | 88,329 | 0.0338 |
#! /usr/bin/env python
# Released to the public domain, by Tim Peters, 03 October 2000.
"""reindent [-d][-r][-v] path ...
-d Dry run. Analyze, but don't make any changes to, files.
-r Recurse. Search for all .py files in subdirectories too.
-v Verbose. Print informative msgs; else no output.
Change Python (.py) files to use 4-space indents and no hard tab characters.
Also trim excess whitespace from ends of lines, and empty lines at the ends
of files. Ensure the last line ends with a newline.
Pass one or more file and/or directory paths. When a directory path, all
.py files within the directory will be examined, and, if the -r option is
given, likewise recursively for subdirectories.
Overwrites files in place, renaming the originals with a .bak extension.
If reindent finds nothing to change, the file is left alone. If reindent
does change a file, the changed file is a fixed-point for reindent (i.e.,
running reindent on the resulting .py file won't change it again).
The hard part of reindenting is figuring out what to do with comment
lines. So long as the input files get a clean bill of health from
tabnanny.py, reindent should do a good job.
"""
__version__ = "1"
import tokenize
import os
import sys
verbose = 0
recurse = 0
dryrun = 0
def errprint(*args):
sep = ""
for arg in args:
sys.stderr.write(sep + str(arg))
sep = " "
sys.stderr.write("\n")
def main():
import getopt
global verbose, recurse, dryrun
try:
opts, args = getopt.getopt(sys.argv[1:], "drv")
except getopt.error, msg:
errprint(msg)
return
for o, a in opts:
if o == '-d':
dryrun += 1
elif o == '-r':
recurse += 1
elif o == '-v':
verbose += 1
if not args:
errprint("Usage:", __doc__)
return
for arg in args:
check(arg)
def check(file):
if os.path.isdir(file) and not os.path.islink(file):
if verbose:
print "listing directory", file
names = os.listdir(file)
for name in names:
fullname = os.path.join(file, name)
if ((recurse and os.path.isdir(fullname) and
not os.path.islink(fullname))
or name.lower().endswith(".py")):
check(fullname)
return
if verbose:
print "checking", file, "...",
try:
f = open(file)
except IOError, msg:
errprint("%s: I/O Error: %s" % (file, str(msg)))
return
r = Reindenter(f)
f.close()
if r.run():
if verbose:
print "changed."
if dryrun:
print "But this is a dry run, so leaving it alone."
if not dryrun:
bak = file + ".bak"
if os.path.exists(bak):
os.remove(bak)
os.rename(file, bak)
if verbose:
print "renamed", file, "to", bak
f = open(file, "w")
r.write(f)
f.close()
if verbose:
print "wrote new", file
else:
if verbose:
print "unchanged."
class Reindenter:
def __init__(self, f, eol="\n"):
self.find_stmt = 1 # next token begins a fresh stmt?
self.level = 0 # current indent level
self.eol = eol
# Raw file lines.
self.raw = f.readlines()
# File lines, rstripped & tab-expanded. Dummy at start is so
# that we can use tokenize's 1-based line numbering easily.
# Note that a line is all-blank iff it's "\n".
self.lines = [line.rstrip().expandtabs() + self.eol
for line in self.raw]
self.lines.insert(0, None)
self.index = 1 # index into self.lines of next line
# List of (lineno, indentlevel) pairs, one for each stmt and
# comment line. indentlevel is -1 for comment lines, as a
# signal that tokenize doesn't know what to do about them;
# indeed, they're our headache!
self.stats = []
def run(self):
tokenize.tokenize(self.getline, self.tokeneater)
# Remove trailing empty lines.
lines = self.lines
while lines and lines[-1] == self.eol:
lines.pop()
# Sentinel.
stats = self.stats
stats.append((len(lines), 0))
# Map count of leading spaces to # we want.
have2want = {}
# Program after transformation.
after = self.after = []
for i in range(len(stats)-1):
thisstmt, thislevel = stats[i]
nextstmt = stats[i+1][0]
have = getlspace(lines[thisstmt])
want = thislevel * 4
if want < 0:
# A comment line.
if have:
# An indented comment line. If we saw the same
# indentation before, reuse what it most recently
# mapped to.
want = have2want.get(have, -1)
if want < 0:
# Then it probably belongs to the next real stmt.
for j in xrange(i+1, len(stats)-1):
jline, jlevel = stats[j]
if jlevel >= 0:
if have == getlspace(lines[jline]):
want = jlevel * 4
break
if want < 0: # Maybe it's a hanging
# comment like this one,
# in which case we should shift it like its base
# line got shifted.
for j in xrange(i-1, -1, -1):
jline, jlevel = stats[j]
if jlevel >= 0:
want = have + getlspace(after[jline-1]) - \
getlspace(lines[jline])
break
if want < 0:
# Still no luck -- leave it alone.
want = have
else:
want = 0
assert want >= 0
have2want[have] = want
diff = want - have
if diff == 0 or have == 0:
after.extend(lines[thisstmt:nextstmt])
else:
for line in lines[thisstmt:nextstmt]:
if diff > 0:
if line == self.eol:
after.append(line)
else:
after.append(" " * diff + line)
else:
remove = min(getlspace(line), -diff)
after.append(line[remove:])
return self.raw != self.after
def write(self, f):
f.writelines(self.after)
# Line-getter for tokenize.
def getline(self):
if self.index >= len(self.lines):
line = ""
else:
line = self.lines[self.index]
self.index += 1
return line
# Line-eater for tokenize.
def tokeneater(self, type, token, (sline, scol), end, line,
INDENT=tokenize.INDENT,
DEDENT=tokenize.DEDENT,
NEWLINE=tokenize.NEWLINE,
COMMENT=tokenize.COMMENT,
NL=tokenize.NL):
if type == NEWLINE:
# A program statement, or ENDMARKER, will eventually follow,
# after some (possibly empty) run of tokens of the form
# (NL | COMMENT)* (INDENT | DEDENT+)?
self.find_stmt = 1
elif type == INDENT:
self.find_stmt = 1
self.level += 1
elif type == DEDENT:
self.find_stmt = 1
self.level -= 1
elif type == COMMENT:
if self.find_stmt:
self.stats.append((sline, -1))
# but we're still looking for a new stmt, so leave
# find_stmt alone
elif type == NL:
pass
elif self.find_stmt:
# This is the first "real token" following a NEWLINE, so it
# must be the first token of the next program statement, or an
# ENDMARKER.
self.find_stmt = 0
if line: # not endmarker
self.stats.append((sline, self.level))
# Count number of leading blanks.
def getlspace(line):
i, n = 0, len(line)
while i < n and line[i] == " ":
i += 1
return i
if __name__ == '__main__':
main()
| niavok/perroquet | utils/reindent.py | Python | gpl-3.0 | 8,897 | 0.001236 |
from boto.s3.connection import S3Connection
def main():
conn = S3Connection()
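    # With no explicit keys, boto typically resolves AWS credentials from the
    # environment (AWS_ACCESS_KEY_ID / AWS_SECRET_ACCESS_KEY) or its config files.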
buckets = conn.get_all_buckets()
for b in buckets:
print b.name
if __name__ == "__main__":
main()
| chrisbarr/bilious-rutabaga | bucket_lister/__init__.py | Python | mit | 204 | 0.009804 |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module contains simple input/output related functionality that is not
part of a larger framework or standard.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from ...extern import six
from ...extern.six.moves import range
__all__ = ['fnpickle', 'fnunpickle']
def fnunpickle(fileorname, number=0, usecPickle=True):
""" Unpickle pickled objects from a specified file and return the contents.
Parameters
----------
fileorname : str or file-like
The file name or file from which to unpickle objects. If a file object,
it should have been opened in binary mode.
number : int
If 0, a single object will be returned (the first in the file). If >0,
this specifies the number of objects to be unpickled, and a list will
be returned with exactly that many objects. If <0, all objects in the
file will be unpickled and returned as a list.
usecPickle : bool
If True, the :mod:`cPickle` module is to be used in place of
:mod:`pickle` (cPickle is faster). This only applies for python 2.x.
Raises
------
EOFError
If ``number`` is >0 and there are fewer than ``number`` objects in the
pickled file.
Returns
-------
contents : obj or list
        If ``number`` is 0, this is an individual object - the first one unpickled
from the file. Otherwise, it is a list of objects unpickled from the
file.
"""
if usecPickle and six.PY2:
import cPickle as pickle
else:
import pickle
if isinstance(fileorname, six.string_types):
f = open(fileorname, 'rb')
close = True
else:
f = fileorname
close = False
try:
if number > 0: # get that number
res = []
for i in range(number):
res.append(pickle.load(f))
elif number < 0: # get all objects
res = []
eof = False
while not eof:
try:
res.append(pickle.load(f))
except EOFError:
eof = True
else: # number==0
res = pickle.load(f)
finally:
if close:
f.close()
return res
def fnpickle(object, fileorname, usecPickle=True, protocol=None, append=False):
"""Pickle an object to a specified file.
Parameters
----------
object
The python object to pickle.
fileorname : str or file-like
The filename or file into which the `object` should be pickled. If a
file object, it should have been opened in binary mode.
usecPickle : bool
If True (default), the :mod:`cPickle` module is to be used in place of
:mod:`pickle` (cPickle is faster). This only applies for python 2.x.
protocol : int or None
Pickle protocol to use - see the :mod:`pickle` module for details on
these options. If None, the most recent protocol will be used.
append : bool
If True, the object is appended to the end of the file, otherwise the
file will be overwritten (if a file object is given instead of a
file name, this has no effect).
"""
if usecPickle and six.PY2:
import cPickle as pickle
else:
import pickle
if protocol is None:
protocol = pickle.HIGHEST_PROTOCOL
if isinstance(fileorname, six.string_types):
f = open(fileorname, 'ab' if append else 'wb')
close = True
else:
f = fileorname
close = False
try:
pickle.dump(object, f, protocol=protocol)
finally:
if close:
f.close()
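if __name__ == '__main__':
    # Minimal usage sketch (illustrative only, not part of astropy): round-trip
    # a couple of objects through a temporary pickle file with the helpers
    # defined above.
    import os
    import tempfile
    path = os.path.join(tempfile.mkdtemp(), 'objs.pkl')
    fnpickle({'a': 1}, path)              # write a first object
    fnpickle([2, 3], path, append=True)   # append a second object
    print(fnunpickle(path))               # first object only -> {'a': 1}
    print(fnunpickle(path, number=-1))    # every object -> [{'a': 1}, [2, 3]]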
| AustereCuriosity/astropy | astropy/io/misc/pickle_helpers.py | Python | bsd-3-clause | 3,779 | 0.000265 |
from gene_acronym_query import GeneAcronymQuery
query = GeneAcronymQuery()
gene_info = query.get_data('ABAT')
for gene in gene_info:
print "%s (%s)" % (gene['name'], gene['organism']['name'])
| wvangeit/AllenSDK | doc_template/examples/data_api_client_ex2.py | Python | gpl-3.0 | 197 | 0 |
"""Tests for GP and SP classes"""
import math
import unittest
import numpy as np
from gpkit import (Model, Monomial, settings, VectorVariable, Variable,
SignomialsEnabled, ArrayVariable)
from gpkit.geometric_program import GeometricProgram
from gpkit.small_classes import CootMatrix
from gpkit.feasibility import feasibility_model
NDIGS = {"cvxopt": 5, "mosek": 7, "mosek_cli": 5}
# name: decimal places of accuracy
class TestGP(unittest.TestCase):
"""
Test GeometricPrograms.
This TestCase gets run once for each installed solver.
"""
name = "TestGP_"
    # solver and ndig get set in the loop at the bottom of this file, a bit hacky
solver = None
ndig = None
def test_trivial_gp(self):
"""
Create and solve a trivial GP:
minimize x + 2y
subject to xy >= 1
The global optimum is (x, y) = (sqrt(2), 1/sqrt(2)).
"""
x = Monomial('x')
y = Monomial('y')
prob = Model(cost=(x + 2*y),
constraints=[x*y >= 1])
sol = prob.solve(solver=self.solver, verbosity=0)
self.assertEqual(type(prob.latex()), str)
self.assertEqual(type(prob._repr_latex_()), str)
self.assertAlmostEqual(sol("x"), math.sqrt(2.), self.ndig)
self.assertAlmostEqual(sol("y"), 1/math.sqrt(2.), self.ndig)
self.assertAlmostEqual(sol("x") + 2*sol("y"),
2*math.sqrt(2),
self.ndig)
self.assertAlmostEqual(sol["cost"], 2*math.sqrt(2), self.ndig)
def test_simple_united_gp(self):
R = Variable('R', units="nautical_miles")
a0 = Variable('a0', 340.29, 'm/s')
theta = Variable(r'\theta', 0.7598)
t = Variable('t', 10, 'hr')
T_loiter = Variable('T_{loiter}', 1, 'hr')
T_reserve = Variable('T_{reserve}', 45, 'min')
M = VectorVariable(2, 'M')
if R.units:
prob = Model(1/R,
[t >= sum(R/a0/M/theta**0.5) + T_loiter + T_reserve,
M <= 0.76])
sol = prob.solve(verbosity=0)
self.assertAlmostEqual(sol["cost"], 0.0005532, self.ndig)
def test_trivial_vector_gp(self):
"""
Create and solve a trivial GP with VectorVariables
"""
x = VectorVariable(2, 'x')
y = VectorVariable(2, 'y')
prob = Model(cost=(sum(x) + 2*sum(y)),
constraints=[x*y >= 1])
sol = prob.solve(solver=self.solver, verbosity=0)
self.assertEqual(sol('x').shape, (2,))
self.assertEqual(sol('y').shape, (2,))
for x, y in zip(sol('x'), sol('y')):
self.assertAlmostEqual(x, math.sqrt(2.), self.ndig)
self.assertAlmostEqual(y, 1/math.sqrt(2.), self.ndig)
self.assertAlmostEqual(sol["cost"]/(4*math.sqrt(2)), 1., self.ndig)
def test_zero_lower_unbounded(self):
x = Variable('x', value=4)
y = Variable('y', value=0)
z = Variable('z')
t1 = Variable('t1')
t2 = Variable('t2')
prob = Model(z, [z >= x + t1,
t1 >= t2,
t2 >= y])
sol = prob.solve(verbosity=0)
def test_mdd_example(self):
Cl = Variable("Cl", 0.5, "-", "Lift Coefficient")
Mdd = Variable("Mdd", "-", "Drag Divergence Mach Number")
m1 = Model(1/Mdd, [1 >= 5*Mdd + 0.5, Mdd >= 0.00001])
m2 = Model(1/Mdd, [1 >= 5*Mdd + 0.5])
m3 = Model(1/Mdd, [1 >= 5*Mdd + Cl, Mdd >= 0.00001])
sol1 = m1.solve(solver=self.solver, verbosity=0)
sol2 = m2.solve(solver=self.solver, verbosity=0)
sol3 = m3.solve(solver=self.solver, verbosity=0)
gp1, gp2, gp3 = [m.program for m in [m1, m2, m3]]
self.assertEqual(gp1.A, CootMatrix(row=[0, 1, 2],
col=[0, 0, 0],
data=[-1, 1, -1]))
self.assertEqual(gp2.A, CootMatrix(row=[0, 1],
col=[0, 0],
data=[-1, 1]))
# order of variables within a posynomial is not stable
# (though monomial order is)
equiv1 = gp3.A == CootMatrix(row=[0, 2, 3, 2],
col=[0, 0, 0, 0],
data=[-1, 1, -1, 0])
equiv2 = gp3.A == CootMatrix(row=[0, 1, 3, 2],
col=[0, 0, 0, 0],
data=[-1, 1, -1, 0])
self.assertTrue(equiv1 or equiv2)
self.assertAlmostEqual(sol1(Mdd), sol2(Mdd))
self.assertAlmostEqual(sol1(Mdd), sol3(Mdd))
self.assertAlmostEqual(sol2(Mdd), sol3(Mdd))
def test_additive_constants(self):
x = Variable('x')
m = Model(1/x, [1 >= 5*x + 0.5, 1 >= 10*x])
m.solve(verbosity=0)
gp = m.program
self.assertEqual(gp.cs[1], gp.cs[2])
self.assertEqual(gp.A.data[1], gp.A.data[2])
def test_zeroing(self):
L = Variable("L")
k = Variable("k", 0)
with SignomialsEnabled():
constr = [L-5*k <= 10]
sol = Model(1/L, constr).solve(verbosity=0, solver=self.solver)
self.assertAlmostEqual(sol(L), 10, self.ndig)
self.assertAlmostEqual(sol["cost"], 0.1, self.ndig)
def test_singular(self):
"""
Create and solve GP with a singular A matrix
"""
if self.solver == 'cvxopt':
# cvxopt can't solve this problem
# (see https://github.com/cvxopt/cvxopt/issues/36)
return
x = Variable('x')
y = Variable('y')
m = Model(y*x, [y*x >= 12])
sol = m.solve(solver=self.solver, verbosity=0)
self.assertAlmostEqual(sol["cost"], 12, self.ndig)
def test_constants_in_objective_1(self):
'''Issue 296'''
x1 = Variable('x1')
x2 = Variable('x2')
m = Model(1.+ x1 + x2, [x1 >= 1., x2 >= 1.])
sol = m.solve(solver=self.solver, verbosity=0)
self.assertAlmostEqual(sol["cost"], 3, self.ndig)
def test_constants_in_objective_2(self):
'''Issue 296'''
x1 = Variable('x1')
x2 = Variable('x2')
m = Model(x1**2 + 100 + 3*x2, [x1 >= 10., x2 >= 15.])
sol = m.solve(solver=self.solver, verbosity=0)
self.assertAlmostEqual(sol["cost"]/245., 1, self.ndig)
def test_feasibility_gp_(self):
x = Variable('x')
m = Model(x, [x**2 >= 1, x <= 0.5])
self.assertRaises(RuntimeWarning, m.solve, verbosity=0)
fm = feasibility_model(m, "max")
sol1 = fm.solve(verbosity=0)
fm = feasibility_model(m, "product")
sol2 = fm.solve(verbosity=0)
self.assertTrue(sol1["cost"] >= 1)
self.assertTrue(sol2["cost"] >= 1)
def test_terminating_constant_(self):
x = Variable('x')
y = Variable('y', value=0.5)
prob = Model(1/x, [x + y <= 4])
sol = prob.solve(verbosity=0)
self.assertAlmostEqual(sol["cost"], 1/3.5, self.ndig)
def test_check_result(self):
"""issue 361"""
N = 5
L = 5.
dx = L/(N-1)
EI = Variable("EI",10)
p = VectorVariable(N, "p")
p = p.sub(p, 100*np.ones(N))
V = VectorVariable(N, "V")
M = VectorVariable(N, "M")
th = VectorVariable(N, "th")
w = VectorVariable(N, "w")
eps = 1E-6
substitutions = {var: eps for var in [V[-1], M[-1], th[0], w[0]]}
objective = w[-1]
constraints = [EI*V.left[1:N] >= EI*V[1:N] + 0.5*dx*p.left[1:N] + 0.5*dx*p[1:N],
EI*M.left[1:N] >= EI*M[1:N] + 0.5*dx*V.left[1:N] + 0.5*dx*V[1:N],
EI*th.right[0:N-1] >= EI*th[0:N-1] + 0.5*dx*M.right[0:N-1] + 0.5*dx*M[0:N-1],
EI*w.right[0:N-1] >= EI*w[0:N-1] + 0.5*dx*th.right[0:N-1] + 0.5*dx*th[0:N-1]]
m = Model(objective, constraints, substitutions)
sol = m.solve(verbosity=0)
def test_exps_is_tuple(self):
"""issue 407"""
x = Variable('x')
m = Model(x, [x >= 1])
m.solve(verbosity=0)
self.assertEqual(type(m.program.cost.exps), tuple)
class TestSP(unittest.TestCase):
"""test case for SP class -- gets run for each installed solver"""
name = "TestSP_"
solver = None
ndig = None
def test_trivial_sp(self):
x = Variable('x')
y = Variable('y')
with SignomialsEnabled():
m = Model(x, [x >= 1-y, y <= 0.1])
sol = m.localsolve(verbosity=0, solver=self.solver)
self.assertAlmostEqual(sol["variables"]["x"], 0.9, self.ndig)
with SignomialsEnabled():
m = Model(x, [x+y >= 1, y <= 0.1])
sol = m.localsolve(verbosity=0, solver=self.solver)
self.assertAlmostEqual(sol["variables"]["x"], 0.9, self.ndig)
def test_relaxation(self):
x = Variable("x")
y = Variable("y")
with SignomialsEnabled():
constraints = [y + x >= 2, y <= x]
objective = x
m = Model(objective, constraints)
m.localsolve(verbosity=0)
# issue #257
A = VectorVariable(2, "A")
B = ArrayVariable([2, 2], "B")
C = VectorVariable(2, "C")
with SignomialsEnabled():
constraints = [A <= B.dot(C),
B <= 1,
C <= 1]
obj = 1/A[0] + 1/A[1]
m = Model(obj, constraints)
m.localsolve(verbosity=0)
def test_issue180(self):
L = Variable("L")
Lmax = Variable("L_{max}", 10)
W = Variable("W")
Wmax = Variable("W_{max}", 10)
A = Variable("A", 10)
Obj = Variable("Obj")
a_val = 0.01
a = Variable("a", a_val)
with SignomialsEnabled():
eqns = [L <= Lmax,
W <= Wmax,
L*W >= A,
Obj >= a*(2*L + 2*W) + (1-a)*(12 * W**-1 * L**-3)]
m = Model(Obj, eqns)
spsol = m.solve(verbosity=0, solver=self.solver)
# now solve as GP
eqns[-1] = (Obj >= a_val*(2*L + 2*W) + (1-a_val)*(12 * W**-1 * L**-3))
m = Model(Obj, eqns)
gpsol = m.solve(verbosity=0, solver=self.solver)
self.assertAlmostEqual(spsol['cost'], gpsol['cost'])
def test_trivial_sp2(self):
x = Variable("x")
y = Variable("y")
# converging from above
with SignomialsEnabled():
constraints = [y + x >= 2, y >= x]
objective = y
x0 = 1
y0 = 2
m = Model(objective, constraints)
sol1 = m.localsolve(x0={x: x0, y: y0}, verbosity=0, solver=self.solver)
# converging from right
with SignomialsEnabled():
constraints = [y + x >= 2, y <= x]
objective = x
x0 = 2
y0 = 1
m = Model(objective, constraints)
sol2 = m.localsolve(x0={x: x0, y: y0}, verbosity=0, solver=self.solver)
self.assertAlmostEqual(sol1["variables"]["x"],
sol2["variables"]["x"], self.ndig)
self.assertAlmostEqual(sol1["variables"]["y"],
sol2["variables"]["x"], self.ndig)
def test_sp_initial_guess_sub(self):
x = Variable("x")
y = Variable("y")
x0 = 1
y0 = 2
with SignomialsEnabled():
constraints = [y + x >= 2, y <= x]
objective = x
m = Model(objective, constraints)
try:
sol = m.localsolve(x0={x: x0, y: y0}, verbosity=0,
solver=self.solver)
except TypeError:
self.fail("Call to local solve with only variables failed")
self.assertAlmostEqual(sol(x), 1, self.ndig)
self.assertAlmostEqual(sol["cost"], 1, self.ndig)
try:
sol = m.localsolve(x0={"x": x0, "y": y0}, verbosity=0,
solver=self.solver)
except TypeError:
self.fail("Call to local solve with only variable strings failed")
self.assertAlmostEqual(sol("x"), 1, self.ndig)
self.assertAlmostEqual(sol["cost"], 1, self.ndig)
try:
sol = m.localsolve(x0={"x": x0, y: y0}, verbosity=0,
solver=self.solver)
except TypeError:
self.fail("Call to local solve with a mix of variable strings "
"and variables failed")
self.assertAlmostEqual(sol["cost"], 1, self.ndig)
def test_small_signomial(self):
x = Variable('x')
z = Variable('z')
local_ndig = 4
nonzero_adder = 0.1 # TODO: support reaching zero, issue #348
with SignomialsEnabled():
J = 0.01*(x - 1)**2 + nonzero_adder
m = Model(z, [z >= J])
sol = m.localsolve(verbosity=0)
self.assertAlmostEqual(sol['cost'], nonzero_adder, local_ndig)
self.assertAlmostEqual(sol('x'), 0.987, 3)
def test_signomials_not_allowed_in_objective(self):
with SignomialsEnabled():
x = Variable('x')
y = Variable('y')
J = 0.01*((x - 1)**2 + (y - 1)**2) + (x*y - 1)**2
m = Model(J)
with self.assertRaises(TypeError):
sol = m.localsolve(verbosity=0)
def test_partial_sub_signomial(self):
"""Test SP partial x0 initialization"""
x = Variable('x')
y = Variable('y')
with SignomialsEnabled():
m = Model(x, [x + y >= 1, y <= 0.5])
m.localsolve(x0={x: 0.5}, verbosity=0)
self.assertEqual(m.program.gps[0].constraints[0].exp[x], -1./3)
TEST_CASES = [TestGP, TestSP]
TESTS = []
for testcase in TEST_CASES:
for solver in settings["installed_solvers"]:
if solver:
test = type(testcase.__name__+"_"+solver,
(testcase,), {})
setattr(test, "solver", solver)
setattr(test, "ndig", NDIGS[solver])
TESTS.append(test)
if __name__ == "__main__":
from gpkit.tests.helpers import run_tests
run_tests(TESTS)
| galbramc/gpkit | gpkit/tests/t_model.py | Python | mit | 14,190 | 0.001339 |
#
# This file is part of Plinth.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
Plinth package init file
"""
__version__ = '0.13.1'
| freedomboxtwh/Plinth | plinth/__init__.py | Python | agpl-3.0 | 750 | 0 |
#! /usr/bin/env python
import argparse, re, os
parser = argparse.ArgumentParser(description = 'Download genome/transcript sequences and gene annotations')
parser.add_argument('species', choices=['hg19','mm10','TAIR10'], help='choose a species (Human, Mouse, Arabidopsis)')
parser.add_argument('-d', '--download', action='store_true', help='download sequences or annotations')
parser.add_argument('-b', '--build', action='store_true', help='build sequences or annotations')
parser.add_argument('-g', '--genome', action='store_true', help='download or build genome sequences')
parser.add_argument('-t', '--transcriptome', action='store_true', help='download or build transcriptome sequences')
parser.add_argument('-a', '--annotation', action='store_true', help='download or build gene annotations')
args = parser.parse_args()
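# Typical invocations (illustrative only):
#   python SeqAnnoDownloadBuild.py hg19 -d -g   # download the hg19 genome
#   python SeqAnnoDownloadBuild.py hg19 -d -a   # download the hg19 GTF annotation
#   python SeqAnnoDownloadBuild.py hg19 -b -a   # build hg19.gtf into hg19.gtf.build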
genome_dict = {'hg19': 'ftp://hgdownload.cse.ucsc.edu/goldenPath/hg19/bigZips/chromFa.tar.gz',
'mm10': 'ftp://hgdownload.cse.ucsc.edu/goldenPath/mm10/bigZips/chromFa.tar.gz',
'TAIR10': 'ftp://ftp.ensemblgenomes.org/pub/current/plants/fasta/arabidopsis_thaliana/dna/Arabidopsis_thaliana.TAIR10.*.dna.toplevel.fa.gz'}
trans_dict = {'hg19': ['ftp://ftp.ensembl.org/pub/release-75/fasta/homo_sapiens/cdna/Homo_sapiens.*.cdna.abinitio.fa.gz',
'ftp://ftp.ensembl.org/pub/release-75/fasta/homo_sapiens/cdna/Homo_sapiens.*.cdna.all.fa.gz',
'ftp://ftp.ensembl.org/pub/release-75/fasta/homo_sapiens/ncrna/Homo_sapiens.*.ncrna.fa.gz'],
'mm10': ['ftp://ftp.ensembl.org/pub/current_fasta/mus_musculus/cdna/Mus_musculus.*.cdna.abinitio.fa.gz',
'ftp://ftp.ensembl.org/pub/current_fasta/mus_musculus/cdna/Mus_musculus.*.cdna.all.fa.gz',
'ftp://ftp.ensembl.org/pub/current_fasta/mus_musculus/ncrna/Mus_musculus.*.ncrna.fa.gz'],
'TAIR10': ['ftp://ftp.ensemblgenomes.org/pub/current/plants/fasta/arabidopsis_thaliana/cdna/Arabidopsis_thaliana.*.cdna.abinitio.fa.gz',
'ftp://ftp.ensemblgenomes.org/pub/current/plants/fasta/arabidopsis_thaliana/cdna/Arabidopsis_thaliana.*.cdna.all.fa.gz',
'ftp://ftp.ensemblgenomes.org/pub/current/plants/fasta/arabidopsis_thaliana/ncrna/Arabidopsis_thaliana.*.ncrna.fa.gz']}
anno_dict = {'hg19': 'ftp://ftp.ensembl.org/pub/release-75/gtf/homo_sapiens/*.gtf.gz',
'mm10': 'ftp://ftp.ensembl.org/pub/current_gtf/mus_musculus/*.gtf.gz',
'TAIR10': 'ftp://ftp.ensemblgenomes.org/pub/current/plants//gtf/arabidopsis_thaliana/*.gtf.gz'}
def gtf_build(gtf, build):
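    # Condense a GTF file into a 0-based, tab-delimited annotation table,
    # keeping only the longest (representative) transcript of each gene.
    # Output columns: chrom, tx_start, tx_end, cds_start, cds_end, strand,
    # exon_starts, exon_ends, tx_id, gene_id, biotype.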
input_file = open(gtf,'r')
output_file = open(build,'w')
tx2gene = {}
tx2exon_starts = {}
tx2exon_ends = {}
tx2cds_starts = {}
tx2cds_ends = {}
for line in input_file:
if line.startswith('#'):
continue
line_list = line.strip().split('\t')
chrom, biotype, feature, start, end, strand, ID = (line_list[0],line_list[1],line_list[2],line_list[3],line_list[4],line_list[6],line_list[8])
if gtf == 'hg19.gtf' or gtf == 'mm10.gtf':
chrom = 'chr' + chrom
start = str(int(start) - 1) ## 0-based
if re.search('gene_id \"(.+?)\".+transcript_id \"(.+?)\"', ID) is not None:
gene_id, tx_id = re.search('gene_id \"(.+?)\".+transcript_id \"(.+?)\"', ID).groups()
tx2gene[tx_id] = '%s|%s|%s|%s' % (chrom, strand, gene_id, biotype)
if feature == 'exon':
tx2exon_starts[tx_id] = start + ',' + tx2exon_starts.get(tx_id, '')
tx2exon_ends[tx_id] = end + ',' + tx2exon_ends.get(tx_id, '')
if feature == 'CDS':
tx2cds_starts[tx_id] = start + ',' + tx2cds_starts.get(tx_id, '')
tx2cds_ends[tx_id] = end + ',' + tx2cds_ends.get(tx_id, '')
gene2repretx = {} ## representative transcript (repretx) is the longest transcript for each gene
trans2len = {}
for tx_id in tx2gene:
chrom, strand, gene_id, biotype = tx2gene[tx_id].split('|')
exon_starts = sorted([int(i) for i in tx2exon_starts[tx_id].strip(',').split(',')])
exon_ends = sorted([int(i) for i in tx2exon_ends[tx_id].strip(',').split(',')])
tx_len = 0
for i in range(len(exon_starts)):
tx_len += (exon_ends[i] - exon_starts[i])
trans2len[tx_id] = tx_len
if gene_id in gene2repretx:
if tx_len > trans2len[gene2repretx[gene_id]]:
gene2repretx[gene_id] = tx_id
else:
gene2repretx[gene_id] = tx_id
for tx_id in sorted(tx2gene):
chrom, strand, gene_id, biotype = tx2gene[tx_id].split('|')
if tx_id == gene2repretx[gene_id]:
exon_starts = [str(j) for j in sorted([int(i) for i in tx2exon_starts[tx_id].strip(',').split(',')])]
exon_ends = [str(j) for j in sorted([int(i) for i in tx2exon_ends[tx_id].strip(',').split(',')])]
tx_start = exon_starts[0]
tx_end = exon_ends[-1]
cds_start = '.'
cds_end = '.'
if tx_id in tx2cds_starts:
cds_starts = [str(j) for j in sorted([int(i) for i in tx2cds_starts[tx_id].strip(',').split(',')])]
cds_ends = [str(j) for j in sorted([int(i) for i in tx2cds_ends[tx_id].strip(',').split(',')])]
cds_start = cds_starts[0]
cds_end = cds_ends[-1]
output_file.write('%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n' % (chrom, tx_start, tx_end, cds_start, cds_end, strand, ','.join(exon_starts), ','.join(exon_ends), tx_id, gene_id, biotype))
if args.download:
if args.genome:
print '[download %s genome]' % args.species
if args.species == 'hg19' or args.species == 'mm10':
print 'wget -q %s -O %s.tar.gz' % (genome_dict[args.species], args.species)
os.system('wget -q %s -O %s.tar.gz' % (genome_dict[args.species], args.species))
print 'tar -zxf %s.tar.gz' % args.species
os.system('tar -zxf %s.tar.gz' % args.species)
print 'cat chr*.fa > %s_dna.fa' % args.species
os.system('cat chr*.fa > %s_dna.fa' % args.species)
print 'rm chr*.fa'
os.system('rm chr*.fa')
else:
print 'wget -q %s -O %s.fa.gz' % (genome_dict[args.species], args.species)
os.system('wget -q %s -O %s.fa.gz' % (genome_dict[args.species], args.species))
print 'zcat %s.fa.gz > %s_dna.fa' % (args.species, args.species)
os.system('zcat %s.fa.gz > %s_dna.fa' % (args.species, args.species))
print 'rm %s.fa.gz' % args.species
os.system('rm %s.fa.gz' % args.species)
elif args.transcriptome:
print '[download %s transcriptome]' % args.species
for i in trans_dict[args.species]:
print 'wget -q %s' % i
os.system('wget -q %s' % i)
print 'zcat *.fa.gz > %s_trans.fa' % args.species
os.system('zcat *.fa.gz > %s_trans.fa' % args.species)
print 'rm *.fa.gz'
os.system('rm *.fa.gz')
elif args.annotation:
print '[download %s gene annotation]' % args.species
print 'wget -q %s -O %s.gtf.gz' % (anno_dict[args.species], args.species)
os.system('wget -q %s -O %s.gtf.gz' % (anno_dict[args.species], args.species))
print 'gzip -d %s.gtf.gz' % args.species
os.system('gzip -d %s.gtf.gz' % args.species)
else:
print 'please specify -g/--genome or -t/--transcriptome or -a/--annotation'
elif args.build:
if args.genome:
print '[build %s genome]' % args.species
print 'bowtie-build %s_dna.fa %s_dna' % (args.species, args.species)
os.system('bowtie-build %s_dna.fa %s_dna' % (args.species, args.species))
elif args.transcriptome:
print '[build %s transcriptome]' % args.species
print 'bowtie-build %s_trans.fa %s_trans' % (args.species, args.species)
os.system('bowtie-build %s_trans.fa %s_trans' % (args.species, args.species))
elif args.annotation:
print '[build %s gene annotation]' % args.species
print 'gtf_build(%s.gtf, %s.gtf.build)' % (args.species, args.species)
gtf_build(args.species+'.gtf', args.species+'.gtf.build')
else:
print 'please specify -g/--genome or -t/--transcriptome or -a/--annotation'
else:
print 'please specify -d/--download or -b/--build'
| bioxfu/circRNAFinder | src/SeqAnnoDownloadBuild.py | Python | gpl-3.0 | 7,557 | 0.025275 |
from __future__ import unicode_literals
import binascii
import collections
import email
import getpass
import io
import optparse
import os
import re
import shlex
import shutil
import socket
import subprocess
import sys
import itertools
import xml.etree.ElementTree
try:
import urllib.request as compat_urllib_request
except ImportError: # Python 2
import urllib2 as compat_urllib_request
try:
import urllib.error as compat_urllib_error
except ImportError: # Python 2
import urllib2 as compat_urllib_error
try:
import urllib.parse as compat_urllib_parse
except ImportError: # Python 2
import urllib as compat_urllib_parse
try:
from urllib.parse import urlparse as compat_urllib_parse_urlparse
except ImportError: # Python 2
from urlparse import urlparse as compat_urllib_parse_urlparse
try:
import urllib.parse as compat_urlparse
except ImportError: # Python 2
import urlparse as compat_urlparse
try:
import urllib.response as compat_urllib_response
except ImportError: # Python 2
import urllib as compat_urllib_response
try:
import http.cookiejar as compat_cookiejar
except ImportError: # Python 2
import cookielib as compat_cookiejar
try:
import http.cookies as compat_cookies
except ImportError: # Python 2
import Cookie as compat_cookies
try:
import html.entities as compat_html_entities
except ImportError: # Python 2
import htmlentitydefs as compat_html_entities
try:
import http.client as compat_http_client
except ImportError: # Python 2
import httplib as compat_http_client
try:
from urllib.error import HTTPError as compat_HTTPError
except ImportError: # Python 2
from urllib2 import HTTPError as compat_HTTPError
try:
from urllib.request import urlretrieve as compat_urlretrieve
except ImportError: # Python 2
from urllib import urlretrieve as compat_urlretrieve
try:
from html.parser import HTMLParser as compat_HTMLParser
except ImportError: # Python 2
from HTMLParser import HTMLParser as compat_HTMLParser
try:
from subprocess import DEVNULL
compat_subprocess_get_DEVNULL = lambda: DEVNULL
except ImportError:
compat_subprocess_get_DEVNULL = lambda: open(os.path.devnull, 'w')
try:
import http.server as compat_http_server
except ImportError:
import BaseHTTPServer as compat_http_server
try:
compat_str = unicode # Python 2
except NameError:
compat_str = str
try:
from urllib.parse import unquote_to_bytes as compat_urllib_parse_unquote_to_bytes
from urllib.parse import unquote as compat_urllib_parse_unquote
from urllib.parse import unquote_plus as compat_urllib_parse_unquote_plus
except ImportError: # Python 2
_asciire = (compat_urllib_parse._asciire if hasattr(compat_urllib_parse, '_asciire')
else re.compile('([\x00-\x7f]+)'))
# HACK: The following are the correct unquote_to_bytes, unquote and unquote_plus
# implementations from cpython 3.4.3's stdlib. Python 2's version
# is apparently broken (see https://github.com/rg3/youtube-dl/pull/6244)
def compat_urllib_parse_unquote_to_bytes(string):
"""unquote_to_bytes('abc%20def') -> b'abc def'."""
# Note: strings are encoded as UTF-8. This is only an issue if it contains
# unescaped non-ASCII characters, which URIs should not.
if not string:
# Is it a string-like object?
string.split
return b''
if isinstance(string, compat_str):
string = string.encode('utf-8')
bits = string.split(b'%')
if len(bits) == 1:
return string
res = [bits[0]]
append = res.append
for item in bits[1:]:
try:
append(compat_urllib_parse._hextochr[item[:2]])
append(item[2:])
except KeyError:
append(b'%')
append(item)
return b''.join(res)
def compat_urllib_parse_unquote(string, encoding='utf-8', errors='replace'):
"""Replace %xx escapes by their single-character equivalent. The optional
encoding and errors parameters specify how to decode percent-encoded
sequences into Unicode characters, as accepted by the bytes.decode()
method.
By default, percent-encoded sequences are decoded with UTF-8, and invalid
sequences are replaced by a placeholder character.
unquote('abc%20def') -> 'abc def'.
"""
if '%' not in string:
string.split
return string
if encoding is None:
encoding = 'utf-8'
if errors is None:
errors = 'replace'
bits = _asciire.split(string)
res = [bits[0]]
append = res.append
for i in range(1, len(bits), 2):
append(compat_urllib_parse_unquote_to_bytes(bits[i]).decode(encoding, errors))
append(bits[i + 1])
return ''.join(res)
def compat_urllib_parse_unquote_plus(string, encoding='utf-8', errors='replace'):
"""Like unquote(), but also replace plus signs by spaces, as required for
unquoting HTML form values.
unquote_plus('%7e/abc+def') -> '~/abc def'
"""
string = string.replace('+', ' ')
return compat_urllib_parse_unquote(string, encoding, errors)
try:
from urllib.parse import urlencode as compat_urllib_parse_urlencode
except ImportError: # Python 2
# Python 2 will choke in urlencode on mixture of byte and unicode strings.
    # Possible solutions are to either port it from Python 3 along with all
    # its helpers, or to manually ensure the input query contains only byte
    # strings. We will stick with the latter, thus recursively encoding the
    # whole query.
def compat_urllib_parse_urlencode(query, doseq=0, encoding='utf-8'):
def encode_elem(e):
if isinstance(e, dict):
e = encode_dict(e)
elif isinstance(e, (list, tuple,)):
list_e = encode_list(e)
e = tuple(list_e) if isinstance(e, tuple) else list_e
elif isinstance(e, compat_str):
e = e.encode(encoding)
return e
def encode_dict(d):
return dict((encode_elem(k), encode_elem(v)) for k, v in d.items())
def encode_list(l):
return [encode_elem(e) for e in l]
return compat_urllib_parse.urlencode(encode_elem(query), doseq=doseq)
try:
from urllib.request import DataHandler as compat_urllib_request_DataHandler
except ImportError: # Python < 3.4
# Ported from CPython 98774:1733b3bd46db, Lib/urllib/request.py
class compat_urllib_request_DataHandler(compat_urllib_request.BaseHandler):
def data_open(self, req):
# data URLs as specified in RFC 2397.
#
# ignores POSTed data
#
# syntax:
# dataurl := "data:" [ mediatype ] [ ";base64" ] "," data
# mediatype := [ type "/" subtype ] *( ";" parameter )
# data := *urlchar
# parameter := attribute "=" value
url = req.get_full_url()
scheme, data = url.split(':', 1)
mediatype, data = data.split(',', 1)
# even base64 encoded data URLs might be quoted so unquote in any case:
data = compat_urllib_parse_unquote_to_bytes(data)
if mediatype.endswith(';base64'):
data = binascii.a2b_base64(data)
mediatype = mediatype[:-7]
if not mediatype:
mediatype = 'text/plain;charset=US-ASCII'
headers = email.message_from_string(
'Content-type: %s\nContent-length: %d\n' % (mediatype, len(data)))
return compat_urllib_response.addinfourl(io.BytesIO(data), headers, url)
try:
compat_basestring = basestring # Python 2
except NameError:
compat_basestring = str
try:
compat_chr = unichr # Python 2
except NameError:
compat_chr = chr
try:
from xml.etree.ElementTree import ParseError as compat_xml_parse_error
except ImportError: # Python 2.6
from xml.parsers.expat import ExpatError as compat_xml_parse_error
if sys.version_info[0] >= 3:
compat_etree_fromstring = xml.etree.ElementTree.fromstring
else:
# python 2.x tries to encode unicode strings with ascii (see the
# XMLParser._fixtext method)
etree = xml.etree.ElementTree
try:
_etree_iter = etree.Element.iter
except AttributeError: # Python <=2.6
def _etree_iter(root):
for el in root.findall('*'):
yield el
for sub in _etree_iter(el):
yield sub
# on 2.6 XML doesn't have a parser argument, function copied from CPython
# 2.7 source
def _XML(text, parser=None):
if not parser:
parser = etree.XMLParser(target=etree.TreeBuilder())
parser.feed(text)
return parser.close()
def _element_factory(*args, **kwargs):
el = etree.Element(*args, **kwargs)
for k, v in el.items():
if isinstance(v, bytes):
el.set(k, v.decode('utf-8'))
return el
def compat_etree_fromstring(text):
doc = _XML(text, parser=etree.XMLParser(target=etree.TreeBuilder(element_factory=_element_factory)))
for el in _etree_iter(doc):
if el.text is not None and isinstance(el.text, bytes):
el.text = el.text.decode('utf-8')
return doc
if sys.version_info < (2, 7):
# Here comes the crazy part: In 2.6, if the xpath is a unicode,
# .//node does not match if a node is a direct child of . !
def compat_xpath(xpath):
if isinstance(xpath, compat_str):
xpath = xpath.encode('ascii')
return xpath
else:
compat_xpath = lambda xpath: xpath
try:
from urllib.parse import parse_qs as compat_parse_qs
except ImportError: # Python 2
# HACK: The following is the correct parse_qs implementation from cpython 3's stdlib.
# Python 2's version is apparently totally broken
def _parse_qsl(qs, keep_blank_values=False, strict_parsing=False,
encoding='utf-8', errors='replace'):
qs, _coerce_result = qs, compat_str
pairs = [s2 for s1 in qs.split('&') for s2 in s1.split(';')]
r = []
for name_value in pairs:
if not name_value and not strict_parsing:
continue
nv = name_value.split('=', 1)
if len(nv) != 2:
if strict_parsing:
raise ValueError('bad query field: %r' % (name_value,))
# Handle case of a control-name with no equal sign
if keep_blank_values:
nv.append('')
else:
continue
if len(nv[1]) or keep_blank_values:
name = nv[0].replace('+', ' ')
name = compat_urllib_parse_unquote(
name, encoding=encoding, errors=errors)
name = _coerce_result(name)
value = nv[1].replace('+', ' ')
value = compat_urllib_parse_unquote(
value, encoding=encoding, errors=errors)
value = _coerce_result(value)
r.append((name, value))
return r
def compat_parse_qs(qs, keep_blank_values=False, strict_parsing=False,
encoding='utf-8', errors='replace'):
parsed_result = {}
pairs = _parse_qsl(qs, keep_blank_values, strict_parsing,
encoding=encoding, errors=errors)
for name, value in pairs:
if name in parsed_result:
parsed_result[name].append(value)
else:
parsed_result[name] = [value]
return parsed_result
try:
from shlex import quote as shlex_quote
except ImportError: # Python < 3.3
def shlex_quote(s):
if re.match(r'^[-_\w./]+$', s):
return s
else:
return "'" + s.replace("'", "'\"'\"'") + "'"
if sys.version_info >= (2, 7, 3):
compat_shlex_split = shlex.split
else:
# Working around shlex issue with unicode strings on some python 2
# versions (see http://bugs.python.org/issue1548891)
def compat_shlex_split(s, comments=False, posix=True):
if isinstance(s, compat_str):
s = s.encode('utf-8')
return shlex.split(s, comments, posix)
def compat_ord(c):
if type(c) is int:
return c
else:
return ord(c)
compat_os_name = os._name if os.name == 'java' else os.name
if sys.version_info >= (3, 0):
compat_getenv = os.getenv
compat_expanduser = os.path.expanduser
else:
# Environment variables should be decoded with filesystem encoding.
# Otherwise it will fail if any non-ASCII characters present (see #3854 #3217 #2918)
def compat_getenv(key, default=None):
from .utils import get_filesystem_encoding
env = os.getenv(key, default)
if env:
env = env.decode(get_filesystem_encoding())
return env
# HACK: The default implementations of os.path.expanduser from cpython do not decode
# environment variables with filesystem encoding. We will work around this by
# providing adjusted implementations.
# The following are os.path.expanduser implementations from cpython 2.7.8 stdlib
# for different platforms with correct environment variables decoding.
if compat_os_name == 'posix':
def compat_expanduser(path):
"""Expand ~ and ~user constructions. If user or $HOME is unknown,
do nothing."""
if not path.startswith('~'):
return path
i = path.find('/', 1)
if i < 0:
i = len(path)
if i == 1:
if 'HOME' not in os.environ:
import pwd
userhome = pwd.getpwuid(os.getuid()).pw_dir
else:
userhome = compat_getenv('HOME')
else:
import pwd
try:
pwent = pwd.getpwnam(path[1:i])
except KeyError:
return path
userhome = pwent.pw_dir
userhome = userhome.rstrip('/')
return (userhome + path[i:]) or '/'
elif compat_os_name == 'nt' or compat_os_name == 'ce':
def compat_expanduser(path):
"""Expand ~ and ~user constructs.
If user or $HOME is unknown, do nothing."""
if path[:1] != '~':
return path
i, n = 1, len(path)
while i < n and path[i] not in '/\\':
i = i + 1
if 'HOME' in os.environ:
userhome = compat_getenv('HOME')
elif 'USERPROFILE' in os.environ:
userhome = compat_getenv('USERPROFILE')
elif 'HOMEPATH' not in os.environ:
return path
else:
try:
drive = compat_getenv('HOMEDRIVE')
except KeyError:
drive = ''
userhome = os.path.join(drive, compat_getenv('HOMEPATH'))
if i != 1: # ~user
userhome = os.path.join(os.path.dirname(userhome), path[1:i])
return userhome + path[i:]
else:
compat_expanduser = os.path.expanduser
if sys.version_info < (3, 0):
def compat_print(s):
from .utils import preferredencoding
print(s.encode(preferredencoding(), 'xmlcharrefreplace'))
else:
def compat_print(s):
assert isinstance(s, compat_str)
print(s)
try:
subprocess_check_output = subprocess.check_output
except AttributeError:
def subprocess_check_output(*args, **kwargs):
assert 'input' not in kwargs
p = subprocess.Popen(*args, stdout=subprocess.PIPE, **kwargs)
output, _ = p.communicate()
ret = p.poll()
if ret:
raise subprocess.CalledProcessError(ret, p.args, output=output)
return output
if sys.version_info < (3, 0) and sys.platform == 'win32':
def compat_getpass(prompt, *args, **kwargs):
if isinstance(prompt, compat_str):
from .utils import preferredencoding
prompt = prompt.encode(preferredencoding())
return getpass.getpass(prompt, *args, **kwargs)
else:
compat_getpass = getpass.getpass
# Python < 2.6.5 require kwargs to be bytes
try:
def _testfunc(x):
pass
_testfunc(**{'x': 0})
except TypeError:
def compat_kwargs(kwargs):
return dict((bytes(k), v) for k, v in kwargs.items())
else:
compat_kwargs = lambda kwargs: kwargs
if sys.version_info < (2, 7):
def compat_socket_create_connection(address, timeout, source_address=None):
host, port = address
err = None
for res in socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM):
af, socktype, proto, canonname, sa = res
sock = None
try:
sock = socket.socket(af, socktype, proto)
sock.settimeout(timeout)
if source_address:
sock.bind(source_address)
sock.connect(sa)
return sock
except socket.error as _:
err = _
if sock is not None:
sock.close()
if err is not None:
raise err
else:
raise socket.error('getaddrinfo returns an empty list')
else:
compat_socket_create_connection = socket.create_connection
# Fix https://github.com/rg3/youtube-dl/issues/4223
# See http://bugs.python.org/issue9161 for what is broken
def workaround_optparse_bug9161():
op = optparse.OptionParser()
og = optparse.OptionGroup(op, 'foo')
try:
og.add_option('-t')
except TypeError:
real_add_option = optparse.OptionGroup.add_option
def _compat_add_option(self, *args, **kwargs):
enc = lambda v: (
v.encode('ascii', 'replace') if isinstance(v, compat_str)
else v)
bargs = [enc(a) for a in args]
bkwargs = dict(
(k, enc(v)) for k, v in kwargs.items())
return real_add_option(self, *bargs, **bkwargs)
optparse.OptionGroup.add_option = _compat_add_option
if hasattr(shutil, 'get_terminal_size'): # Python >= 3.3
compat_get_terminal_size = shutil.get_terminal_size
else:
_terminal_size = collections.namedtuple('terminal_size', ['columns', 'lines'])
def compat_get_terminal_size(fallback=(80, 24)):
columns = compat_getenv('COLUMNS')
if columns:
columns = int(columns)
else:
columns = None
lines = compat_getenv('LINES')
if lines:
lines = int(lines)
else:
lines = None
if columns is None or lines is None or columns <= 0 or lines <= 0:
try:
sp = subprocess.Popen(
['stty', 'size'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = sp.communicate()
_lines, _columns = map(int, out.split())
except Exception:
_columns, _lines = _terminal_size(*fallback)
if columns is None or columns <= 0:
columns = _columns
if lines is None or lines <= 0:
lines = _lines
return _terminal_size(columns, lines)
try:
itertools.count(start=0, step=1)
compat_itertools_count = itertools.count
except TypeError: # Python 2.6
def compat_itertools_count(start=0, step=1):
n = start
while True:
yield n
n += step
if sys.version_info >= (3, 0):
from tokenize import tokenize as compat_tokenize_tokenize
else:
from tokenize import generate_tokens as compat_tokenize_tokenize
__all__ = [
'compat_HTMLParser',
'compat_HTTPError',
'compat_basestring',
'compat_chr',
'compat_cookiejar',
'compat_cookies',
'compat_etree_fromstring',
'compat_expanduser',
'compat_get_terminal_size',
'compat_getenv',
'compat_getpass',
'compat_html_entities',
'compat_http_client',
'compat_http_server',
'compat_itertools_count',
'compat_kwargs',
'compat_ord',
'compat_os_name',
'compat_parse_qs',
'compat_print',
'compat_shlex_split',
'compat_socket_create_connection',
'compat_str',
'compat_subprocess_get_DEVNULL',
'compat_tokenize_tokenize',
'compat_urllib_error',
'compat_urllib_parse',
'compat_urllib_parse_unquote',
'compat_urllib_parse_unquote_plus',
'compat_urllib_parse_unquote_to_bytes',
'compat_urllib_parse_urlencode',
'compat_urllib_parse_urlparse',
'compat_urllib_request',
'compat_urllib_request_DataHandler',
'compat_urllib_response',
'compat_urlparse',
'compat_urlretrieve',
'compat_xml_parse_error',
'compat_xpath',
'shlex_quote',
'subprocess_check_output',
'workaround_optparse_bug9161',
]
| nfedera/rg3-youtube-dl | youtube_dl/compat.py | Python | unlicense | 21,183 | 0.001275 |
# coding=utf-8
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from keystoneclient import exceptions as ksexception
from oslo_config import cfg
from six.moves.urllib import parse
from iotronic.common import exception
from iotronic.common.i18n import _
CONF = cfg.CONF
keystone_opts = [
cfg.StrOpt('region_name',
help='The region used for getting endpoints of OpenStack'
'services.'),
]
CONF.register_opts(keystone_opts, group='keystone')
CONF.import_group('keystone_authtoken', 'keystonemiddleware.auth_token')
def _is_apiv3(auth_url, auth_version):
"""Checks if V3 version of API is being used or not.
This method inspects auth_url and auth_version, and checks whether V3
version of the API is being used or not.
    :param auth_url: an http or https url to be inspected (like
'http://127.0.0.1:9898/').
:param auth_version: a string containing the version (like 'v2', 'v3.0')
:returns: True if V3 of the API is being used.
"""
return auth_version == 'v3.0' or '/v3' in parse.urlparse(auth_url).path
def _get_ksclient(token=None):
auth_url = CONF.keystone_authtoken.auth_uri
if not auth_url:
raise exception.KeystoneFailure(_('Keystone API endpoint is missing'))
auth_version = CONF.keystone_authtoken.auth_version
api_v3 = _is_apiv3(auth_url, auth_version)
if api_v3:
from keystoneclient.v3 import client
else:
from keystoneclient.v2_0 import client
auth_url = get_keystone_url(auth_url, auth_version)
try:
if token:
return client.Client(token=token, auth_url=auth_url)
else:
return client.Client(
username=CONF.keystone_authtoken.admin_user,
password=CONF.keystone_authtoken.admin_password,
tenant_name=CONF.keystone_authtoken.admin_tenant_name,
region_name=CONF.keystone.region_name,
auth_url=auth_url)
except ksexception.Unauthorized:
raise exception.KeystoneUnauthorized()
except ksexception.AuthorizationFailure as err:
raise exception.KeystoneFailure(_('Could not authorize in Keystone:'
' %s') % err)
def get_keystone_url(auth_url, auth_version):
"""Gives an http/https url to contact keystone.
Given an auth_url and auth_version, this method generates the url in
which keystone can be reached.
    :param auth_url: an http or https url to be inspected (like
'http://127.0.0.1:9898/').
:param auth_version: a string containing the version (like v2, v3.0, etc)
:returns: a string containing the keystone url
"""
api_v3 = _is_apiv3(auth_url, auth_version)
api_version = 'v3' if api_v3 else 'v2.0'
# NOTE(lucasagomes): Get rid of the trailing '/' otherwise urljoin()
# fails to override the version in the URL
return parse.urljoin(auth_url.rstrip('/'), api_version)
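# For example (illustrative values only):
#   get_keystone_url('http://127.0.0.1:5000/', 'v3.0') -> 'http://127.0.0.1:5000/v3'
#   get_keystone_url('http://127.0.0.1:5000/', 'v2')   -> 'http://127.0.0.1:5000/v2.0'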
def get_service_url(service_type='iot', endpoint_type='internal'):
"""Wrapper for get service url from keystone service catalog.
Given a service_type and an endpoint_type, this method queries keystone
service catalog and provides the url for the desired endpoint.
:param service_type: the keystone service for which url is required.
:param endpoint_type: the type of endpoint for the service.
:returns: an http/https url for the desired endpoint.
"""
ksclient = _get_ksclient()
if not ksclient.has_service_catalog():
raise exception.KeystoneFailure(_('No Keystone service catalog '
'loaded'))
try:
endpoint = ksclient.service_catalog.url_for(
service_type=service_type,
endpoint_type=endpoint_type,
region_name=CONF.keystone.region_name)
except ksexception.EndpointNotFound:
raise exception.CatalogNotFound(service_type=service_type,
endpoint_type=endpoint_type)
return endpoint
def get_admin_auth_token():
"""Get an admin auth_token from the Keystone."""
ksclient = _get_ksclient()
return ksclient.auth_token
def token_expires_soon(token, duration=None):
"""Determines if token expiration is about to occur.
:param duration: time interval in seconds
:returns: boolean : true if expiration is within the given duration
"""
ksclient = _get_ksclient(token=token)
return ksclient.auth_ref.will_expire_soon(stale_duration=duration)
| MDSLab/s4t-iotronic | iotronic/common/keystone.py | Python | apache-2.0 | 5,025 | 0 |
# Copyright Least Authority Enterprises.
# See LICENSE for details.
"""
An in-memory implementation of the Kubernetes client interface.
"""
from json import loads
import attr
from pyrsistent import InvariantException, PClass, field, pset
from zope.interface import Interface, implementer
from twisted.python.url import URL
from twisted.web.http import CREATED, NOT_FOUND, OK
from eliot import start_action
from klein import Klein
from werkzeug.exceptions import NotFound
from treq.testing import RequestTraversalAgent
from . import (
IKubernetes, KubernetesError, network_kubernetes,
v1_5_model,
)
from ._compat import dumps_bytes
def memory_kubernetes():
"""
Create an in-memory Kubernetes-alike service.
    This serves as a place to hold state for stateful Kubernetes interactions
allowed by ``IKubernetesClient``. Only clients created against the same
instance will all share state.
:return IKubernetes: The new Kubernetes-alike service.
"""
return _MemoryKubernetes()
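# Minimal usage sketch (illustrative; only names defined in this module are
# relied upon):
#
#     kubernetes = memory_kubernetes()
#     client_a = kubernetes.client()
#     client_b = kubernetes.client()
#     # Both clients talk to the same in-memory state, so objects created
#     # through one are visible to the other.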
@implementer(IKubernetes)
class _MemoryKubernetes(object):
"""
``_MemoryKubernetes`` maintains state in-memory which approximates
the state of a real Kubernetes deployment sufficiently to expose a
subset of the external Kubernetes API.
:ivar model: All of the Kubernetes model objects as understood by this
service.
"""
def __init__(self):
base_url = URL.fromText(u"https://kubernetes.example.invalid./")
self.model = v1_5_model
self._state = _KubernetesState.for_model(self.model)
self._resource = _kubernetes_resource(self, self.model)
self._kubernetes = network_kubernetes(
base_url=base_url,
agent=RequestTraversalAgent(self._resource),
)
def _state_changed(self, state):
"""
The Kubernetes state has been changed. Record the new version.
The state is immutable so any changes must be represented as a brand
new object.
:param _KubernetesState state: The new state.
"""
self._state = state
def versioned_client(self, *args, **kwargs):
"""
:return IKubernetesClient: A new client which interacts with this
object rather than a real Kubernetes deployment.
"""
return self._kubernetes.versioned_client(*args, **kwargs)
def client(self, *args, **kwargs):
"""
:return IKubernetesClient: A new client which interacts with this
object rather than a real Kubernetes deployment.
"""
return self._kubernetes.client(*args, **kwargs)
def _kubernetes_resource(memory_service, model):
return _Kubernetes(memory_service, model).app.resource()
def _incrementResourceVersion(version):
"""
Pyrsistent transformation function which can increment a
``v1.ObjectMeta.resourceVersion`` value (even if it was missing).
:param version: The old version as a ``unicode`` string or ``None`` if
there wasn't one.
:return unicode: The new version, guaranteed to be greater than the old
one.
"""
if version is None:
version = 0
return u"{}".format(int(version) + 1)
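# For example: _incrementResourceVersion(None) -> u"1" and
# _incrementResourceVersion(u"41") -> u"42".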
def _transform_object(obj, *transformation):
"""
Apply a pyrsistent transformation to an ``IObject``.
In addition to the given transformation, the object's resourceVersion will
be updated.
    :param IObject obj: The object to transform.
:param *transformation: Arguments like those to ``PClass.transform``.
:return: The transformed object.
"""
return obj.transform(
[u"metadata", u"resourceVersion"],
_incrementResourceVersion,
*transformation
)
def _api_group_for_type(cls):
"""
Determine which Kubernetes API group a particular PClass is likely to
belong with.
This is basically nonsense. The question being asked is wrong. An
abstraction has failed somewhere. Fixing that will get rid of the need
for this.
"""
_groups = {
(u"v1beta1", u"Deployment"): u"extensions",
(u"v1beta1", u"DeploymentList"): u"extensions",
(u"v1beta1", u"ReplicaSet"): u"extensions",
(u"v1beta1", u"ReplicaSetList"): u"extensions",
}
key = (
cls.apiVersion,
cls.__name__.rsplit(u".")[-1],
)
group = _groups.get(key, None)
return group
class IAgency(Interface):
"""
An ``IAgency`` implementation can impress certain additional behaviors
upon a ``_KubernetesState``. The latter shall use methods of the former
during state changes to give the former an opportunity to influence the
outcome of the state change.
"""
def before_create(state, obj):
"""
This is called before an object is created.
:param _KubernetesState state: The state in which the object is being
created.
:param IObject obj: A description of the object to be created.
:return IObject: The object to really create. Typically this is some
transformation of ``obj`` (for example, with default values
populated).
"""
def after_create(state, obj):
"""
This is called after an object has been created.
:param _KubernetesState state: The state in which the object is being
created.
:param IObject obj: A description of the object created. Regardless
of the implementation of this method, this is the description
which will be returned in the response to the create operation.
:return IObject: The object to store in the state. Typically this is
some transformation of ``obj`` (for example, with an observed
            status attached).
"""
def before_replace(state, old, new):
"""
This is called before an existing object is replaced by a new one.
:param _KubernetesState state: The state in which the object is being
replaced.
:param IObject old: A description of the object being replaced.
:param IObject new: A description of the object to replace ``old``.
:raise: Some exception to prevent the replacement from taking place.
:return: ``None``
"""
@implementer(IAgency)
class NullAgency(object):
"""
``NullAgency`` does nothing.
"""
def before_create(self, state, obj):
return obj
def after_create(self, state, obj):
return obj
def before_replace(self, state, old, new):
pass
@implementer(IAgency)
@attr.s(frozen=True)
class AdHocAgency(object):
"""
``AdHocAgency`` implements some object changes which I observed to happen
on a real Kubernetes server while I was working on various parts of
txkube. No attempt at completeness attempted. The system for selecting
changes to implement is to run into important inconsistencies between this
and a real Kubernetes while developing other features and then fix those
inconsistencies.
Perhaps in the future this will be replaced by something with less of an
ad hoc nature.
"""
model = attr.ib()
def before_create(self, state, obj):
return obj.fill_defaults()
def after_create(self, state, obj):
if isinstance(obj, self.model.v1beta1.Deployment):
obj = _transform_object(
obj,
[u"metadata", u"annotations", u"deployment.kubernetes.io/revision"],
u"1",
[u"status"],
{},
[u"status", u"observedGeneration"],
1,
[u"status", u"unavailableReplicas"],
1,
)
return obj
def before_replace(self, state, old, new):
if old.metadata.resourceVersion != new.metadata.resourceVersion:
group = _api_group_for_type(type(old))
details = {
u"group": group,
u"kind": old.kind,
u"name": old.metadata.name,
}
raise KubernetesError.object_modified(details)
class _KubernetesState(PClass):
"""
``_KubernetesState`` contains the canonical representation of internal
state required to emulate a Kubernetes server.
:ivar IAgency agency: Any behavior to apply to transformations of this
state.
"""
agency = field()
namespaces = field()
configmaps = field()
services = field()
pods = field()
deployments = field()
replicasets = field()
@classmethod
def for_model(cls, model):
return cls(
agency=AdHocAgency(model=model),
namespaces=model.v1.NamespaceList(),
configmaps=model.v1.ConfigMapList(),
services=model.v1.ServiceList(),
pods=model.v1.PodList(),
deployments=model.v1beta1.DeploymentList(),
replicasets=model.v1beta1.ReplicaSetList(),
)
def create(self, collection_name, obj):
"""
Create a new object in the named collection.
:param unicode collection_name: The name of the collection in which to
create the object.
:param IObject obj: A description of the object to create.
:return _KubernetesState: A new state based on the current state but
also containing ``obj``.
"""
obj = self.agency.before_create(self, obj)
new = self.agency.after_create(self, obj)
updated = self.transform(
[collection_name],
lambda c: c.add(new),
)
return updated
def replace(self, collection_name, old, new):
"""
Replace an existing object with a new version of it.
:param unicode collection_name: The name of the collection in which to
replace an object.
:param IObject old: A description of the object being replaced.
:param IObject new: A description of the object to take the place of
``old``.
:return _KubernetesState: A new state based on the current state but
also containing ``obj``.
"""
self.agency.before_replace(self, old, new)
updated = self.transform(
[collection_name],
lambda c: c.replace(old, new),
)
return updated
def delete(self, collection_name, obj):
"""
Delete an existing object.
:param unicode collection_name: The name of the collection from which
to delete the object.
:param IObject obj: A description of the object to delete.
:return _KubernetesState: A new state based on the current state but
not containing ``obj``.
"""
updated = self.transform(
[collection_name],
lambda c: obj.delete_from(c),
)
return updated
def response(request, status, obj):
"""
Generate a response.
:param IRequest request: The request being responsed to.
:param int status: The response status code to set.
:param obj: Something JSON-dumpable to write into the response body.
:return bytes: The response body to write out. eg, return this from a
*render_* method.
"""
request.setResponseCode(status)
request.responseHeaders.setRawHeaders(
u"content-type", [u"application/json"],
)
body = dumps_bytes(obj)
return body
@attr.s(frozen=True)
class _Kubernetes(object):
"""
A place to stick a bunch of Klein definitions.
:ivar _MemoryKubernetes service: The Kubernetes-alike holding the
in-memory Kubernetes state with which the API will be interacting.
:ivar model: All of the Kubernetes model objects as understood by this
service.
"""
service = attr.ib()
model = attr.ib()
# This could be a property except
# https://github.com/hynek/attrs/issues/144
def _get_state(self):
return self.service._state
def _set_state(self, value):
self.service._state_changed(value)
def _reduce_to_namespace(self, collection, namespace):
# Unfortunately pset does not support transform. :( Use this more
# verbose .set() operation.
return collection.set(
u"items",
pset(obj for obj in collection.items if obj.metadata.namespace == namespace),
)
def _collection_by_name(self, collection_name):
return getattr(self._get_state(), collection_name)
def _list(self, request, namespace, collection_name):
with start_action(action_type=u"memory:list", kind=collection_name):
collection = self._collection_by_name(collection_name)
if namespace is not None:
collection = self._reduce_to_namespace(collection, namespace)
return response(request, OK, self.model.iobject_to_raw(collection))
def _get(self, request, collection_name, namespace, name):
collection = self._collection_by_name(collection_name)
if namespace is not None:
collection = self._reduce_to_namespace(collection, namespace)
try:
obj = collection.item_by_name(name)
except KeyError:
raise KubernetesError.not_found({
u"name": name,
u"kind": collection_name,
u"group": _api_group_for_type(type(collection))
})
return response(request, OK, self.model.iobject_to_raw(obj))
def _create(self, request, collection_name):
with start_action(action_type=u"memory:create", kind=collection_name):
obj = self.model.iobject_from_raw(loads(request.content.read()))
try:
state = self._get_state().create(collection_name, obj)
except InvariantException:
collection = getattr(self._get_state(), collection_name)
details = {
u"name": obj.metadata.name,
u"kind": collection_name,
u"group": _api_group_for_type(type(collection)),
}
raise KubernetesError.already_exists(details)
self._set_state(state)
return response(request, CREATED, self.model.iobject_to_raw(obj))
def _replace(self, request, collection_name, namespace, name):
collection = self._collection_by_name(collection_name)
if namespace is not None:
collection = self._reduce_to_namespace(collection, namespace)
old = collection.item_by_name(name)
new = self.model.iobject_from_raw(loads(request.content.read()))
try:
state = self._get_state().replace(collection_name, old, new)
except KubernetesError as e:
return response(request, e.code, self.model.iobject_to_raw(e.status))
self._set_state(state)
return response(request, OK, self.model.iobject_to_raw(new))
def _delete(self, request, collection_name, namespace, name):
collection = self._collection_by_name(collection_name)
if namespace is not None:
collection = self._reduce_to_namespace(collection, namespace)
try:
obj = collection.item_by_name(name)
except KeyError:
raise KubernetesError.not_found({
u"group": _api_group_for_type(type(collection)),
u"kind": collection_name,
u"name": name,
})
self._set_state(self._get_state().delete(collection_name, obj))
return response(request, OK, self.model.iobject_to_raw(obj))
app = Klein()
@app.handle_errors(NotFound)
def not_found(self, request, name):
return response(
request,
NOT_FOUND,
self.model.iobject_to_raw(self.model.v1.Status(
status=u"Failure",
message=u"the server could not find the requested resource",
reason=u"NotFound",
details={},
metadata={},
code=NOT_FOUND,
)),
)
@app.handle_errors(KubernetesError)
def object_not_found(self, request, reason):
exc = reason.value
return response(request, exc.code, self.model.iobject_to_raw(exc.status))
@app.route(u"/version", methods=[u"GET"])
def get_version(self, request):
"""
Get version information about this server.
"""
return response(request, OK, self.model.version.serialize())
@app.route(u"/swagger.json", methods=[u"GET"])
def get_openapi(self, request):
"""
Get the OpenAPI specification for this server.
"""
return response(request, OK, self.model.spec.to_document())
with app.subroute(u"/api/v1") as app:
@app.route(u"/namespaces", methods=[u"GET"])
def list_namespaces(self, request):
"""
Get all existing Namespaces.
"""
return self._list(request, None, u"namespaces")
@app.route(u"/namespaces/<namespace>", methods=[u"GET"])
def get_namespace(self, request, namespace):
"""
Get one Namespace by name.
"""
return self._get(request, u"namespaces", None, namespace)
@app.route(u"/namespaces/<namespace>", methods=[u"DELETE"])
def delete_namespace(self, request, namespace):
"""
Delete one Namespace by name.
"""
return self._delete(
request, u"namespaces", None, namespace,
)
@app.route(u"/namespaces", methods=[u"POST"])
def create_namespace(self, request):
"""
Create a new Namespace.
"""
return self._create(request, u"namespaces")
@app.route(u"/namespaces/<namespace>", methods=[u"PUT"])
def replace_namespace(self, request, namespace):
"""
Replace an existing Namespace.
"""
return self._replace(
request,
u"namespaces",
None,
namespace,
)
@app.route(u"/namespaces/<namespace>/pods", methods=[u"POST"])
def create_pod(self, request, namespace):
"""
Create a new Pod.
"""
return self._create(request, u"pods")
@app.route(u"/namespaces/<namespace>/pods/<pod>", methods=[u"DELETE"])
def delete_pod(self, request, namespace, pod):
"""
        Delete one Pod by name.
"""
return self._delete(request, u"pods", namespace, pod)
@app.route(u"/pods", methods=[u"GET"])
def list_pods(self, request):
"""
Get all existing Pods.
"""
return self._list(request, None, u"pods")
@app.route(u"/namespaces/<namespace>/pods/<pod>", methods=[u"PUT"])
def replace_pod(self, request, namespace, pod):
"""
Replace an existing Pod.
"""
return self._replace(request, u"pods", namespace, pod)
@app.route(u"/namespaces/<namespace>/pods/<pod>", methods=[u"GET"])
def get_pod(self, request, namespace, pod):
"""
Get one Pod by name.
"""
return self._get(request, u"pods", namespace, pod)
@app.route(u"/configmaps", methods=[u"GET"])
def list_configmaps(self, request):
"""
Get all existing ConfigMaps.
"""
return self._list(request, None, u"configmaps")
@app.route(u"/namespaces/<namespace>/configmaps/<configmap>", methods=[u"GET"])
def get_configmap(self, request, namespace, configmap):
"""
Get one ConfigMap by name.
"""
return self._get(request, u"configmaps", namespace, configmap)
@app.route(u"/namespaces/<namespace>/configmaps", methods=[u"POST"])
def create_configmap(self, request, namespace):
"""
Create a new ConfigMap.
"""
return self._create(request, u"configmaps")
@app.route(u"/namespaces/<namespace>/configmaps/<configmap>", methods=[u"PUT"])
def replace_configmap(self, request, namespace, configmap):
"""
Replace an existing ConfigMap.
"""
return self._replace(
request,
u"configmaps",
namespace,
configmap,
)
@app.route(u"/namespaces/<namespace>/configmaps/<configmap>", methods=[u"DELETE"])
def delete_configmap(self, request, namespace, configmap):
"""
Delete one ConfigMap by name.
"""
return self._delete(
request, u"configmaps", namespace, configmap,
)
@app.route(u"/namespaces/<namespace>/services", methods=[u"POST"])
def create_service(self, request, namespace):
"""
Create a new Service.
"""
return self._create(request, u"services")
@app.route(u"/namespaces/<namespace>/services/<service>", methods=[u"PUT"])
def replace_service(self, request, namespace, service):
"""
Replace an existing Service.
"""
return self._replace(
request,
u"services",
namespace,
service,
)
@app.route(u"/services", methods=[u"GET"])
def list_services(self, request):
"""
Get all existing Services.
"""
return self._list(request, None, u"services")
@app.route(u"/namespaces/<namespace>/services/<service>", methods=[u"GET"])
def get_service(self, request, namespace, service):
"""
Get one Service by name.
"""
return self._get(
request,
u"services",
namespace,
service,
)
@app.route(u"/namespaces/<namespace>/services/<service>", methods=[u"DELETE"])
def delete_service(self, request, namespace, service):
"""
Delete one Service by name.
"""
return self._delete(
request, u"services", namespace, service,
)
with app.subroute(u"/apis/extensions/v1beta1") as app:
@app.route(u"/namespaces/<namespace>/deployments", methods=[u"POST"])
def create_deployment(self, request, namespace):
"""
Create a new Deployment.
"""
return self._create(
request,
u"deployments",
)
@app.route(u"/namespaces/<namespace>/deployments/<deployment>", methods=[u"PUT"])
def replace_deployment(self, request, namespace, deployment):
"""
Replace an existing Deployment.
"""
return self._replace(
request,
u"deployments",
namespace,
deployment,
)
@app.route(u"/deployments", methods=[u"GET"])
def list_deployments(self, request):
"""
Get all existing Deployments.
"""
return self._list(request, None, u"deployments")
@app.route(u"/namespaces/<namespace>/deployments/<deployment>", methods=[u"GET"])
def get_deployment(self, request, namespace, deployment):
"""
Get one Deployment by name.
"""
return self._get(
request,
u"deployments",
namespace,
deployment,
)
@app.route(u"/namespaces/<namespace>/deployments/<deployment>", methods=[u"DELETE"])
def delete_deployment(self, request, namespace, deployment):
"""
Delete one Deployment by name.
"""
return self._delete(
request, u"deployments", namespace, deployment,
)
@app.route(u"/namespaces/<namespace>/replicasets", methods=[u"POST"])
def create_replicaset(self, request, namespace):
"""
Create a new ReplicaSet.
"""
return self._create(
request,
u"replicasets",
)
@app.route(u"/namespaces/<namespace>/replicasets/<replicaset>", methods=[u"PUT"])
def replace_replicaset(self, request, namespace, replicaset):
"""
Replace an existing ReplicaSet.
"""
return self._replace(
request,
u"replicasets",
namespace,
replicaset,
)
@app.route(u"/replicasets", methods=[u"GET"])
def list_replicasets(self, request):
"""
Get all existing ReplicaSets.
"""
return self._list(request, None, u"replicasets")
@app.route(u"/namespaces/<namespace>/replicasets/<replicaset>", methods=[u"GET"])
def get_replicaset(self, request, namespace, replicaset):
"""
Get one ReplicaSet by name.
"""
return self._get(
request,
u"replicasets",
namespace,
replicaset,
)
@app.route(u"/namespaces/<namespace>/replicasets/<replicaset>", methods=[u"DELETE"])
def delete_replicaset(self, request, namespace, replicaset):
"""
Delete one ReplicaSet by name.
"""
return self._delete(
request, u"replicasets", namespace, replicaset,
)
| LeastAuthority/txkube | src/txkube/_memory.py | Python | mit | 25,882 | 0.001468 |
# -*- coding: utf-8 -*-
#
# Copyright © 2012 - 2015 Michal Čihař <[email protected]>
#
# This file is part of Weblate <http://weblate.org/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
Tests for translation models.
"""
from django.test import TestCase
from django.conf import settings
from django.utils import timezone
from django.contrib.auth.models import Permission, User
from django.core.exceptions import ValidationError
import shutil
import os
from weblate.trans.models import (
Project, SubProject, Unit, WhiteboardMessage, Check, get_related_units,
)
from weblate.trans.models.source import Source
from weblate import appsettings
from weblate.trans.tests.utils import get_test_file
from weblate.trans.vcs import GitRepository, HgRepository
REPOWEB_URL = \
'https://github.com/nijel/weblate-test/blob/master/%(file)s#L%(line)s'
GIT_URL = 'git://github.com/nijel/weblate-test.git'
HG_URL = 'https://[email protected]/nijel/weblate-test'
class RepoTestCase(TestCase):
"""
Generic class for tests working with repositories.
"""
def setUp(self):
# Path where to clone remote repo for tests
self.git_base_repo_path = os.path.join(
settings.DATA_DIR,
'test-base-repo.git'
)
# Repository on which tests will be performed
self.git_repo_path = os.path.join(
settings.DATA_DIR,
'test-repo.git'
)
# Path where to clone remote repo for tests
self.hg_base_repo_path = os.path.join(
settings.DATA_DIR,
'test-base-repo.hg'
)
# Repository on which tests will be performed
self.hg_repo_path = os.path.join(
settings.DATA_DIR,
'test-repo.hg'
)
# Clone repo for testing
if not os.path.exists(self.git_base_repo_path):
print(
'Cloning test repository to {0}...'.format(
self.git_base_repo_path
)
)
GitRepository.clone(
GIT_URL,
self.git_base_repo_path,
bare=True
)
# Remove possibly existing directory
if os.path.exists(self.git_repo_path):
shutil.rmtree(self.git_repo_path)
# Create repository copy for the test
shutil.copytree(self.git_base_repo_path, self.git_repo_path)
# Clone repo for testing
if not os.path.exists(self.hg_base_repo_path):
HgRepository.clone(
HG_URL,
self.hg_base_repo_path,
bare=True
)
# Remove possibly existing directory
if os.path.exists(self.hg_repo_path):
shutil.rmtree(self.hg_repo_path)
# Create repository copy for the test
shutil.copytree(self.hg_base_repo_path, self.hg_repo_path)
# Remove possibly existing project directory
test_repo_path = os.path.join(settings.DATA_DIR, 'vcs', 'test')
if os.path.exists(test_repo_path):
shutil.rmtree(test_repo_path)
def create_project(self):
"""
Creates test project.
"""
project = Project.objects.create(
name='Test',
slug='test',
web='http://weblate.org/'
)
self.addCleanup(shutil.rmtree, project.get_path(), True)
return project
def _create_subproject(self, file_format, mask, template='',
new_base='', vcs='git'):
"""
Creates real test subproject.
"""
project = self.create_project()
if vcs == 'mercurial':
branch = 'default'
repo = self.hg_repo_path
push = self.hg_repo_path
else:
branch = 'master'
repo = self.git_repo_path
push = self.git_repo_path
return SubProject.objects.create(
name='Test',
slug='test',
project=project,
repo=repo,
push=push,
branch=branch,
filemask=mask,
template=template,
file_format=file_format,
repoweb=REPOWEB_URL,
save_history=True,
new_base=new_base,
vcs=vcs
)
def create_subproject(self):
"""
Wrapper method for providing test subproject.
"""
return self._create_subproject(
'auto',
'po/*.po',
)
def create_po(self):
return self._create_subproject(
'po',
'po/*.po',
)
def create_po_mercurial(self):
return self._create_subproject(
'po',
'po/*.po',
vcs='mercurial'
)
def create_po_new_base(self):
return self._create_subproject(
'po',
'po/*.po',
new_base='po/hello.pot'
)
def create_po_link(self):
return self._create_subproject(
'po',
'po-link/*.po',
)
def create_po_mono(self):
return self._create_subproject(
'po-mono',
'po-mono/*.po',
'po-mono/en.po',
)
def create_ts(self, suffix=''):
return self._create_subproject(
'ts',
'ts{0}/*.ts'.format(suffix),
)
def create_iphone(self):
return self._create_subproject(
'strings',
'iphone/*.lproj/Localizable.strings',
)
def create_android(self):
return self._create_subproject(
'aresource',
'android/values-*/strings.xml',
'android/values/strings.xml',
)
def create_json(self):
return self._create_subproject(
'json',
'json/*.json',
)
def create_json_mono(self):
return self._create_subproject(
'json',
'json-mono/*.json',
'json-mono/en.json',
)
def create_java(self):
return self._create_subproject(
'properties',
'java/swing_messages_*.properties',
'java/swing_messages.properties',
)
def create_xliff(self, name='default'):
return self._create_subproject(
'xliff',
'xliff/*/%s.xlf' % name,
)
def create_link(self):
parent = self.create_iphone()
return SubProject.objects.create(
name='Test2',
slug='test2',
project=parent.project,
repo='weblate://test/test',
file_format='po',
filemask='po/*.po',
)
class ProjectTest(RepoTestCase):
"""
Project object testing.
"""
def test_create(self):
project = self.create_project()
self.assertTrue(os.path.exists(project.get_path()))
self.assertTrue(project.slug in project.get_path())
def test_rename(self):
project = self.create_project()
old_path = project.get_path()
self.assertTrue(os.path.exists(old_path))
project.slug = 'changed'
project.save()
new_path = project.get_path()
self.addCleanup(shutil.rmtree, new_path, True)
self.assertFalse(os.path.exists(old_path))
self.assertTrue(os.path.exists(new_path))
def test_delete(self):
project = self.create_project()
self.assertTrue(os.path.exists(project.get_path()))
project.delete()
self.assertFalse(os.path.exists(project.get_path()))
def test_delete_all(self):
project = self.create_project()
self.assertTrue(os.path.exists(project.get_path()))
Project.objects.all().delete()
self.assertFalse(os.path.exists(project.get_path()))
def test_wrong_path(self):
project = self.create_project()
backup = appsettings.DATA_DIR
appsettings.DATA_DIR = '/weblate-nonexisting-path'
# Invalidate cache, pylint: disable=W0212
project._dir_path = None
self.assertRaisesMessage(
ValidationError,
'Could not create project directory',
project.full_clean
)
appsettings.DATA_DIR = backup
def test_acl(self):
"""
Test for ACL handling.
"""
# Create user to verify ACL
user = User.objects.create_user(
username='testuser',
password='testpassword'
)
# Create project
project = self.create_project()
# Enable ACL
project.enable_acl = True
project.save()
# Check user does not have access
self.assertFalse(project.has_acl(user))
# Add ACL
permission = Permission.objects.get(codename='weblate_acl_test')
user.user_permissions.add(permission)
# Need to fetch user again to clear permission cache
user = User.objects.get(username='testuser')
# We now should have access
self.assertTrue(project.has_acl(user))
class SubProjectTest(RepoTestCase):
"""
SubProject object testing.
"""
def verify_subproject(self, project, translations, lang, units,
unit='Hello, world!\n', fail=False):
# Validation
if fail:
self.assertRaises(
ValidationError,
project.full_clean
)
else:
project.full_clean()
# Correct path
self.assertTrue(os.path.exists(project.get_path()))
# Count translations
self.assertEqual(
project.translation_set.count(), translations
)
# Grab translation
translation = project.translation_set.get(language_code=lang)
# Count units in it
self.assertEqual(translation.unit_set.count(), units)
# Check whether unit exists
self.assertTrue(translation.unit_set.filter(source=unit).exists())
def test_create(self):
project = self.create_subproject()
self.verify_subproject(project, 3, 'cs', 4)
self.assertTrue(os.path.exists(project.get_path()))
def test_create_dot(self):
project = self._create_subproject(
'auto',
'./po/*.po',
)
self.verify_subproject(project, 3, 'cs', 4)
self.assertTrue(os.path.exists(project.get_path()))
self.assertEqual('po/*.po', project.filemask)
def test_rename(self):
subproject = self.create_subproject()
old_path = subproject.get_path()
self.assertTrue(os.path.exists(old_path))
subproject.slug = 'changed'
subproject.save()
self.assertFalse(os.path.exists(old_path))
self.assertTrue(os.path.exists(subproject.get_path()))
def test_delete(self):
project = self.create_subproject()
self.assertTrue(os.path.exists(project.get_path()))
project.delete()
self.assertFalse(os.path.exists(project.get_path()))
def test_delete_link(self):
project = self.create_link()
main_project = SubProject.objects.get(slug='test')
self.assertTrue(os.path.exists(main_project.get_path()))
project.delete()
self.assertTrue(os.path.exists(main_project.get_path()))
def test_delete_all(self):
project = self.create_subproject()
self.assertTrue(os.path.exists(project.get_path()))
SubProject.objects.all().delete()
self.assertFalse(os.path.exists(project.get_path()))
def test_create_iphone(self):
project = self.create_iphone()
self.verify_subproject(project, 1, 'cs', 4)
def test_create_ts(self):
project = self.create_ts('-translated')
self.verify_subproject(project, 1, 'cs', 4)
unit = Unit.objects.get(source__startswith='Orangutan')
self.assertTrue(unit.is_plural())
self.assertFalse(unit.translated)
self.assertFalse(unit.fuzzy)
unit = Unit.objects.get(source__startswith='Hello')
self.assertFalse(unit.is_plural())
self.assertTrue(unit.translated)
self.assertFalse(unit.fuzzy)
self.assertEqual(unit.target, 'Hello, world!\n')
unit = Unit.objects.get(source__startswith='Thank ')
self.assertFalse(unit.is_plural())
self.assertFalse(unit.translated)
self.assertTrue(unit.fuzzy)
self.assertEqual(unit.target, 'Thanks')
def test_create_po_pot(self):
project = self._create_subproject(
'po',
'po/*.po',
'po/project.pot'
)
self.verify_subproject(project, 3, 'cs', 4, fail=True)
def test_create_auto_pot(self):
project = self._create_subproject(
'auto',
'po/*.po',
'po/project.pot'
)
self.verify_subproject(project, 3, 'cs', 4, fail=True)
def test_create_po(self):
project = self.create_po()
self.verify_subproject(project, 3, 'cs', 4)
def test_create_po_mercurial(self):
project = self.create_po_mercurial()
self.verify_subproject(project, 3, 'cs', 4)
def test_create_po_link(self):
project = self.create_po_link()
self.verify_subproject(project, 3, 'cs', 4)
def test_create_po_mono(self):
project = self.create_po_mono()
self.verify_subproject(project, 4, 'cs', 4)
def test_create_android(self):
project = self.create_android()
self.verify_subproject(project, 2, 'cs', 4)
def test_create_json(self):
project = self.create_json()
self.verify_subproject(project, 1, 'cs', 4)
def test_create_json_mono(self):
project = self.create_json_mono()
self.verify_subproject(project, 2, 'cs', 4)
def test_create_java(self):
project = self.create_java()
self.verify_subproject(project, 3, 'cs', 4)
def test_create_xliff(self):
project = self.create_xliff()
self.verify_subproject(project, 1, 'cs', 4)
def test_create_xliff_dph(self):
project = self.create_xliff('DPH')
self.verify_subproject(project, 1, 'en', 9, 'DPH')
def test_create_xliff_empty(self):
project = self.create_xliff('EMPTY')
self.verify_subproject(project, 1, 'en', 6, 'DPH')
def test_create_xliff_resname(self):
project = self.create_xliff('Resname')
self.verify_subproject(project, 1, 'en', 2, 'Hi')
def test_link(self):
project = self.create_link()
self.verify_subproject(project, 3, 'cs', 4)
def test_extra_file(self):
"""
Extra commit file validation.
"""
project = self.create_subproject()
project.full_clean()
project.extra_commit_file = 'locale/list.txt'
project.full_clean()
project.extra_commit_file = 'locale/%(language)s.txt'
project.full_clean()
project.extra_commit_file = 'locale/%(bar)s.txt'
self.assertRaisesMessage(
ValidationError,
"Bad format string ('bar')",
project.full_clean
)
def test_check_flags(self):
"""
Check flags validation.
"""
project = self.create_subproject()
project.full_clean()
project.check_flags = 'ignore-inconsistent'
project.full_clean()
project.check_flags = 'rst-text,ignore-inconsistent'
project.full_clean()
project.check_flags = 'nonsense'
self.assertRaisesMessage(
ValidationError,
'Invalid check flag: "nonsense"',
project.full_clean
)
project.check_flags = 'rst-text,ignore-nonsense'
self.assertRaisesMessage(
ValidationError,
'Invalid check flag: "ignore-nonsense"',
project.full_clean
)
def test_validation(self):
project = self.create_subproject()
# Correct project
project.full_clean()
# Invalid commit message
project.commit_message = '%(foo)s'
self.assertRaisesMessage(
ValidationError,
'Bad format string',
project.full_clean
)
# Invalid mask
project.filemask = 'foo/x.po'
self.assertRaisesMessage(
ValidationError,
'File mask does not contain * as a language placeholder!',
project.full_clean
)
# Not matching mask
project.filemask = 'foo/*.po'
self.assertRaisesMessage(
ValidationError,
'The mask did not match any files!',
project.full_clean
)
# Unknown file format
project.filemask = 'iphone/*.lproj/Localizable.strings'
self.assertRaisesMessage(
ValidationError,
'Format of 1 matched files could not be recognized.',
project.full_clean
)
# Repoweb
project.repoweb = 'http://%(foo)s/%(bar)s/%72'
self.assertRaisesMessage(
ValidationError,
"Bad format string ('foo')",
project.full_clean
)
project.repoweb = ''
# Bad link
project.repo = 'weblate://foo'
project.push = ''
self.assertRaisesMessage(
ValidationError,
'Invalid link to a Weblate project, '
'use weblate://project/subproject.',
project.full_clean
)
# Bad link
project.repo = 'weblate://foo/bar'
project.push = ''
self.assertRaisesMessage(
ValidationError,
'Invalid link to a Weblate project, '
'use weblate://project/subproject.',
project.full_clean
)
# Bad link
project.repo = 'weblate://test/test'
project.push = ''
self.assertRaisesMessage(
ValidationError,
'Invalid link to a Weblate project, '
'can not link to self!',
project.full_clean
)
def test_validation_mono(self):
project = self.create_po_mono()
# Correct project
project.full_clean()
# Not existing file
project.template = 'not-existing'
self.assertRaisesMessage(
ValidationError,
'Template file not found!',
project.full_clean
)
def test_validation_newlang(self):
subproject = self.create_subproject()
subproject.new_base = 'po/project.pot'
subproject.save()
# Check that it warns about unused pot
self.assertRaisesMessage(
ValidationError,
'Base file for new translations is not used '
'because of project settings.',
subproject.full_clean
)
subproject.new_lang = 'add'
subproject.save()
# Check that it warns about not supported format
self.assertRaisesMessage(
ValidationError,
'Chosen file format does not support adding new '
'translations as chosen in project settings.',
subproject.full_clean
)
subproject.file_format = 'po'
subproject.save()
# Clean class cache, pylint: disable=W0212
subproject._file_format = None
# With correct format it should validate
subproject.full_clean()
def test_change_to_mono(self):
"""Test swtiching to monolingual format on the fly."""
component = self._create_subproject(
'po',
'po-mono/*.po',
)
self.assertEqual(component.translation_set.count(), 4)
component.file_format = 'po-mono'
component.template = 'po-mono/en.po'
component.save()
self.assertEqual(component.translation_set.count(), 4)
class TranslationTest(RepoTestCase):
"""
Translation testing.
"""
def test_basic(self):
project = self.create_subproject()
translation = project.translation_set.get(language_code='cs')
self.assertEqual(translation.translated, 0)
self.assertEqual(translation.total, 4)
self.assertEqual(translation.fuzzy, 0)
def test_extra_file(self):
"""
Test extra commit file handling.
"""
subproject = self.create_subproject()
subproject.pre_commit_script = get_test_file(
'../../../../examples/hook-generate-mo'
)
appsettings.SCRIPT_CHOICES.append(
(subproject.pre_commit_script, 'hook-generate-mo')
)
subproject.extra_commit_file = 'po/%(language)s.mo'
subproject.save()
subproject.full_clean()
translation = subproject.translation_set.get(language_code='cs')
# change backend file
with open(translation.get_filename(), 'a') as handle:
handle.write(' ')
# Test committing
translation.git_commit(
None, 'TEST <[email protected]>', timezone.now(),
force_commit=True
)
def test_validation(self):
"""
Translation validation
"""
project = self.create_subproject()
translation = project.translation_set.get(language_code='cs')
translation.full_clean()
class WhiteboardMessageTest(TestCase):
"""Test(s) for WhiteboardMessage model."""
def test_can_be_imported(self):
"""Test that whiteboard model can be imported.
Rather dumb test just to make sure there are no obvious parsing errors.
"""
WhiteboardMessage()
class SourceTest(RepoTestCase):
"""
Source objects testing.
"""
def setUp(self):
super(SourceTest, self).setUp()
self.create_subproject()
def test_exists(self):
self.assertTrue(Source.objects.exists())
def test_source_info(self):
unit = Unit.objects.all()[0]
self.assertIsNotNone(unit.source_info)
def test_priority(self):
unit = Unit.objects.all()[0]
self.assertEqual(unit.priority, 100)
source = unit.source_info
source.priority = 200
source.save()
unit2 = Unit.objects.get(pk=unit.pk)
self.assertEqual(unit2.priority, 200)
def test_check_flags(self):
"""
Setting of Source check_flags changes checks for related units.
"""
self.assertEquals(Check.objects.count(), 1)
check = Check.objects.all()[0]
unit = get_related_units(check)[0]
source = unit.source_info
source.check_flags = 'ignore-{0}'.format(check.check)
source.save()
self.assertEquals(Check.objects.count(), 0)
| dhodhala88/Bosch1 | weblate/trans/tests/test_models.py | Python | gpl-3.0 | 23,236 | 0 |
# Generated by Django 1.11.21 on 2019-07-01 12:48
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('instructor_task', '0002_gradereportsetting'),
]
operations = [
migrations.AlterField(
model_name='instructortask',
name='task_input',
field=models.TextField(),
),
]
| eduNEXT/edx-platform | lms/djangoapps/instructor_task/migrations/0003_alter_task_input_field.py | Python | agpl-3.0 | 396 | 0 |
import gc
import os
import random
import eventlet
from eventlet import hubs, greenpool, event, pools
from eventlet.support import greenlets as greenlet, six
import tests
def passthru(a):
eventlet.sleep(0.01)
return a
def passthru2(a, b):
eventlet.sleep(0.01)
return a, b
def raiser(exc):
raise exc
class GreenPool(tests.LimitedTestCase):
def test_spawn(self):
p = greenpool.GreenPool(4)
waiters = []
for i in range(10):
waiters.append(p.spawn(passthru, i))
results = [waiter.wait() for waiter in waiters]
self.assertEqual(results, list(range(10)))
def test_spawn_n(self):
p = greenpool.GreenPool(4)
results_closure = []
def do_something(a):
eventlet.sleep(0.01)
results_closure.append(a)
for i in range(10):
p.spawn(do_something, i)
p.waitall()
self.assertEqual(results_closure, list(range(10)))
def test_waiting(self):
pool = greenpool.GreenPool(1)
done = event.Event()
def consume():
done.wait()
def waiter(pool):
gt = pool.spawn(consume)
gt.wait()
waiters = []
self.assertEqual(pool.running(), 0)
waiters.append(eventlet.spawn(waiter, pool))
eventlet.sleep(0)
self.assertEqual(pool.waiting(), 0)
waiters.append(eventlet.spawn(waiter, pool))
eventlet.sleep(0)
self.assertEqual(pool.waiting(), 1)
waiters.append(eventlet.spawn(waiter, pool))
eventlet.sleep(0)
self.assertEqual(pool.waiting(), 2)
self.assertEqual(pool.running(), 1)
done.send(None)
for w in waiters:
w.wait()
self.assertEqual(pool.waiting(), 0)
self.assertEqual(pool.running(), 0)
def test_multiple_coros(self):
evt = event.Event()
results = []
def producer():
results.append('prod')
evt.send()
def consumer():
results.append('cons1')
evt.wait()
results.append('cons2')
pool = greenpool.GreenPool(2)
done = pool.spawn(consumer)
pool.spawn_n(producer)
done.wait()
self.assertEqual(['cons1', 'prod', 'cons2'], results)
def test_timer_cancel(self):
# this test verifies that local timers are not fired
# outside of the context of the spawn
timer_fired = []
def fire_timer():
timer_fired.append(True)
def some_work():
hubs.get_hub().schedule_call_local(0, fire_timer)
pool = greenpool.GreenPool(2)
worker = pool.spawn(some_work)
worker.wait()
eventlet.sleep(0)
eventlet.sleep(0)
self.assertEqual(timer_fired, [])
def test_reentrant(self):
pool = greenpool.GreenPool(1)
def reenter():
waiter = pool.spawn(lambda a: a, 'reenter')
self.assertEqual('reenter', waiter.wait())
outer_waiter = pool.spawn(reenter)
outer_waiter.wait()
evt = event.Event()
def reenter_async():
pool.spawn_n(lambda a: a, 'reenter')
evt.send('done')
pool.spawn_n(reenter_async)
self.assertEqual('done', evt.wait())
def assert_pool_has_free(self, pool, num_free):
self.assertEqual(pool.free(), num_free)
def wait_long_time(e):
e.wait()
timer = eventlet.Timeout(1)
try:
evt = event.Event()
for x in six.moves.range(num_free):
pool.spawn(wait_long_time, evt)
# if the pool has fewer free than we expect,
# then we'll hit the timeout error
finally:
timer.cancel()
# if the runtime error is not raised it means the pool had
# some unexpected free items
timer = eventlet.Timeout(0, RuntimeError)
try:
self.assertRaises(RuntimeError, pool.spawn, wait_long_time, evt)
finally:
timer.cancel()
# clean up by causing all the wait_long_time functions to return
evt.send(None)
eventlet.sleep(0)
eventlet.sleep(0)
def test_resize(self):
pool = greenpool.GreenPool(2)
evt = event.Event()
def wait_long_time(e):
e.wait()
pool.spawn(wait_long_time, evt)
pool.spawn(wait_long_time, evt)
self.assertEqual(pool.free(), 0)
self.assertEqual(pool.running(), 2)
self.assert_pool_has_free(pool, 0)
# verify that the pool discards excess items put into it
pool.resize(1)
# cause the wait_long_time functions to return, which will
# trigger puts to the pool
evt.send(None)
eventlet.sleep(0)
eventlet.sleep(0)
self.assertEqual(pool.free(), 1)
self.assertEqual(pool.running(), 0)
self.assert_pool_has_free(pool, 1)
# resize larger and assert that there are more free items
pool.resize(2)
self.assertEqual(pool.free(), 2)
self.assertEqual(pool.running(), 0)
self.assert_pool_has_free(pool, 2)
def test_pool_smash(self):
# The premise is that a coroutine in a Pool tries to get a token out
# of a token pool but times out before getting the token. We verify
# that neither pool is adversely affected by this situation.
pool = greenpool.GreenPool(1)
tp = pools.TokenPool(max_size=1)
tp.get() # empty out the pool
def do_receive(tp):
timer = eventlet.Timeout(0, RuntimeError())
try:
tp.get()
self.fail("Shouldn't have received anything from the pool")
except RuntimeError:
return 'timed out'
else:
timer.cancel()
# the spawn makes the token pool expect that coroutine, but then
# immediately cuts bait
e1 = pool.spawn(do_receive, tp)
self.assertEqual(e1.wait(), 'timed out')
# the pool can get some random item back
def send_wakeup(tp):
tp.put('wakeup')
gt = eventlet.spawn(send_wakeup, tp)
# now we ask the pool to run something else, which should not
# be affected by the previous send at all
def resume():
return 'resumed'
e2 = pool.spawn(resume)
self.assertEqual(e2.wait(), 'resumed')
# we should be able to get out the thing we put in there, too
self.assertEqual(tp.get(), 'wakeup')
gt.wait()
def test_spawn_n_2(self):
p = greenpool.GreenPool(2)
self.assertEqual(p.free(), 2)
r = []
def foo(a):
r.append(a)
gt = p.spawn(foo, 1)
self.assertEqual(p.free(), 1)
gt.wait()
self.assertEqual(r, [1])
eventlet.sleep(0)
self.assertEqual(p.free(), 2)
# Once the pool is exhausted, spawning forces a yield.
p.spawn_n(foo, 2)
self.assertEqual(1, p.free())
self.assertEqual(r, [1])
p.spawn_n(foo, 3)
self.assertEqual(0, p.free())
self.assertEqual(r, [1])
p.spawn_n(foo, 4)
self.assertEqual(set(r), set([1, 2, 3]))
eventlet.sleep(0)
self.assertEqual(set(r), set([1, 2, 3, 4]))
def test_exceptions(self):
p = greenpool.GreenPool(2)
for m in (p.spawn, p.spawn_n):
self.assert_pool_has_free(p, 2)
m(raiser, RuntimeError())
self.assert_pool_has_free(p, 1)
p.waitall()
self.assert_pool_has_free(p, 2)
m(raiser, greenlet.GreenletExit)
self.assert_pool_has_free(p, 1)
p.waitall()
self.assert_pool_has_free(p, 2)
def test_imap(self):
p = greenpool.GreenPool(4)
result_list = list(p.imap(passthru, range(10)))
self.assertEqual(result_list, list(range(10)))
def test_empty_imap(self):
p = greenpool.GreenPool(4)
result_iter = p.imap(passthru, [])
        self.assertRaises(StopIteration, six.next, result_iter)
def test_imap_nonefunc(self):
p = greenpool.GreenPool(4)
result_list = list(p.imap(None, range(10)))
self.assertEqual(result_list, [(x,) for x in range(10)])
def test_imap_multi_args(self):
p = greenpool.GreenPool(4)
result_list = list(p.imap(passthru2, range(10), range(10, 20)))
self.assertEqual(result_list, list(zip(range(10), range(10, 20))))
def test_imap_raises(self):
# testing the case where the function raises an exception;
# both that the caller sees that exception, and that the iterator
# continues to be usable to get the rest of the items
p = greenpool.GreenPool(4)
def raiser(item):
if item == 1 or item == 7:
raise RuntimeError("intentional error")
else:
return item
it = p.imap(raiser, range(10))
results = []
while True:
try:
results.append(six.next(it))
except RuntimeError:
results.append('r')
except StopIteration:
break
self.assertEqual(results, [0, 'r', 2, 3, 4, 5, 6, 'r', 8, 9])
def test_starmap(self):
p = greenpool.GreenPool(4)
result_list = list(p.starmap(passthru, [(x,) for x in range(10)]))
self.assertEqual(result_list, list(range(10)))
def test_waitall_on_nothing(self):
p = greenpool.GreenPool()
p.waitall()
def test_recursive_waitall(self):
p = greenpool.GreenPool()
gt = p.spawn(p.waitall)
self.assertRaises(AssertionError, gt.wait)
class GreenPile(tests.LimitedTestCase):
def test_pile(self):
p = greenpool.GreenPile(4)
for i in range(10):
p.spawn(passthru, i)
result_list = list(p)
self.assertEqual(result_list, list(range(10)))
def test_pile_spawn_times_out(self):
p = greenpool.GreenPile(4)
for i in range(4):
p.spawn(passthru, i)
# now it should be full and this should time out
eventlet.Timeout(0)
self.assertRaises(eventlet.Timeout, p.spawn, passthru, "time out")
# verify that the spawn breakage didn't interrupt the sequence
# and terminates properly
for i in range(4, 10):
p.spawn(passthru, i)
self.assertEqual(list(p), list(range(10)))
def test_constructing_from_pool(self):
pool = greenpool.GreenPool(2)
pile1 = greenpool.GreenPile(pool)
pile2 = greenpool.GreenPile(pool)
def bunch_of_work(pile, unique):
for i in range(10):
pile.spawn(passthru, i + unique)
eventlet.spawn(bunch_of_work, pile1, 0)
eventlet.spawn(bunch_of_work, pile2, 100)
eventlet.sleep(0)
self.assertEqual(list(pile2), list(range(100, 110)))
self.assertEqual(list(pile1), list(range(10)))
class StressException(Exception):
pass
r = random.Random(0)
def pressure(arg):
while r.random() < 0.5:
eventlet.sleep(r.random() * 0.001)
if r.random() < 0.8:
return arg
else:
raise StressException(arg)
def passthru(arg):
while r.random() < 0.5:
eventlet.sleep(r.random() * 0.001)
return arg
class Stress(tests.LimitedTestCase):
# tests will take extra-long
TEST_TIMEOUT = 60
@tests.skip_unless(os.environ.get('RUN_STRESS_TESTS') == 'YES')
def spawn_order_check(self, concurrency):
# checks that piles are strictly ordered
p = greenpool.GreenPile(concurrency)
def makework(count, unique):
for i in six.moves.range(count):
token = (unique, i)
p.spawn(pressure, token)
iters = 1000
eventlet.spawn(makework, iters, 1)
eventlet.spawn(makework, iters, 2)
eventlet.spawn(makework, iters, 3)
p.spawn(pressure, (0, 0))
latest = [-1] * 4
received = 0
it = iter(p)
while True:
try:
i = six.next(it)
except StressException as exc:
i = exc.args[0]
except StopIteration:
break
received += 1
if received % 5 == 0:
eventlet.sleep(0.0001)
unique, order = i
assert latest[unique] < order
latest[unique] = order
for l in latest[1:]:
self.assertEqual(l, iters - 1)
@tests.skip_unless(os.environ.get('RUN_STRESS_TESTS') == 'YES')
def test_ordering_5(self):
self.spawn_order_check(5)
@tests.skip_unless(os.environ.get('RUN_STRESS_TESTS') == 'YES')
def test_ordering_50(self):
self.spawn_order_check(50)
def imap_memory_check(self, concurrency):
# checks that imap is strictly
# ordered and consumes a constant amount of memory
p = greenpool.GreenPool(concurrency)
count = 1000
it = p.imap(passthru, six.moves.range(count))
latest = -1
while True:
try:
i = six.next(it)
except StopIteration:
break
if latest == -1:
gc.collect()
initial_obj_count = len(gc.get_objects())
assert i > latest
latest = i
if latest % 5 == 0:
eventlet.sleep(0.001)
if latest % 10 == 0:
gc.collect()
objs_created = len(gc.get_objects()) - initial_obj_count
assert objs_created < 25 * concurrency, objs_created
# make sure we got to the end
self.assertEqual(latest, count - 1)
@tests.skip_unless(os.environ.get('RUN_STRESS_TESTS') == 'YES')
def test_imap_50(self):
self.imap_memory_check(50)
@tests.skip_unless(os.environ.get('RUN_STRESS_TESTS') == 'YES')
def test_imap_500(self):
self.imap_memory_check(500)
@tests.skip_unless(os.environ.get('RUN_STRESS_TESTS') == 'YES')
def test_with_intpool(self):
class IntPool(pools.Pool):
def create(self):
self.current_integer = getattr(self, 'current_integer', 0) + 1
return self.current_integer
def subtest(intpool_size, pool_size, num_executes):
def run(int_pool):
token = int_pool.get()
eventlet.sleep(0.0001)
int_pool.put(token)
return token
int_pool = IntPool(max_size=intpool_size)
pool = greenpool.GreenPool(pool_size)
for ix in six.moves.range(num_executes):
pool.spawn(run, int_pool)
pool.waitall()
subtest(4, 7, 7)
subtest(50, 75, 100)
for isize in (10, 20, 30, 40, 50):
for psize in (5, 25, 35, 50):
subtest(isize, psize, psize)
| collinstocks/eventlet | tests/greenpool_test.py | Python | mit | 15,085 | 0.000133 |
"""
Common task fixtures.
"""
#
# Fimfarchive, preserves stories from Fimfiction.
# Copyright (C) 2020 Joakim Soderlund
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from copy import deepcopy
from typing import Dict
from fimfarchive.exceptions import InvalidStoryError
from fimfarchive.converters import Converter
from fimfarchive.fetchers import Fetcher
from fimfarchive.stories import Story
from fimfarchive.utils import Empty
class DummyConverer(Converter):
"""
Converter that increments a counter.
"""
def __call__(self, story: Story) -> Story:
meta = deepcopy(story.meta)
meta['conversions'] += 1
return story.merge(meta=meta)
class DummyFetcher(Fetcher):
"""
Fetcher with local instance storage.
"""
def __init__(self):
"""
Constructor.
"""
self.stories: Dict[int, Story] = dict()
def add(self, key, date, flavors=(), data=Empty):
"""
Adds a story to the fetcher.
"""
meta = {
'id': key,
'title': f't{key}',
'date_modified': date,
'conversions': 0,
'author': {
'id': key,
'name': f'n{key}'
},
'chapters': [
{'id': key},
],
}
if data is Empty:
text = f'd{key}'
data = text.encode()
story = Story(
key=key,
fetcher=self,
meta=meta,
data=data,
flavors=flavors,
)
self.stories[key] = story
return story
def fetch(self, key, prefetch_meta=None, prefetch_data=None):
"""
Returns a previously stored story.
"""
try:
return self.stories[key]
except KeyError:
raise InvalidStoryError()
def fetch_data(self, key):
"""
Raises exception for missing data.
"""
raise InvalidStoryError()
def fetch_meta(self, key):
"""
Raises exception for missing meta.
"""
raise InvalidStoryError()
def __iter__(self):
"""
Yields all previously stored stories.
"""
for key in sorted(self.stories.keys()):
yield self.stories[key]
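# Usage sketch (a hypothetical test body, not part of the fixtures above):
#
#   fetcher = DummyFetcher()
#   story = fetcher.add(1, '2020-01-01')
#   assert fetcher.fetch(1) is story
#   assert list(fetcher) == [story]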
| JockeTF/fimfarchive | tests/tasks/conftest.py | Python | gpl-3.0 | 2,907 | 0 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import argparse
import os, sys, re
import random
from time import time
import cv2
import numpy as np
import chainer
from chainer import cuda
import chainer.functions as F
import overwrite
from chainer.functions import caffe
"""
official page: http://illustration2vec.net/
paper: http://illustration2vec.net/papers/illustration2vec-main.pdf
caffe model: http://illustration2vec.net/models/illust2vec_ver200.caffemodel
image_mean: http://illustration2vec.net/models/image_mean.npy
layer structure is like this:
[(u'conv1_1', [u'data'], [u'conv1_1']),
(u'relu1_1', [u'conv1_1'], [u'conv1_1']),
(u'pool1', [u'conv1_1'], [u'pool1']),
(u'conv2_1', [u'pool1'], [u'conv2_1']),
(u'relu2_1', [u'conv2_1'], [u'conv2_1']),
(u'pool2', [u'conv2_1'], [u'pool2']),
(u'conv3_1', [u'pool2'], [u'conv3_1']),
(u'relu3_1', [u'conv3_1'], [u'conv3_1']),
(u'conv3_2', [u'conv3_1'], [u'conv3_2']),
(u'relu3_2', [u'conv3_2'], [u'conv3_2']),
(u'pool3', [u'conv3_2'], [u'pool3']),
(u'conv4_1', [u'pool3'], [u'conv4_1']),
(u'relu4_1', [u'conv4_1'], [u'conv4_1']),
(u'conv4_2', [u'conv4_1'], [u'conv4_2']),
(u'relu4_2', [u'conv4_2'], [u'conv4_2']),
(u'pool4', [u'conv4_2'], [u'pool4']),
(u'conv5_1', [u'pool4'], [u'conv5_1']),
(u'relu5_1', [u'conv5_1'], [u'conv5_1']),
(u'conv5_2', [u'conv5_1'], [u'conv5_2']),
(u'relu5_2', [u'conv5_2'], [u'conv5_2']),
(u'pool5', [u'conv5_2'], [u'pool5']),
(u'conv6_1', [u'pool5'], [u'conv6_1']),
(u'relu6_1', [u'conv6_1'], [u'conv6_1']),
(u'conv6_2', [u'conv6_1'], [u'conv6_2']),
(u'relu6_2', [u'conv6_2'], [u'conv6_2']),
(u'conv6_3', [u'conv6_2'], [u'conv6_3']),
(u'relu6_3', [u'conv6_3'], [u'conv6_3']),
(u'drop6_3', [u'conv6_3'], [u'conv6_3']),
(u'encode1', [u'conv6_3'], [u'encode1']),
(u'encode2', [u'encode1neuron'], [u'encode2'])]
"""
def getNeuralCode(directory, layer="conv5_1", gpu=-1):
model = "illust2vec_ver200.caffemodel"
#use illust2vec_ver200
print('illust2vec_ver200 is being loaded!')
#calculate load time
timeMemory = time()
func = caffe.CaffeFunction(model)
print('illust2vec_ver200 was loaded!')
    print('It took ' + str(int(time() - timeMemory)) + " seconds")
#gpu mode
if gpu >= 0:
cuda.init(gpu)
func.to_gpu()
in_size = 224
# Constant mean over spatial pixels
mean_image = np.load("illust2vec_image_mean.npy")
print("neural code is extraced from layer " + layer)
def neuralCode(x): #推測関数
y, = func(inputs={'data': x}, outputs=[layer],
train=False)
return y.data[0]
cropwidth = 256 - in_size
start = cropwidth // 2
stop = start + in_size
mean_image = mean_image[:, start:stop, start:stop].copy()
target_shape = (256, 256)
output_side_length=256
numPic = 0
#count pictures
for folderPath in directory:
#search pictures
picturePath = [picture for picture in os.listdir(folderPath)
if re.findall(r"\.png$|\.jpg$|\.JPG$|\.PNG$|\.JPEG$",picture)]
print("you have " + str(len(picturePath)) + " pictures in " + folderPath)
numPic = numPic + len(picturePath)
print("you have totally " + str(numPic) + " pictures")
count = 0
answer = {}
for folderPath in directory:
#search pictures
picturePath = [picture for picture in os.listdir(folderPath)
if re.findall(r"\.png$|\.jpg$|\.JPG$|\.PNG$|\.JPEG$",picture)]
for picture in picturePath:
timeMemory = time()
count = count + 1
#load image file
image = cv2.imread(folderPath + "/" + picture)
#resize and crop
height, width, depth = image.shape
new_height = output_side_length
new_width = output_side_length
            if height > width:
                new_height = output_side_length * height // width
            else:
                new_width = output_side_length * width // height
            resized_img = cv2.resize(image, (new_width, new_height))
            height_offset = (new_height - output_side_length) // 2
            width_offset = (new_width - output_side_length) // 2
            image = resized_img[height_offset:height_offset + output_side_length,
                                width_offset:width_offset + output_side_length]
#subtract mean image
image = image.transpose(2, 0, 1)
image = image[:, start:stop, start:stop].astype(np.float32)
image -= mean_image
            x_batch = np.ndarray(
                (1, 3, in_size, in_size), dtype=np.float32)
            x_batch[0] = image
if gpu >= 0:
x_batch=cuda.to_gpu(x_batch)
#get neural code
x = chainer.Variable(x_batch, volatile=True)
answer[folderPath + "/" + picture] = neuralCode(x)
sen = overwrite.bar(count,numPic)
overwrite.overwrite(sen)
return answer
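if __name__ == '__main__':
    # Minimal usage sketch (assumptions: the caffemodel and mean file named
    # above sit in the working directory, and "./images" is a hypothetical
    # folder of jpg/png files; adjust both for a real run).
    codes = getNeuralCode(["./images"], layer="conv5_1", gpu=-1)
    for path, code in codes.items():
        print(path, code.shape)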
| nutszebra/ddp | illust2vecNC.py | Python | mit | 4,789 | 0.019661 |
# -*- coding: utf-8 -*-
import os,math
from qgis.core import NULL
from mole import oeq_global
from mole.project import config
from mole.extensions import OeQExtension
from mole.stat_corr import rb_contemporary_base_uvalue_by_building_age_lookup
def calculation(self=None, parameters={}, feature=None):
from math import floor, ceil
from PyQt4.QtCore import QVariant
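    # Convert the project's heating-degree-days figure into hours by scaling
    # it with 24 hours per day; the result is returned in the 'HHRS' field.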
hhrs = float(oeq_global.OeQ_project_info['heating_degree_days']) * 24
    return {'HHRS': {'type': QVariant.Double, 'value': hhrs}}
extension = OeQExtension(
extension_id=__name__,
category='Evaluation',
subcategory='General',
extension_name='Average Heating Hours',
layer_name= 'Average Heating Hours',
extension_filepath=os.path.join(__file__),
colortable = os.path.join(os.path.splitext(__file__)[0] + '.qml'),
field_id='HHRS',
source_type='none',
par_in=[],
sourcelayer_name=config.data_layer_name,
targetlayer_name=config.data_layer_name,
active=True,
show_results=['HHRS'],
description=u"Calculate Average Heating Hours",
evaluation_method=calculation)
extension.registerExtension(default=True)
| UdK-VPT/Open_eQuarter | mole/extensions/prop_buildings/oeq_HHRS.py | Python | gpl-2.0 | 1,148 | 0.008711 |
from django.conf import settings
from appconf import AppConf
class ModelDatabankAppConf(AppConf):
"""App specific settings. Overridable in global settings.
DATA_PATH: path to the real mercurial repositories; these should never
be manipulated directly. Active repositories are symlinked in the
SYMLINK_PATH directory by their slug name.
SYMLINK_PATH: path with symlinks to active model reference repositories.
UPLOAD_PATH: uploaded zip files end up in this directory.
ZIP_EXTRACT_PATH: the uploaded zip files are extracted in this directory.
DOWNLOAD_PATH: repositories that are zipped for download are put in here.
REPOSITORY_URL_ROOT: root url for cloning repositories.
MAX_REVISIONS: maximum total number of revisions shown for a model
MAX_REVISIONS_PER_PAGE: maximum number of revisions per page.
"""
DATA_PATH = "/tmp/model_databank_repositories"
SYMLINK_PATH = "/tmp/model_databank"
UPLOAD_PATH = "/tmp/uploads"
ZIP_EXTRACT_PATH = "/tmp/extracted_zip_files/"
DOWNLOAD_PATH = "/tmp/downloads"
REPOSITORY_URL_ROOT = 'http://127.0.0.1:8012'
MAX_REVISIONS = 500
MAX_REVISIONS_PER_PAGE = 100
class Meta:
prefix = 'model_databank'
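# Usage sketch: with django-appconf, any of the defaults above can be
# overridden from the Django settings module by prefixing the name with
# "MODEL_DATABANK_" (derived from Meta.prefix). The paths below are
# hypothetical examples, not project defaults:
#
#   MODEL_DATABANK_DATA_PATH = '/srv/model-databank/repositories'
#   MODEL_DATABANK_MAX_REVISIONS_PER_PAGE = 50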
| nens/model-databank | model_databank/conf.py | Python | gpl-3.0 | 1,241 | 0 |
# Copyright 2012 Nebula, Inc.
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.tests.functional.v3 import test_servers
class DeferredDeleteSampleJsonTests(test_servers.ServersSampleBase):
extension_name = "os-deferred-delete"
def setUp(self):
super(DeferredDeleteSampleJsonTests, self).setUp()
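        # A non-zero reclaim interval makes DELETE perform a soft (deferred)
        # delete, so the restore and force-delete actions exercised below have
        # a deferred-deleted server to act on.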
self.flags(reclaim_instance_interval=1)
def test_restore(self):
uuid = self._post_server()
response = self._do_delete('servers/%s' % uuid)
response = self._do_post('servers/%s/action' % uuid,
'restore-post-req', {})
self.assertEqual(response.status_code, 202)
self.assertEqual(response.content, '')
def test_force_delete(self):
uuid = self._post_server()
response = self._do_delete('servers/%s' % uuid)
response = self._do_post('servers/%s/action' % uuid,
'force-delete-post-req', {})
self.assertEqual(response.status_code, 202)
self.assertEqual(response.content, '')
| yanheven/nova | nova/tests/functional/v3/test_deferred_delete.py | Python | apache-2.0 | 1,610 | 0 |
from db_utils import deleteLinksByHost
from db_utils import deleteHost
from db_utils import addNewHost
from db_utils import getAllHosts
from error_message import showErrorPage
from error_message import ErrorMessages
import utils
import webapp2
from google.appengine.api import users
from google.appengine.ext import ndb
JINJA_ENVIRONMENT = utils.getJinjaEnvironment()
class AddHost(webapp2.RequestHandler):
def get(self):
"""
        description:
            adds a new host to the database and redirects to '/admin'
params:
name - host name
interval - pinging interval for all the links belonging to the host.
response:
redirect to '/admin'
"""
name = self.request.get('name')
if name is None or len(name) == 0:
showErrorPage(self, ErrorMessages.invalidHostName())
return
if ndb.Key('Host', name).get() is not None:
showErrorPage(self, ErrorMessages.duplicatingHostName())
return
try:
interval = int(self.request.get('interval'))
except ValueError:
showErrorPage(self, ErrorMessages.invalidHostInterval())
return
if interval == 0:
showErrorPage(self, ErrorMessages.invalidHostInterval())
return
addNewHost(name, interval)
self.redirect('/admin')
class DeleteHost(webapp2.RequestHandler):
def get(self):
"""
description:
deletes an existing host, and redirects to '/'. All the links belonging
to the host will also be deleted.
params:
name - host name
response:
redirect to '/'
"""
name = self.request.get('name')
if name is None or len(name) == 0:
showErrorPage(self, ErrorMessages.invalidHostName())
return
hostKey = ndb.Key('Host', name)
if hostKey.get() is None:
showErrorPage(self, ErrorMessages.hostDoesNotExist())
return
deleteLinksByHost(name)
deleteHost(name)
self.redirect('/')
class AdminPage(webapp2.RequestHandler):
def get(self):
user = users.get_current_user()
hosts = getAllHosts()
template_values = {
'hosts': hosts,
'user': user,
}
template = JINJA_ENVIRONMENT.get_template('admin.html')
self.response.write(template.render(template_values))
app = webapp2.WSGIApplication([
('/admin/host/add', AddHost),
('/admin/host/delete', DeleteHost),
], debug=True)
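# Example requests handled by the routes above (hypothetical values):
#   GET /admin/host/add?name=example.com&interval=300
#   GET /admin/host/delete?name=example.com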
| cloud-io/CloudUp | src/admin_views.py | Python | mit | 2,370 | 0.007173 |
"""Unittests for the barrista project."""
# pylint: disable=F0401, C0330, C0302, C0103, R0201, R0914, R0915, W0212
# pylint: disable=no-name-in-module, no-member
import unittest
import logging
logging.basicConfig(level=logging.WARN)
try:
import cv2 # pylint: disable=W0611
CV2_AVAILABLE = True
except ImportError:
CV2_AVAILABLE = False
class NetSpecificationTestCase(unittest.TestCase):
"""Tests the :py:class:`barrista.design.NetSpecification` class."""
def test_initialization(self):
"""Test initialization and checks."""
import barrista.design as design
# Basic init works.
_ = design.NetSpecification([[2, 2]])
_ = design.NetSpecification([[2, 2, 2, 2]])
# Checks work.
with self.assertRaises(AssertionError):
_ = design.NetSpecification([[2, 2, 2, 2], [2, 2]])
with self.assertRaises(AssertionError):
_ = design.NetSpecification([[2, 2]],
predict_inputs=['test'])
with self.assertRaises(AssertionError):
_ = design.NetSpecification([[2, 2]],
predict_input_shapes=[[2, 2]])
with self.assertRaises(AssertionError):
_ = design.NetSpecification([[2, 2]],
predict_inputs=['test'],
predict_input_shapes=[[]])
_ = design.NetSpecification([[10, 3, 51, 51], [10]], # noqa
inputs=['data', 'annotations'],
predict_inputs=['data'],
predict_input_shapes=[[10, 3]])
def test_get_predict_net_specification(self):
"""Test the method ``get_predict_net_specification``."""
import barrista.design as design
with self.assertRaises(AssertionError):
netspec = design.NetSpecification([[2, 2]])
netspec.get_predict_net_specification()
netspec = design.NetSpecification([[10, 3, 51, 51], [10]],
inputs=['data', 'annotations'],
predict_inputs=['data'],
predict_input_shapes=[[10, 3]])
pred_netspec = netspec.get_predict_net_specification()
self.assertEqual(pred_netspec.name, netspec.name)
self.assertEqual(pred_netspec.debug_info, netspec.debug_info)
self.assertEqual(pred_netspec.stages, ['predict'])
self.assertEqual(pred_netspec.level, netspec.level)
self.assertEqual(pred_netspec.phase, design.Phase.TEST)
self.assertEqual(pred_netspec.force_backward, False)
self.assertEqual(pred_netspec.layers, netspec.layers)
self.assertEqual(pred_netspec.inputs, netspec.predict_inputs)
self.assertEqual(pred_netspec.input_shape,
netspec.predict_input_shapes)
def test_to_pbuf_message(self):
"""Test the method ``to_pbuf_message``."""
import barrista.design as design
from barrista.design import ConvolutionLayer, ReLULayer
netspec = design.NetSpecification([[10, 3, 51, 51], [10]],
inputs=['data', 'annotations'],
predict_inputs=['data'],
predict_input_shapes=[[10, 3]])
layers = []
conv_params = {'Convolution_kernel_size': 3,
'Convolution_num_output': 32,
'Convolution_pad': 1}
layers.append(ConvolutionLayer(**conv_params))
layers.append(ReLULayer())
netspec.layers.extend(layers)
netspec_msg = netspec.to_pbuf_message()
self.assertEqual(netspec_msg.IsInitialized(), True)
self.assertEqual(netspec_msg.input, netspec.inputs)
if hasattr(netspec_msg, 'input_shape'):
for msgshape, specshape in zip(netspec_msg.input_shape,
netspec.input_shape):
self.assertEqual(list(msgshape.dim), specshape)
self.assertEqual(len(netspec_msg.layer), len(netspec.layers))
self.assertEqual(netspec_msg.state.phase, netspec.phase)
self.assertEqual(netspec_msg.state.level, netspec.level)
self.assertEqual(netspec_msg.state.stage, netspec.stages)
self.assertEqual(netspec_msg.name, netspec.name)
self.assertEqual(netspec_msg.debug_info, netspec.debug_info)
def test_prototxt_conversion(self):
"""Test the prototxt conversion methods."""
import barrista.design as design
from barrista.design import ConvolutionLayer, ReLULayer
import tempfile
netspec = design.NetSpecification([[10, 3, 51, 51], [10]],
inputs=['data', 'annotations'],
predict_inputs=['data'],
predict_input_shapes=[[10, 3, 3, 3]])
layers = []
conv_params = {'Convolution_kernel_size': 3,
'Convolution_num_output': 32,
'Convolution_pad': 1}
layers.append(ConvolutionLayer(**conv_params))
layers.append(ReLULayer())
netspec.layers.extend(layers)
_ = netspec.instantiate()
netspec_rl = design.NetSpecification.from_prototxt(
netspec.to_prototxt())
# Since we have the test for `to_pbuf_message`, we can assume the
# conversion to prototxt works correctly.
self.assertEqual(netspec_rl.to_prototxt(), netspec.to_prototxt())
# Test file io.
with tempfile.NamedTemporaryFile(mode='r',
suffix=".prototxt") as tmpfile:
netspec.to_prototxt(output_filename=tmpfile.name)
tmpfile.file.flush()
netspec_rl = design.NetSpecification.from_prototxt(
filename=tmpfile.name)
# Test instantiation of a loaded net.
_ = netspec_rl.instantiate() # noqa
def test_copy(self):
"""Test the method ``copy``."""
import barrista.design as design
from barrista.design import ConvolutionLayer, ReLULayer
netspec = design.NetSpecification([[10, 3, 51, 51], [10]],
inputs=['data', 'annotations'],
predict_inputs=['data'],
predict_input_shapes=[[2, 3, 2, 2]])
layers = []
conv_params = {'Convolution_kernel_size': 3,
'Convolution_num_output': 32,
'Convolution_pad': 1}
layers.append(ConvolutionLayer(**conv_params))
layers.append(ReLULayer())
netspec.layers.extend(layers)
_ = netspec.instantiate()
netptext = netspec.to_prototxt()
netspec_copy = netspec.copy()
_ = netspec_copy.instantiate() # noqa
netcptext = netspec_copy.to_prototxt()
self.assertEqual(netptext, netcptext)
def test_visualize(self):
"""Test the ``visualize`` function."""
import barrista.design as design
# pylint: disable=W0212
if design._draw is None:
return
from barrista.design import ConvolutionLayer, ReLULayer
netspec = design.NetSpecification([[10, 3, 51, 51], [10]],
inputs=['data', 'annotations'],
predict_inputs=['data'],
predict_input_shapes=[[2, 3, 2, 2]])
layers = []
conv_params = {'Convolution_kernel_size': 3,
'Convolution_num_output': 32,
'Convolution_pad': 1}
layers.append(ConvolutionLayer(**conv_params))
layers.append(ReLULayer())
netspec.layers.extend(layers)
viz = netspec.visualize()
self.assertEqual(viz.ndim, 3)
def test_instantiate(self):
"""Test the method ``instatiate``."""
import barrista.design as design
from barrista.design import ConvolutionLayer, ReLULayer
netspec = design.NetSpecification([[10, 3, 51, 51], [10]],
inputs=['data', 'annotations'],
predict_inputs=['data'],
predict_input_shapes=[[10, 3, 3, 3]])
layers = []
conv_params = {'Convolution_kernel_size': 3,
'Convolution_num_output': 32,
'Convolution_pad': 1}
layers.append(ConvolutionLayer(**conv_params))
layers.append(ReLULayer())
netspec.layers.extend(layers)
_ = netspec.instantiate() # noqa
class LayerSpecificationTestCase(unittest.TestCase):
"""Test the class :py:class:`barrista.design.LayerSpecification`."""
def test_instantiation(self):
"""Test instantiation."""
import barrista.design as design
import copy
sspec = design.LayerSpecification() # noqa
self.assertTrue(sspec == sspec)
cspec = copy.deepcopy(sspec)
self.assertTrue(sspec == cspec)
cspec.include_min_level = 2
self.assertTrue(sspec != cspec)
def test_to_pbuf(self):
"""Test protobuf conversion."""
import barrista.design as design
layerspec = design.LayerSpecification()
with self.assertRaises(AssertionError):
# It is not possible to create an abstract layer without type.
_ = layerspec.to_pbuf_message(0, # noqa
None,
['data'])
# Check the wiring.
layerspec.type = 'convolution'
pbmessage = layerspec.to_pbuf_message(0,
None,
['data'])
self.assertEqual(pbmessage.name, '_layer_0')
self.assertEqual(pbmessage.top[0], '_layer_0')
self.assertEqual(pbmessage.bottom[0], 'data')
layerspec2 = design.LayerSpecification()
layerspec2.type = 'convolution'
pbmessage2 = layerspec2.to_pbuf_message(1,
layerspec,
['data'])
self.assertEqual(pbmessage2.name, '_layer_1')
self.assertEqual(pbmessage2.top[0], '_layer_1')
self.assertEqual(pbmessage2.bottom[0], '_layer_0')
class MonitoringTestCase(unittest.TestCase):
"""Test the monitors."""
def test_ProgressIndicator(self):
"""Test the ``ProgressIndicator``."""
import barrista.design as design
import numpy as np
from barrista.design import ConvolutionLayer, ReLULayer
from barrista.monitoring import ProgressIndicator
from barrista import solver as _solver
netspec = design.NetSpecification([[10, 3, 3, 3], [10]],
inputs=['data', 'annotations'],
predict_inputs=['data'],
predict_input_shapes=[[10, 3, 3, 3]])
layers = []
conv_params = {'Convolution_kernel_size': 3,
'Convolution_num_output': 32,
'Convolution_pad': 1}
layers.append(ConvolutionLayer(**conv_params))
layers.append(ReLULayer())
netspec.layers.extend(layers)
net = netspec.instantiate()
# For fit.
fitpi = ProgressIndicator()
solver = _solver.SGDSolver(
base_lr=0.01)
X = {'data': np.zeros((10, 3, 3, 3), dtype='float32'),
'annotations': np.ones((10, 1), dtype='float32')}
net.fit(10,
solver,
X,
train_callbacks=[fitpi])
if hasattr(fitpi.pbar, 'finished'):
# progressbar2 compatibility.
self.assertEqual(fitpi.pbar.finished, True)
# For predict.
predpi = ProgressIndicator()
net.predict(np.zeros((20, 3, 3, 3)),
test_callbacks=[predpi])
if hasattr(predpi.pbar, 'finished'):
self.assertEqual(predpi.pbar.finished, True)
def test_JSONLogger(self):
"""Test the ``JSONLogger``."""
import tempfile
import shutil
import os
import barrista.design as design
import numpy as np
import json
from barrista.design import (ConvolutionLayer, InnerProductLayer,
SoftmaxWithLossLayer, AccuracyLayer)
from barrista.monitoring import JSONLogger
from barrista import solver as _solver
netspec = design.NetSpecification([[10, 3, 3, 3], [10]],
inputs=['data', 'annotations'],
phase=design.Phase.TRAIN)
layers = []
conv_params = {'Convolution_kernel_size': 3,
'Convolution_num_output': 3,
'Convolution_pad': 1}
layers.append(ConvolutionLayer(**conv_params))
layers.append(InnerProductLayer(InnerProduct_num_output=2,
tops=['out']))
layers.append(SoftmaxWithLossLayer(
name='loss',
bottoms=['out', 'annotations']))
layers.append(AccuracyLayer(name='accuracy',
bottoms=['out', 'annotations']))
netspec.layers.extend(layers)
net = netspec.instantiate()
dirpath = tempfile.mkdtemp()
# For fit.
fitlog = JSONLogger(dirpath,
'tmp',
{'test': ['test_loss',
'test_accuracy'],
'train': ['train_loss',
'train_accuracy']})
X = {'data': np.zeros((10, 3, 3, 3), dtype='float32'),
'annotations': np.ones((10, 1), dtype='float32')}
solver = _solver.SGDSolver(
base_lr=0.01)
net.fit(30,
solver,
X=X,
X_val=X,
test_initialization=True,
test_interval=10,
train_callbacks=[fitlog],
test_callbacks=[fitlog])
        # Append to the existing log file.
fitlog = JSONLogger(dirpath,
'tmp',
{'test': ['test_loss',
'test_accuracy'],
'train': ['train_loss',
'train_accuracy']})
net.fit(30,
solver,
X=X,
X_val=X,
test_initialization=True,
test_interval=10,
train_callbacks=[fitlog],
test_callbacks=[fitlog])
with open(os.path.join(dirpath, 'barrista_tmp.json'), 'r') as inf:
json_load = json.load(inf)
self.assertIn('train', list(json_load.keys()))
self.assertIn('test', list(json_load.keys()))
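        # Two fit() runs of 30 iterations each with test_interval=10:
        # the train log gets 3 points per run x 2 keys x 2 runs = 12 entries;
        # the test log additionally logs at initialization, giving
        # 4 points per run x 2 keys x 2 runs = 16 entries.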
self.assertEqual(len(json_load['train']), 12)
self.assertEqual(len(json_load['test']), 16)
shutil.rmtree(dirpath)
# Verify values.
predres = net.predict(X,
out_blob_names=['loss', 'accuracy'],
allow_train_phase_for_test=True)
last_test_loss = [dct['test_loss'] for dct in json_load['test']
if 'test_loss' in dct.keys() and
dct['NumIters'] == 60][0]
self.assertEqual(last_test_loss, predres['loss'][0])
def test_StaticDataMonitor(self):
"""Test the static data monitor."""
import barrista.design as design
import numpy as np
from barrista.monitoring import StaticDataMonitor
netspec = design.NetSpecification([[3], [3]],
inputs=['a', 'b'],
phase=design.Phase.TRAIN)
net = netspec.instantiate()
tmon = StaticDataMonitor(X={'a': np.array(range(3)),
'b': np.array(range(5, 8))})
tmon_test = StaticDataMonitor(X={'a': np.array(range(3)),
'b': np.array(range(5, 8))})
kwargs = {'net': net,
'testnet': net,
'callback_signal': 'initialize_train'}
tmon._initialize_train(kwargs)
assert len(tmon.get_parallel_blob_names()) == 2
kwargs['callback_signal'] = 'initialize_test'
tmon_test._initialize_test(kwargs)
kwargs['callback_signal'] = 'pre_fit'
tmon._pre_fit(kwargs)
tmon._pre_train_batch({'net': net})
assert np.all(net.blobs['a'].data[...] == [0, 1, 2])
assert np.all(net.blobs['b'].data[...] == [5, 6, 7])
tmon._pre_train_batch({'net': net})
assert np.all(net.blobs['a'].data[...] == [0, 1, 2])
assert np.all(net.blobs['b'].data[...] == [5, 6, 7])
kwargs['callback_signal'] = 'pre_test'
tmon_test._pre_test(kwargs)
tmon_test._pre_test_batch({'testnet': net})
assert np.all(net.blobs['a'].data[...] == [0, 1, 2])
assert np.all(net.blobs['b'].data[...] == [5, 6, 7])
def test_CyclingDataMonitor(self):
"""Test the cycling data monitor."""
import barrista.design as design
import numpy as np
from barrista.monitoring import CyclingDataMonitor
netspec = design.NetSpecification([[3], [3]],
inputs=['a', 'b'],
phase=design.Phase.TRAIN)
net = netspec.instantiate()
tmon = CyclingDataMonitor(X={'a': list(range(4)),
'b': np.array(range(5, 9))})
tmon_test = CyclingDataMonitor(X={'a': list(range(4)),
'b': np.array(range(5, 9))})
kwargs = {'net': net,
'testnet': net,
'callback_signal': 'initialize_train'}
tmon._initialize_train(kwargs)
kwargs['callback_signal'] = 'initialize_test'
with self.assertRaises(Exception):
tmon._initialize_test(kwargs)
tmon_test._initialize(kwargs)
kwargs['callback_signal'] = 'pre_fit'
tmon._pre_fit(kwargs)
tmon._pre_train_batch({'net': net})
assert np.all(net.blobs['a'].data[...] == [0, 1, 2])
assert np.all(net.blobs['b'].data[...] == [5, 6, 7])
tmon._pre_train_batch({'net': net})
assert np.all(net.blobs['a'].data[...] == [3, 0, 1])
assert np.all(net.blobs['b'].data[...] == [8, 5, 6])
kwargs['callback_signal'] = 'pre_test'
tmon_test._pre_test(kwargs)
tmon_test._pre_test_batch({'testnet': net})
assert np.all(net.blobs['a'].data[...] == [0, 1, 2])
assert np.all(net.blobs['b'].data[...] == [5, 6, 7])
def test_CyclingDataMonitor_only_preload(self):
"""Test the cycling data monitor preload capability."""
import barrista.design as design
import numpy as np
from barrista.monitoring import CyclingDataMonitor
netspec = design.NetSpecification([[3], [3]],
inputs=['a', 'b'],
phase=design.Phase.TRAIN)
net = netspec.instantiate()
tmon = CyclingDataMonitor(
only_preload=['a', 'b'],
X={'a': list(range(4)),
'b': np.array(range(5, 9))})
kwargs = {'net': net,
'testnet': net,
'callback_signal': 'initialize_train'}
tmon._initialize_train(kwargs)
kwargs['callback_signal'] = 'pre_fit'
tmon._pre_fit(kwargs)
kwargs = {'net': net, 'testnet': net, 'callback_signal': 'pre_batch'}
tmon._pre_train_batch(kwargs)
assert np.all(kwargs['data_orig']['a'] == [0, 1, 2])
assert np.all(kwargs['data_orig']['b'] == [5, 6, 7])
tmon._pre_train_batch(kwargs)
assert np.all(kwargs['data_orig']['a'] == [3, 0, 1])
assert np.all(kwargs['data_orig']['b'] == [8, 5, 6])
tmon._pre_test_batch(kwargs)
assert np.all(kwargs['data_orig']['a'] == [2, 3, 0])
assert np.all(kwargs['data_orig']['b'] == [7, 8, 5])
def test_CyclingDataMonitor_augmentation(self):
"""Test the cycling data monitor color data augmentation capability."""
import barrista.design as design
import numpy as np
from barrista.monitoring import CyclingDataMonitor
netspec = design.NetSpecification([[1, 3, 5, 5], [3]],
inputs=['a', 'b'],
phase=design.Phase.TRAIN)
net = netspec.instantiate()
ddta1 = np.zeros((3, 5, 5))
ddta1[0, 0, 0] = 1.
ddta2 = np.zeros((3, 5, 5))
ddta2[1, 1, 1] = 1.
tmon = CyclingDataMonitor(
only_preload=['a', 'b'],
X={'a': [ddta1, ddta2],
'b': np.array(range(5, 7))},
color_data_augmentation_sigmas={'a': 0.1})
kwargs = {'net': net,
'testnet': net,
'callback_signal': 'initialize_train'}
tmon._initialize_train(kwargs)
# Filling of unspecified sigmas.
self.assertEqual(tmon._color_data_augmentation_sigmas,
{'a': 0.1, 'b': 0.})
# Equal weights for the first two components.
self.assertEqual(len(tmon._color_data_augmentation_weights), 1)
self.assertAlmostEqual(tmon._color_data_augmentation_weights['a'][0],
tmon._color_data_augmentation_weights['a'][1],
delta=0.01)
# Third one zero.
self.assertEqual(tmon._color_data_augmentation_weights['a'][2], 0.)
# Check components: orthogonal first two, zeros for third.
self.assertEqual(len(tmon._color_data_augmentation_components), 1)
self.assertEqual(np.dot(
tmon._color_data_augmentation_components['a'][:, 0].T,
tmon._color_data_augmentation_components['a'][:, 1]),
0.)
self.assertTrue(
np.all(
tmon._color_data_augmentation_components['a'][2, :2] ==
[0, 0]))
self.assertTrue(
np.all(
tmon._color_data_augmentation_components['a'][:2, 2] ==
[0, 0]))
kwargs['callback_signal'] = 'pre_fit'
tmon._pre_fit(kwargs)
kwargs = {'net': net, 'testnet': net, 'callback_signal': 'pre_batch'}
tmon._pre_train_batch(kwargs)
# Test layerwise application.
self.assertTrue(np.all(kwargs['data_orig']['a'][0][2] == 0))
diff0 = ddta1[0, 0, 0] - kwargs['data_orig']['a'][0][0, 0, 0]
# pylint: disable=superfluous-parens
# print(np.max(np.abs(ddta1[0] - kwargs['data_orig']['a'][0][0] -
# diff0)))
self.assertTrue(np.all(np.isclose(
ddta1[0] - kwargs['data_orig']['a'][0][0],
diff0, rtol=1e-04, atol=1e-07)))
diff1 = ddta1[1, 0, 0] - kwargs['data_orig']['a'][0][1, 0, 0]
self.assertTrue(np.all(np.isclose(
ddta1[1] - kwargs['data_orig']['a'][0][1],
diff1, rtol=1e-04, atol=1e-07)))
diff2 = ddta1[2, 0, 0] - kwargs['data_orig']['a'][0][2, 0, 0]
self.assertEqual(diff2, 0.)
self.assertTrue(np.all(np.isclose(
ddta1[2] - kwargs['data_orig']['a'][0][2],
diff2, rtol=1e-04, atol=1e-07)))
def test_CyclingDataMonitor_resizing(self):
"""Test the cycling data monitor resizing capability."""
import barrista.design as design
import numpy as np
from barrista.monitoring import CyclingDataMonitor
import barrista.monitoring as bm
if bm._cv2 is None:
# OpenCV is not available, so skip the test.
return
netspec = design.NetSpecification([[3, 3, 10, 10], [3, 3, 5, 5]],
inputs=['a', 'b'],
phase=design.Phase.TRAIN)
net = netspec.instantiate()
tmon = CyclingDataMonitor(
X={'a': [np.ones((3, 5, 5))] * 2,
'b': np.ones((2, 3, 5, 5))},
            input_processing_flags={'a': 'rc', 'b': 'rn'})
kwargs = {'net': net,
'testnet': net,
'callback_signal': 'initialize_train'}
tmon._initialize_train(kwargs)
tmon._pre_fit({'net': net, 'callback_signal': 'pre_fit'})
kwargs = {'net': net, 'testnet': net}
tmon._pre_train_batch(kwargs)
assert np.all(net.blobs['a'].data[...] == 1.)
assert np.all(net.blobs['b'].data[...] == 1.)
def test_CyclingDataMonitor_padding(self):
"""Test the cycling data monitor padding capability."""
import barrista.design as design
import numpy as np
from barrista.monitoring import CyclingDataMonitor
netspec = design.NetSpecification([[3, 3, 10, 10], [3, 3, 7, 7]],
inputs=['a', 'b'],
phase=design.Phase.TRAIN)
net = netspec.instantiate()
tmon = CyclingDataMonitor(
X={'a': [np.ones((3, 5, 5))] * 2,
'b': np.ones((2, 3, 5, 5))},
            input_processing_flags={'a': 'p0', 'b': 'p2'})
kwargs = {'net': net,
'testnet': net,
'callback_signal': 'initialize_train'}
tmon._initialize_train(kwargs)
tmon._pre_fit({'net': net, 'callback_signal': 'pre_fit'})
kwargs = {'net': net, 'testnet': net}
tmon._pre_train_batch(kwargs)
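        # 'a' is zero-padded into its (3, 3, 10, 10) blob: three samples of
        # ones with shape (3, 5, 5) contribute 3 * 3 * 5 * 5 = 225.
        # 'b' is padded with the value 2 into its (3, 3, 7, 7) blob: the ones
        # contribute 225 and the 24 padded cells per channel contribute
        # 3 * 3 * 24 * 2 = 432.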
assert np.sum(net.blobs['a'].data[...]) == 225.
assert np.sum(net.blobs['b'].data[...]) == 225. + 432.
def test_ResizingMonitor(self):
"""Test the resizing monitor."""
import barrista.design as design
import barrista.solver as sv
import numpy as np
from barrista.monitoring import CyclingDataMonitor, ResizingMonitor
netspec = design.NetSpecification([[1, 3, 5, 5], [1, 1, 5, 5]],
inputs=['a', 'b'],
phase=design.Phase.TRAIN)
netspec.layers.append(design.ConvolutionLayer(
Convolution_kernel_size=3,
Convolution_num_output=1))
net = netspec.instantiate()
dmon = CyclingDataMonitor(
only_preload=['a', 'b'],
X={'a': [np.zeros((3, 6, 6)), np.zeros((3, 7, 9))],
'b': [np.ones((1, 6, 6)), np.ones((1, 7, 9))]})
tmon = ResizingMonitor(
blobinfos={'a': 1, 'b': 2},
net_input_size_adjustment_multiple_of=2
)
kwargs = {'net': net,
'testnet': net,
'callback_signal': 'initialize_train'}
tmon._initialize_train(kwargs)
dmon._initialize_train(kwargs)
dmon._pre_fit({'net': net, 'callback_signal': 'pre_fit'})
tmon._pre_fit({'net': net, 'callback_signal': 'pre_fit'})
kwargs = {'net': net, 'testnet': net}
dmon._pre_train_batch(kwargs)
tmon._pre_train_batch(kwargs)
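        # The (3, 6, 6) zero image is padded with fill value 1 to the next
        # valid size (7, 7): 13 padded cells per channel * 3 channels = 39.
        # 'b' (ones) is padded with fill value 2: 36 ones + 13 * 2 = 36 + 26.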
self.assertEqual(np.sum(net.blobs['a'].data[...]), 39.)
self.assertEqual(np.sum(net.blobs['b'].data[...]), 36. + 26.)
dmon._pre_train_batch(kwargs)
tmon._pre_train_batch(kwargs)
self.assertEqual(np.sum(net.blobs['a'].data[...]), 0.)
self.assertEqual(net.blobs['a'].data.shape, (1, 3, 7, 9))
self.assertEqual(np.sum(net.blobs['b'].data[...]), 63.)
self.assertEqual(net.blobs['b'].data.shape, (1, 1, 7, 9))
dmon._pre_test_batch(kwargs)
tmon._pre_test_batch(kwargs)
self.assertEqual(np.sum(net.blobs['a'].data[...]), 39.)
self.assertEqual(np.sum(net.blobs['b'].data[...]), 36. + 26.)
# Check that the parallel filling works.
dmon = CyclingDataMonitor(
only_preload=['a', 'b'],
X={'a': [np.zeros((3, 6, 6)), np.zeros((3, 7, 9))],
'b': [np.ones((1, 6, 6)), np.ones((1, 7, 9))]})
tmon = ResizingMonitor(
blobinfos={'a': 1, 'b': 2},
net_input_size_adjustment_multiple_of=2
)
net.fit(3, sv.SGDSolver(base_lr=0.01), train_callbacks=[dmon, tmon])
self.assertEqual(np.sum(net.blobs['a'].data[...]), 39.)
self.assertEqual(np.sum(net.blobs['b'].data[...]), 36. + 26.)
def test_ResizingMonitor_fixed_scale(self):
"""Test the resizing monitor scaling capability."""
import barrista.design as design
import numpy as np
from barrista.monitoring import CyclingDataMonitor, ResizingMonitor
import barrista.monitoring as bm
if bm._cv2 is None:
# OpenCV is not available, so skip the test.
return
netspec = design.NetSpecification([[1, 3, 5, 5], [1, 1, 5, 5]],
inputs=['a', 'b'],
phase=design.Phase.TRAIN)
net = netspec.instantiate()
dmon = CyclingDataMonitor(
only_preload=['a', 'b'],
X={'a': [np.zeros((3, 6, 6)), np.zeros((3, 7, 9))],
'b': [np.ones((1, 6, 6)), np.ones((1, 7, 9))]})
tmon = ResizingMonitor(
blobinfos={'a': 1, 'b': 2},
base_scale=2.,
net_input_size_adjustment_multiple_of=2,
            interp_methods={'a': 'c', 'b': 'n'}
)
kwargs = {'net': net,
'testnet': net,
'callback_signal': 'initialize_train'}
tmon._initialize_train(kwargs)
dmon._initialize_train(kwargs)
dmon._pre_fit({'net': net, 'callback_signal': 'pre_fit'})
tmon._pre_fit({'net': net, 'callback_signal': 'pre_fit'})
kwargs = {'net': net, 'testnet': net}
dmon._pre_train_batch(kwargs)
tmon._pre_train_batch(kwargs)
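        # With base_scale=2.0 the (6, 6) images are rescaled to (12, 12) and
        # then padded to the next valid size (13, 13): 'a' (zeros, fill value
        # 1) sums to 25 * 3 = 75; 'b' (ones, fill value 2) sums to
        # 36 * 4 + 25 * 2 = 36 * 4 + 50.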
self.assertEqual(np.sum(net.blobs['a'].data[...]), 75.)
self.assertEqual(np.sum(net.blobs['b'].data[...]), 36.*4. + 50.)
dmon._pre_train_batch(kwargs)
tmon._pre_train_batch(kwargs)
self.assertEqual(np.sum(net.blobs['a'].data[...]),
(15.*19.-14.*18.)*3.)
self.assertEqual(net.blobs['a'].data.shape, (1, 3, 15, 19))
self.assertEqual(np.sum(net.blobs['b'].data[...]),
(15.*19.-14.*18.)*2.+14.*18.)
self.assertEqual(net.blobs['b'].data.shape, (1, 1, 15, 19))
dmon._pre_test_batch(kwargs)
tmon._pre_test_batch(kwargs)
self.assertEqual(np.sum(net.blobs['a'].data[...]), 75.)
self.assertEqual(np.sum(net.blobs['b'].data[...]), 36.*4. + 50.)
def test_ResizingMonitor_random_scale(self):
"""Test the resizing monitor random scale capability."""
import barrista.design as design
import numpy as np
from barrista.monitoring import CyclingDataMonitor, ResizingMonitor
import barrista.monitoring as bm
if bm._cv2 is None:
# OpenCV is not available, so skip the test.
return
netspec = design.NetSpecification([[1, 3, 5, 5], [1, 1, 5, 5]],
inputs=['a', 'b'],
phase=design.Phase.TRAIN)
net = netspec.instantiate()
dmon = CyclingDataMonitor(
only_preload=['a', 'b'],
X={'a': [np.zeros((3, 6, 6))],
'b': [np.ones((1, 6, 6))]})
tmon = ResizingMonitor(
blobinfos={'a': 1, 'b': 2},
base_scale=2.,
random_change_up_to=0.5,
net_input_size_adjustment_multiple_of=1,
            interp_methods={'a': 'c', 'b': 'n'}
)
kwargs = {'net': net,
'testnet': net,
'callback_signal': 'initialize_train'}
tmon._initialize_train(kwargs)
dmon._initialize_train(kwargs)
dmon._pre_fit({'net': net, 'callback_signal': 'pre_fit'})
tmon._pre_fit({'net': net, 'callback_signal': 'pre_fit'})
kwargs = {'net': net, 'testnet': net}
scales = []
np.random.seed(1)
for _ in range(1000):
dmon._pre_train_batch(kwargs)
tmon._pre_train_batch(kwargs)
scales.append(net.blobs['a'].data.shape[2])
from scipy.stats import chisquare, itemfreq
freq = itemfreq(scales)[:, 1]
_, pvalue = chisquare(freq)
self.assertTrue(pvalue > 0.1)
def test_RotatingMirroringMonitor(self):
"""Test the rotating mirroring monitor."""
import barrista.design as design
import numpy as np
from barrista.monitoring import (CyclingDataMonitor,
RotatingMirroringMonitor)
netspec = design.NetSpecification([[1, 3, 5, 5], [1, 1, 5, 5]],
inputs=['a', 'b'],
phase=design.Phase.TRAIN)
net = netspec.instantiate()
adata = np.zeros((3, 5, 5))
adata[:, 0, 0:3] = 1.
bdata = np.ones((1, 5, 5))
bdata[:, 0, 0:3] = 0.
dmon = CyclingDataMonitor(
X={'a': [adata],
'b': [bdata]})
tmon = RotatingMirroringMonitor(
blobinfos={'a': 2, 'b': 2},
max_rotation_degrees=90.
)
np.random.seed(2748)
kwargs = {'net': net,
'testnet': net,
'callback_signal': 'initialize_train'}
tmon._initialize_train(kwargs)
dmon._initialize_train(kwargs)
dmon._pre_fit({'net': net, 'callback_signal': 'pre_fit'})
tmon._pre_fit({'net': net, 'callback_signal': 'pre_fit'})
kwargs = {'net': net, 'testnet': net}
dmon._pre_train_batch(kwargs)
tmon._pre_train_batch(kwargs)
self.assertEqual(np.sum(net.blobs['a'].data), 54.)
self.assertTrue(np.all(net.blobs['a'].data[:, :, 2:4, 0] == 1.))
self.assertEqual(np.sum(net.blobs['b'].data), 31.)
self.assertTrue(np.all(net.blobs['b'].data[:, :, 2:4, 0] == 0.))
def test_RotatingMirroringMonitor_mirroring(self):
"""Test the rotating mirroring monitor mirroring capability."""
import barrista.design as design
import numpy as np
from barrista.monitoring import (CyclingDataMonitor,
RotatingMirroringMonitor)
netspec = design.NetSpecification([[1, 3, 5, 5], [1, 1, 5, 5]],
inputs=['a', 'b'],
phase=design.Phase.TRAIN)
net = netspec.instantiate()
adata = np.zeros((3, 5, 5))
adata[:, 0, 0:3] = 1.
bdata = np.ones((1, 5, 5))
bdata[:, 0, 0:3] = 0.
dmon = CyclingDataMonitor(
X={'a': [adata],
'b': [bdata]})
tmon = RotatingMirroringMonitor(
blobinfos={'a': 2, 'b': 2},
max_rotation_degrees=0.,
mirror_prob=0.5
)
np.random.seed(2748)
kwargs = {'net': net,
'testnet': net,
'callback_signal': 'initialize_train'}
tmon._initialize_train(kwargs)
dmon._initialize_train(kwargs)
dmon._pre_fit({'net': net, 'callback_signal': 'pre_fit'})
tmon._pre_fit({'net': net, 'callback_signal': 'pre_fit'})
kwargs = {'net': net, 'testnet': net}
dmon._pre_train_batch(kwargs)
tmon._pre_train_batch(kwargs)
self.assertEqual(np.sum(net.blobs['a'].data), np.sum(adata))
self.assertTrue(np.all(net.blobs['a'].data[:, :, 0, 2:4] == 1.))
self.assertEqual(np.sum(net.blobs['b'].data), np.sum(bdata))
self.assertTrue(np.all(net.blobs['b'].data[:, :, 0, 2:4] == 0.))
def test_RotatingMirroringMonitor_mirroring_swapvalues(self):
"""Test the rotating mirroring monitor mirroring swap capability."""
import barrista.design as design
import numpy as np
from barrista.monitoring import (CyclingDataMonitor,
RotatingMirroringMonitor)
netspec = design.NetSpecification([[1, 3, 5, 5], [1, 1, 5, 5]],
inputs=['a', 'b'],
phase=design.Phase.TRAIN)
net = netspec.instantiate()
adata = np.zeros((3, 5, 5))
adata[:, 0, 0:3] = 1.
bdata = np.ones((1, 5, 5))
bdata[:, 0, 0:3] = 0.
dmon = CyclingDataMonitor(
X={'a': [adata],
'b': [bdata]})
tmon = RotatingMirroringMonitor(
blobinfos={'a': 3, 'b': 3},
max_rotation_degrees=0.,
mirror_prob=0.5,
mirror_value_swaps={'a': {1: [(0, 1), (1, 2)]}},
mirror_layer_swaps={'a': [(1, 2)]}
)
np.random.seed(2748)
kwargs = {'net': net,
'testnet': net,
'callback_signal': 'initialize_train'}
tmon._initialize_train(kwargs)
dmon._initialize_train(kwargs)
dmon._pre_fit({'net': net, 'callback_signal': 'pre_fit'})
tmon._pre_fit({'net': net, 'callback_signal': 'pre_fit'})
kwargs = {'net': net, 'testnet': net}
dmon._pre_train_batch(kwargs)
tmon._pre_train_batch(kwargs)
self.assertEqual(np.sum(net.blobs['a'].data[:, (0, 1), :, :]),
np.sum(adata[(0, 2), :, :]))
self.assertEqual(np.sum(net.blobs['a'].data[:, 2, :, :]),
np.sum(adata[1, :, :]+1))
self.assertTrue(np.all(net.blobs['a'].data[:, (0, 1), 0, 2:4] == 1.))
self.assertTrue(np.all(net.blobs['a'].data[:, 2, 0, 2:4] == 2.))
self.assertEqual(np.sum(net.blobs['b'].data), np.sum(bdata))
self.assertTrue(np.all(net.blobs['b'].data[:, :, 0, 2:4] == 0.))
def test_Checkpointer(self):
"""Test the ``Checkpointer``."""
import tempfile
import shutil
import os
import barrista.design as design
import numpy as np
from barrista.design import (ConvolutionLayer, InnerProductLayer,
SoftmaxWithLossLayer)
from barrista.monitoring import Checkpointer
from barrista import solver as _solver
netspec = design.NetSpecification([[10, 3, 3, 3], [10]],
inputs=['data', 'annotations'],
phase=design.Phase.TRAIN)
layers = []
conv_params = {'Convolution_kernel_size': 3,
'Convolution_num_output': 3,
'Convolution_pad': 1}
layers.append(ConvolutionLayer(**conv_params))
layers.append(InnerProductLayer(InnerProduct_num_output=2,
tops=['out']))
layers.append(SoftmaxWithLossLayer(bottoms=['out', 'annotations']))
netspec.layers.extend(layers)
net = netspec.instantiate()
dirpath = tempfile.mkdtemp()
chckptr = Checkpointer(dirpath+os.sep, 10)
X = {'data': np.zeros((10, 3, 3, 3), dtype='float32'),
'annotations': np.ones((10, 1), dtype='float32')}
solver = _solver.SGDSolver(
base_lr=0.01,
snapshot_prefix=dirpath+os.sep)
net.fit(30,
solver,
X=X,
train_callbacks=[chckptr])
dircontents = os.listdir(dirpath)
self.assertIn('_iter_2.caffemodel', dircontents)
self.assertIn('_iter_3.caffemodel', dircontents)
if hasattr(solver._solver, 'snapshot'):
self.assertIn('_iter_2.solverstate', dircontents)
self.assertIn('_iter_3.solverstate', dircontents)
shutil.rmtree(dirpath)
def test_GradientMonitor(self):
"""Test the ``GradientMonitor``."""
try:
import matplotlib.pyplot as plt
_ = plt.figure()
except RuntimeError:
return
except ImportError:
return
import barrista.design as design
import numpy as np
import os
from barrista.design import (ConvolutionLayer, ReLULayer,
SoftmaxWithLossLayer, InnerProductLayer)
from barrista.monitoring import GradientMonitor
from barrista.tools import TemporaryDirectory
from barrista import solver as _solver
netspec = design.NetSpecification([[10, 3, 3, 3], [10]],
inputs=['data', 'annotations'],
phase=design.PROTODETAIL.TRAIN)
layers = []
conv_params = {'Convolution_kernel_size': 3,
'Convolution_num_output': 32,
'Convolution_pad': 0,
'Convolution_weight_filler': design.PROTODETAIL.FillerParameter(
type='xavier')}
layers.append(ConvolutionLayer(**conv_params))
layers.append(ReLULayer())
layers.append(InnerProductLayer(
InnerProduct_num_output=2,
name='net_out',
InnerProduct_weight_filler=design.PROTODETAIL.FillerParameter(
type='xavier')))
layers.append(SoftmaxWithLossLayer(bottoms=['net_out', 'annotations']))
netspec.layers.extend(layers)
net = netspec.instantiate()
# For fit.
solver = _solver.SGDSolver(
base_lr=0.01)
X = {'data': np.zeros((10, 3, 3, 3), dtype='float32'),
'annotations': np.ones((10, 1), dtype='float32')}
X['data'][:, 0, 0, 0] = 1.
with TemporaryDirectory() as tmpdir:
net.fit(100,
solver,
X,
train_callbacks=[GradientMonitor(10, tmpdir + os.sep)])
for idx in range(10):
self.assertTrue(os.path.exists(os.path.join(
tmpdir,
'gradient_hists_{}.png'.format(idx))))
self.assertTrue(os.path.exists(os.path.join(
tmpdir,
'gradient_magnitude_{}.png'.format(idx))))
net.fit(100,
solver,
X,
train_callbacks=[GradientMonitor(10,
tmpdir + os.sep,
relative=True)])
for idx in range(10):
self.assertTrue(os.path.exists(os.path.join(
tmpdir,
'gradient_hists_rel_{}.png'.format(idx))))
self.assertTrue(os.path.exists(os.path.join(
tmpdir,
'gradient_magnitude_rel_{}.png'.format(idx))))
def test_ActivationMonitor(self):
"""Test the ``ActivationMonitor``."""
try:
import matplotlib.pyplot as plt
_ = plt.figure()
except RuntimeError:
return
except ImportError:
return
import barrista.design as design
import numpy as np
import os
from barrista.design import (ConvolutionLayer, ReLULayer,
SoftmaxWithLossLayer, InnerProductLayer)
from barrista.monitoring import ActivationMonitor
from barrista.tools import TemporaryDirectory
from barrista import solver as _solver
netspec = design.NetSpecification([[10, 3, 3, 3], [10]],
inputs=['data', 'annotations'],
phase=design.PROTODETAIL.TRAIN)
layers = []
conv_params = {'name': 'conv',
'Convolution_kernel_size': 3,
'Convolution_num_output': 32,
'Convolution_pad': 0,
'Convolution_weight_filler': design.PROTODETAIL.FillerParameter(
type='xavier')}
layers.append(ConvolutionLayer(**conv_params))
layers.append(ReLULayer())
layers.append(InnerProductLayer(
InnerProduct_num_output=2,
name='net_out',
InnerProduct_weight_filler=design.PROTODETAIL.FillerParameter(
type='xavier')))
layers.append(SoftmaxWithLossLayer(bottoms=['net_out', 'annotations']))
netspec.layers.extend(layers)
net = netspec.instantiate()
# For fit.
solver = _solver.SGDSolver(
base_lr=0.01)
X = {'data': np.zeros((10, 3, 3, 3), dtype='float32'),
'annotations': np.ones((10, 1), dtype='float32')}
X['data'][:, 0, 0, 0] = 1.
with TemporaryDirectory() as tmpdir:
net.fit(100,
solver,
X,
train_callbacks=[ActivationMonitor(10, tmpdir + os.sep)])
for idx in range(10):
self.assertTrue(os.path.exists(os.path.join(
tmpdir,
'activations_conv_{}.png'.format(idx))))
def test_FilterMonitor(self):
"""Test the ``FilterMonitor``."""
try:
import matplotlib.pyplot as plt
_ = plt.figure()
except RuntimeError:
return
except ImportError:
return
import barrista.design as design
import numpy as np
import os
from barrista.design import (ConvolutionLayer, ReLULayer,
SoftmaxWithLossLayer, InnerProductLayer)
from barrista.monitoring import FilterMonitor
from barrista.tools import TemporaryDirectory
from barrista import solver as _solver
netspec = design.NetSpecification([[10, 3, 3, 3], [10]],
inputs=['data', 'annotations'],
phase=design.PROTODETAIL.TRAIN)
layers = []
conv_params = {'name': 'conv',
'Convolution_kernel_size': 3,
'Convolution_num_output': 32,
'Convolution_pad': 0,
'Convolution_weight_filler': design.PROTODETAIL.FillerParameter(
type='xavier')}
layers.append(ConvolutionLayer(**conv_params))
layers.append(ReLULayer())
layers.append(InnerProductLayer(
InnerProduct_num_output=2,
name='net_out',
InnerProduct_weight_filler=design.PROTODETAIL.FillerParameter(
type='xavier')))
layers.append(SoftmaxWithLossLayer(bottoms=['net_out', 'annotations']))
netspec.layers.extend(layers)
net = netspec.instantiate()
# For fit.
solver = _solver.SGDSolver(
base_lr=0.01)
X = {'data': np.zeros((10, 3, 3, 3), dtype='float32'),
'annotations': np.ones((10, 1), dtype='float32')}
X['data'][:, 0, 0, 0] = 1.
with TemporaryDirectory() as tmpdir:
net.fit(100,
solver,
X,
train_callbacks=[FilterMonitor(10, tmpdir + os.sep)])
for idx in range(10):
self.assertTrue(os.path.exists(os.path.join(
tmpdir,
'parameters_conv_0_{}.png'.format(idx))))
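# A minimal sketch of how the monitors exercised above are typically combined
# in user code (illustrative only, assuming a ``net`` and ``solver`` built as
# in the tests; ``train_data`` stands for a dict of named input arrays; kept
# as a comment so this module stays import-safe):
#
#     from barrista.monitoring import (CyclingDataMonitor, ProgressIndicator,
#                                      Checkpointer)
#     net.fit(10000,
#             solver,
#             train_callbacks=[CyclingDataMonitor(X=train_data),
#                              ProgressIndicator(),
#                              Checkpointer('snapshots/', 1000)])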
class NetTestCase(unittest.TestCase):
"""Test the new ``Net`` functions."""
def test_instantiation(self):
"""Test ``Net`` constructors."""
import barrista.design as design
from barrista.design import ConvolutionLayer, ReLULayer, Phase
from barrista.net import Net
import tempfile
netspec = design.NetSpecification([[10, 3, 3, 3], [10]],
inputs=['data', 'annotations'],
predict_inputs=['data'],
predict_input_shapes=[[10, 3, 3, 3]])
layers = []
conv_params = {'Convolution_kernel_size': 3,
'Convolution_num_output': 32,
'Convolution_pad': 1}
layers.append(ConvolutionLayer(**conv_params))
layers.append(ReLULayer())
netspec.layers.extend(layers)
with tempfile.NamedTemporaryFile(mode='r',
suffix=".prototxt") as tmpfile:
netspec.to_prototxt(output_filename=tmpfile.name)
tmpfile.file.flush()
net = Net(tmpfile.name, Phase.TEST)
# In older versions of caffe, the input layer was not visible.
self.assertTrue(len(net.layers) in [2, 3])
self.assertEqual(net.blobs[net.inputs[0]].data.shape, (10, 3, 3, 3))
self.assertTrue(net.blobs[net.inputs[1]].data.shape == (10,) or
net.blobs[net.inputs[1]].data.shape == (10, 1, 1, 1))
def test_dual_net_use(self):
"""Test the specification of a prediction net."""
import numpy as np
import barrista.design as design
from barrista.design import (ConvolutionLayer, InnerProductLayer,
SoftmaxWithLossLayer, AccuracyLayer,
SoftmaxLayer)
from barrista import solver as _solver
netspec = design.NetSpecification([[10, 3, 3, 3], [10]],
inputs=['data', 'annotations'],
predict_inputs=['data'],
predict_input_shapes=[[10, 3, 3, 3]])
layers = []
conv_params = {'Convolution_kernel_size': 3,
'Convolution_num_output': 3,
'Convolution_pad': 1}
layers.append(ConvolutionLayer(**conv_params))
layers.append(InnerProductLayer(InnerProduct_num_output=2,
tops=['out']))
layers.append(SoftmaxLayer(bottoms=['out'],
include_stages=['predict'],
name='softmax'))
layers.append(SoftmaxWithLossLayer(bottoms=['out', 'annotations'],
include_stages=['fit']))
layers.append(AccuracyLayer(name='accuracy',
bottoms=['out', 'annotations'],
include_stages=['fit']))
netspec.layers.extend(layers)
net = netspec.instantiate()
X = {'data': np.zeros((10, 3, 3, 3), dtype='float32'),
'annotations': np.ones((10, 1), dtype='float32')}
solver = _solver.SGDSolver(
base_lr=0.01)
net.fit(20,
solver,
X)
predictions = np.array(net.predict(np.zeros((10, 3, 3, 3))))
predictions = np.argmax(predictions, axis=1)
self.assertEqual(np.sum(predictions == 1), 10)
# Force to use the fit network.
accy = net.predict(X,
use_fit_network=True,
allow_train_phase_for_test=True)['accuracy'][0]
self.assertEqual(accy, 1.0)
def test_reshape_blob(self):
"""Test the reshaping of a blob across nets."""
import numpy as np
import barrista.design as design
from barrista.design import (ConvolutionLayer,
SoftmaxWithLossLayer, AccuracyLayer,
SoftmaxLayer)
from barrista import solver as _solver
netspec = design.NetSpecification([[10, 3, 3, 3], [10, 1, 3, 3]],
inputs=['data', 'annotations'],
predict_inputs=['data'],
predict_input_shapes=[[10, 3, 3, 3]])
layers = []
conv_params = {'Convolution_kernel_size': 3,
'Convolution_num_output': 3,
'Convolution_pad': 1,
'name': 'out'}
layers.append(ConvolutionLayer(**conv_params))
layers.append(SoftmaxLayer(bottoms=['out'],
include_stages=['predict'],
name='softmax'))
layers.append(SoftmaxWithLossLayer(bottoms=['out', 'annotations'],
include_stages=['fit']))
layers.append(AccuracyLayer(name='accuracy',
bottoms=['out', 'annotations'],
include_stages=['fit']))
netspec.layers.extend(layers)
net = netspec.instantiate()
net.reshape_blob('data', 10, 3, 5, 5)
net.blobs['annotations'].reshape(10, 1, 5, 5)
X = {'data': np.zeros((10, 3, 5, 5), dtype='float32'),
'annotations': np.ones((10, 1, 5, 5), dtype='float32')}
solver = _solver.SGDSolver(
base_lr=0.01)
net.fit(20,
solver,
X)
predictions = np.array(net.predict(np.zeros((10, 3, 5, 5))))
predictions = np.argmax(predictions, axis=1)
self.assertEqual(np.sum(predictions == 1), 250)
# Force to use the fit network.
accy = net.predict(X,
use_fit_network=True,
allow_train_phase_for_test=True)['accuracy'][0]
self.assertEqual(accy, 1.0)
def test_load_blobs_from(self):
"""Test the loading method."""
import tempfile
import shutil
import os
import barrista.design as design
import numpy as np
from barrista.design import (ConvolutionLayer, InnerProductLayer,
SoftmaxWithLossLayer)
from barrista.monitoring import Checkpointer
from barrista import solver as _solver
netspec = design.NetSpecification([[10, 3, 3, 3], [10]],
inputs=['data', 'annotations'],
phase=design.Phase.TRAIN)
layers = []
conv_params = {'Convolution_kernel_size': 3,
'Convolution_num_output': 3,
'Convolution_pad': 1}
layers.append(ConvolutionLayer(**conv_params))
layers.append(InnerProductLayer(name='outlbf',
InnerProduct_num_output=2,
tops=['out']))
layers.append(SoftmaxWithLossLayer(bottoms=['out', 'annotations']))
netspec.layers.extend(layers)
net = netspec.instantiate()
dirpath = tempfile.mkdtemp()
chckptr = Checkpointer(dirpath + os.sep, 10)
X = {'data': np.zeros((10, 3, 3, 3), dtype='float32'),
'annotations': np.ones((10, 1), dtype='float32')}
solver = _solver.SGDSolver(
base_lr=0.01,
snapshot_prefix=dirpath+os.sep)
net.fit(20,
solver,
X=X,
train_callbacks=[chckptr])
checkp0_data = net.params['_layer_0'][0].data.copy()
net.params['_layer_0'][0].data[...] = 10.
assert np.any(net.params['_layer_0'][0].data != checkp0_data)
net.load_blobs_from(os.path.join(dirpath, '_iter_2.caffemodel'))
assert np.all(net.params['_layer_0'][0].data == checkp0_data)
if (hasattr(solver._solver, 'restore') and
hasattr(solver._solver, 'snapshot')):
            # For newer versions of caffe, check the solver's restore method.
solver._solver.restore(os.path.join(dirpath, '_iter_2.solverstate'))
shutil.rmtree(dirpath)
def test_multiinput(self):
"""Test multiinput prediction."""
import numpy as np
import barrista.design as design
from barrista.design import (ConvolutionLayer, InnerProductLayer,
SoftmaxWithLossLayer, AccuracyLayer)
from barrista import solver as _solver
netspec = design.NetSpecification([[10, 3, 3, 3], [10]],
inputs=['data', 'annotations'],
phase=design.Phase.TRAIN)
layers = []
conv_params = {'Convolution_kernel_size': 3,
'Convolution_num_output': 3,
'Convolution_pad': 1}
layers.append(ConvolutionLayer(**conv_params))
layers.append(InnerProductLayer(InnerProduct_num_output=2,
tops=['out']))
layers.append(SoftmaxWithLossLayer(bottoms=['out', 'annotations']))
layers.append(AccuracyLayer(name='accuracy',
bottoms=['out', 'annotations']))
netspec.layers.extend(layers)
net = netspec.instantiate()
X = {'data': np.zeros((10, 3, 3, 3), dtype='float32'),
'annotations': np.ones((10, 1), dtype='float32')}
solver = _solver.SGDSolver(
base_lr=0.01)
net.fit(20,
solver,
X)
accy = net.predict(X,
allow_train_phase_for_test=True)['accuracy'][0]
self.assertEqual(accy, 1.0)
if CV2_AVAILABLE:
accy = net.predict(X,
input_processing_flags={'data': 'rc',
'annotations': 'n'},
allow_train_phase_for_test=True)['accuracy'][0]
self.assertEqual(accy, 1.0)
accy = net.predict(X, input_processing_flags={'data': 'p0',
'annotations': 'n'},
allow_train_phase_for_test=True)['accuracy'][0]
self.assertEqual(accy, 1.0)
def test_multioutput(self):
"""Test multioutput prediction."""
import numpy as np
import barrista.design as design
from barrista.design import (ConvolutionLayer, EuclideanLossLayer)
from barrista import solver as _solver
netspec = design.NetSpecification([[10, 3, 7, 7], [10, 1, 7, 7]],
inputs=['data', 'annotations'],
predict_inputs=['data'],
predict_input_shapes=[[10, 3, 7, 7]])
layers = []
layers.append(ConvolutionLayer(Convolution_kernel_size=3,
Convolution_num_output=1,
Convolution_pad=1,
name='conv1',
tops=['conv1_out']))
layers.append(ConvolutionLayer(Convolution_kernel_size=3,
Convolution_num_output=1,
Convolution_pad=1,
name='conv2',
tops=['conv2_out'],
bottoms=['data']))
layers.append(EuclideanLossLayer(name='loss1',
bottoms=['conv1_out', 'annotations'],
include_stages=['fit']))
layers.append(EuclideanLossLayer(name='loss2',
bottoms=['conv2_out', 'annotations'],
include_stages=['fit']))
netspec.layers.extend(layers)
net = netspec.instantiate()
X = {'data': np.zeros((10, 3, 7, 7), dtype='float32'),
'annotations': np.ones((10, 1, 7, 7), dtype='float32')}
solver = _solver.SGDSolver(
base_lr=0.01)
net.fit(20,
solver,
X)
pred = net.predict([np.zeros((3, 3, 3))],
input_processing_flags={'data': 'p0'},
output_processing_flags={'conv1_out': 'p0',
'conv2_out': 'n'})
assert pred['conv1_out'][0].shape == (1, 3, 3)
assert pred['conv2_out'][0].shape == (1, 7, 7)
pred = net.predict([np.zeros((3, 3, 3))],
input_processing_flags={'data': 'p0'},
output_processing_flags={'conv1_out': 'n',
'conv2_out': 'p0'})
assert pred['conv1_out'][0].shape == (1, 7, 7)
assert pred['conv2_out'][0].shape == (1, 3, 3)
def test_predict_sliding_window(self):
"""Test the ``predict_sliding_window`` method."""
if not CV2_AVAILABLE:
return
import numpy as np
import barrista.design as design
from barrista.design import (ConvolutionLayer, InnerProductLayer,
EuclideanLossLayer)
from barrista import solver as _solver
netspec = design.NetSpecification([[10, 3, 3, 3], [10]],
inputs=['data', 'annotations'],
predict_inputs=['data'],
predict_input_shapes=[[10, 3, 3, 3]])
layers = []
conv_params = {'Convolution_kernel_size': 3,
'Convolution_num_output': 3,
'Convolution_pad': 1}
layers.append(ConvolutionLayer(**conv_params))
layers.append(InnerProductLayer(InnerProduct_num_output=1,
tops=['out']))
layers.append(EuclideanLossLayer(name='se',
bottoms=['out', 'annotations'],
include_stages=['fit']))
netspec.layers.extend(layers)
net = netspec.instantiate()
X = {'data': np.zeros((10, 3, 3, 3), dtype='float32'),
'annotations': np.ones((10, 1), dtype='float32')}
solver = _solver.SGDSolver(
base_lr=0.01)
net.fit(20,
solver,
X)
# Rescaling.
predictions = np.array(net.predict_sliding_window(
np.zeros((10, 3, 5, 5))))
self.assertEqual(np.sum(predictions != 0.), 90)
# Step size.
predictions = np.array(net.predict_sliding_window(
np.zeros((10, 3, 5, 5)),
extraction_step=(1, 2)))
self.assertEqual(np.sum(predictions != 0.), 90)
predictions = np.array(net.predict_sliding_window(
np.zeros((10, 3, 5, 5)),
extraction_step=(1, 2),
account_for_step=False,
pad_border=False))
self.assertEqual(np.sum(predictions != 0.), 60)
predictions = np.array(net.predict_sliding_window(
np.zeros((10, 3, 5, 5)),
extraction_step=(1, 2),
account_for_step=True,
pad_border=False))
self.assertEqual(np.sum(predictions != 0.), 90)
def test_predict_sliding_window_eq_out(self):
"""Test the ``predict_sliding_window`` method with full size output."""
if not CV2_AVAILABLE:
return
import numpy as np
import barrista.design as design
from barrista.design import (ConvolutionLayer, InnerProductLayer,
EuclideanLossLayer)
from barrista import solver as _solver
netspec = design.NetSpecification([[10, 3, 3, 3]],
inputs=['data'])
layers = []
conv_params = {'Convolution_kernel_size': 3,
'Convolution_num_output': 3,
'Convolution_pad': 1}
layers.append(ConvolutionLayer(**conv_params))
netspec.layers.extend(layers)
net = netspec.instantiate()
_ = np.array(net.predict_sliding_window(
np.zeros((10, 3, 5, 5))))
_ = np.array(net.predict_sliding_window(
np.zeros((10, 3, 5, 5)),
overlap_combine_max=False))
def test_predict(self):
"""Test the ``predict`` method."""
import numpy as np
import barrista.design as design
from barrista.design import (ConvolutionLayer, InnerProductLayer,
SoftmaxWithLossLayer, SoftmaxLayer)
from barrista import solver as _solver
netspec = design.NetSpecification([[10, 3, 3, 3], [10]],
inputs=['data', 'annotations'],
predict_inputs=['data'],
predict_input_shapes=[[10, 3, 3, 3]])
layers = []
conv_params = {'Convolution_kernel_size': 3,
'Convolution_num_output': 3,
'Convolution_pad': 1}
layers.append(ConvolutionLayer(**conv_params))
layers.append(InnerProductLayer(InnerProduct_num_output=2,
tops=['out']))
layers.append(SoftmaxLayer(bottoms=['out'],
include_stages=['predict'],
name='softmax'))
layers.append(SoftmaxWithLossLayer(bottoms=['out', 'annotations'],
include_stages=['fit']))
netspec.layers.extend(layers)
net = netspec.instantiate()
solver = _solver.SGDSolver(
base_lr=0.01)
X = {'data': np.zeros((10, 3, 3, 3), dtype='float32'),
'annotations': np.ones((10, 1), dtype='float32')}
net.fit(20,
solver,
X)
if CV2_AVAILABLE:
# Rescaling.
predictions = np.array(net.predict(np.zeros((10, 3, 1, 1)),
input_processing_flags={'data': 'rl'}))
predictions = np.argmax(predictions, axis=1)
self.assertEqual(np.sum(predictions == 1), 10)
# Padding.
predictions_padded = np.array(
net.predict(np.zeros((10, 3, 1, 1)),
input_processing_flags={'data': 'p0'}))
predictions = np.argmax(predictions_padded, axis=1)
self.assertEqual(np.sum(predictions == 1), 10)
            # out_blob_names.
predictions = np.array(
net.predict(np.zeros((10, 3, 1, 1)),
input_processing_flags={'data': 'p0'},
out_blob_names=['out'],
input_size_spec=(10, 10)))
predictions = np.argmax(predictions, axis=1)
self.assertEqual(np.sum(predictions == 1), 10)
# Static inputs.
predictions = np.array(net.predict(
{'data': np.zeros((10, 3, 3, 3))},
static_inputs=['data']))
predictions = np.argmax(predictions, axis=1)
self.assertEqual(np.sum(predictions == 1), 10)
# Upscaling.
_ = np.array(
net.predict(np.zeros((10, 3, 1, 1)),
input_processing_flags={'data': 'p0'},
output_processing_flags={'softmax': 'p0'}))
# Oversample.
predictions = np.array(net.predict(np.zeros((10, 3, 1, 1)),
oversample=True))
np.testing.assert_allclose(predictions, predictions_padded, rtol=1e-05)
if CV2_AVAILABLE:
predictions = np.array(net.predict(np.zeros((10, 3, 1, 1)),
oversample=True,
before_oversample_resize_to=(5, 5)))
np.testing.assert_allclose(predictions, predictions_padded, rtol=1e-05)
def test_predict_upscaling(self):
"""Test the ``predict`` method upscaling capability."""
if not CV2_AVAILABLE:
return
import numpy as np
import barrista.design as design
from barrista.design import (ConvolutionLayer, InnerProductLayer,
SoftmaxWithLossLayer, SoftmaxLayer)
from barrista import solver as _solver
netspec = design.NetSpecification([[10, 3, 3, 3], [10, 1, 1, 1]],
inputs=['data', 'annotations'],
predict_inputs=['data'],
predict_input_shapes=[[10, 3, 3, 3]])
layers = []
conv_params = {'Convolution_kernel_size': 3,
'Convolution_num_output': 3,
'Convolution_pad': 0,
'name': 'out'}
layers.append(ConvolutionLayer(**conv_params))
layers.append(SoftmaxLayer(bottoms=['out'],
include_stages=['predict'],
name='softmax'))
layers.append(SoftmaxWithLossLayer(bottoms=['out', 'annotations'],
include_stages=['fit']))
netspec.layers.extend(layers)
net = netspec.instantiate()
solver = _solver.SGDSolver(
base_lr=0.01)
X = {'data': np.zeros((10, 3, 3, 3), dtype='float32'),
'annotations': np.ones((10, 1), dtype='float32')}
net.fit(20,
solver,
X)
# Upscaling.
predictions_us = np.array(
net.predict(np.zeros((10, 3, 1, 1)),
input_processing_flags={'data': 'p0'},
output_processing_flags={'softmax': 'p0'}))
self.assertTrue(np.all(predictions_us.shape == (10, 3, 1, 1)))
def test_visualize(self):
"""Test the ``visualize`` function."""
import barrista.design as design
# pylint: disable=W0212
if design._draw is None:
return
from barrista.design import ConvolutionLayer, ReLULayer
netspec = design.NetSpecification([[10, 3, 51, 51], [10]],
inputs=['data', 'annotations'],
predict_inputs=['data'],
predict_input_shapes=[[2, 3, 2, 2]])
layers = []
conv_params = {'Convolution_kernel_size': 3,
'Convolution_num_output': 32,
'Convolution_pad': 1}
layers.append(ConvolutionLayer(**conv_params))
layers.append(ReLULayer())
netspec.layers.extend(layers)
net = netspec.instantiate()
viz = net.visualize()
self.assertEqual(viz.ndim, 3)
class ToolsTestCase(unittest.TestCase):
"""Test the tools module."""
def test_pad(self):
"""Test the padding function."""
import numpy as np
from barrista import tools
tim = np.ones((3, 1, 1))
padded = tools.pad(tim, (3, 3))
aim = np.zeros((3, 3, 3))
aim[:, 1, 1] = 1.
self.assertTrue(np.all(aim == padded))
padded, padding = tools.pad(tim, (3, 3), get_padding=True)
aim = np.zeros((3, 3, 3))
aim[:, 1, 1] = 1.
self.assertTrue(np.all(aim == padded))
self.assertEqual(padding, ((0, 0), (1., 1.), (1., 1.)))
class ExampleTestCase(unittest.TestCase):
"""Test that the example runs successfully."""
def test_running(self):
"""Run it."""
import sys
import subprocess
subprocess.check_call([sys.executable,
'examples/showcase.py'])
class SolverTestCase(unittest.TestCase):
"""Test the tools module."""
def test_fit(self):
"""Test the fit function."""
import numpy as np
import barrista.design as design
from barrista.design import (ConvolutionLayer, InnerProductLayer,
SoftmaxWithLossLayer, AccuracyLayer)
from barrista import solver as _solver
netspec = design.NetSpecification([[10, 3, 3, 3], [10]],
inputs=['data', 'annotations'],
phase=design.Phase.TRAIN)
layers = []
conv_params = {'Convolution_kernel_size': 3,
'Convolution_num_output': 3,
'Convolution_pad': 1}
layers.append(ConvolutionLayer(**conv_params))
layers.append(InnerProductLayer(InnerProduct_num_output=2,
tops=['out']))
layers.append(SoftmaxWithLossLayer(bottoms=['out', 'annotations']))
layers.append(AccuracyLayer(name='accuracy',
bottoms=['out', 'annotations']))
netspec.layers.extend(layers)
net = netspec.instantiate()
X = {'data': np.zeros((10, 3, 3, 3), dtype='float32'),
'annotations': np.ones((10, 1), dtype='float32')}
solver = _solver.SGDSolver(base_lr=0.01)
solver.fit(20,
net=net,
X=X)
accy = net.predict(X,
allow_train_phase_for_test=True)['accuracy'][0]
self.assertEqual(accy, 1.0)
new_net = netspec.instantiate()
new_solver = _solver.SGDSolver(net=new_net,
base_lr=0.01)
new_solver.fit(20,
X)
accy = new_net.predict(X,
allow_train_phase_for_test=True)['accuracy'][0]
self.assertEqual(accy, 1.0)
new_net = netspec.instantiate()
new_solver = _solver.SGDSolver(net=new_net,
base_lr=0.01)
new_solver.fit(20,
X,
use_fit_phase_for_validation=True)
accy = new_net.predict(X,
allow_train_phase_for_test=True)['accuracy'][0]
self.assertEqual(accy, 1.0)
new_solver.fit(20,
X,
X_val=X,
test_initialization=True,
test_interval=10,
use_fit_phase_for_validation=True)
accy = new_net.predict(X,
allow_train_phase_for_test=True)['accuracy'][0]
self.assertEqual(accy, 1.0)
def test_restore(self):
"""Test the ``restore`` method."""
import tempfile
import shutil
import os
import barrista.design as design
import numpy as np
from barrista.design import (ConvolutionLayer, InnerProductLayer,
SoftmaxWithLossLayer, PROTODETAIL)
from barrista.monitoring import Checkpointer
from barrista import solver as _solver
netspec = design.NetSpecification([[10, 3, 3, 3], [10]],
inputs=['data', 'annotations'],
phase=design.Phase.TRAIN)
layers = []
conv_params = {'Convolution_kernel_size': 3,
'Convolution_num_output': 3,
'Convolution_pad': 1,
'Convolution_weight_filler':
PROTODETAIL.FillerParameter(type='xavier')}
layers.append(ConvolutionLayer(**conv_params))
layers.append(InnerProductLayer(name='outlbf',
InnerProduct_num_output=2,
tops=['out']))
layers.append(SoftmaxWithLossLayer(bottoms=['out', 'annotations']))
netspec.layers.extend(layers)
net = netspec.instantiate()
dirpath = tempfile.mkdtemp()
chckptr = Checkpointer(dirpath + os.sep, 10)
X = {'data': np.ones((10, 3, 3, 3), dtype='float32'),
'annotations': np.ones((10, 1), dtype='float32')}
solver = _solver.SGDSolver(
base_lr=0.01,
snapshot_prefix=dirpath+os.sep)
net.fit(30,
solver,
X=X,
train_callbacks=[chckptr])
if not (hasattr(solver._solver, 'restore') and
hasattr(solver._solver, 'snapshot')):
return
newsolver = _solver.SGDSolver(
base_lr=0.01,
snapshot_prefix=dirpath+os.sep)
newnet = netspec.instantiate()
newsolver.restore(os.path.join(dirpath, '_iter_2.solverstate'),
newnet)
newsolver.fit(10,
X=X)
self.assertTrue(np.all(net.params['_layer_0'][0].data[...] ==
newnet.params['_layer_0'][0].data[...]))
shutil.rmtree(dirpath)
def test_sgd(self):
"""Test the stochastic gradient descent."""
import numpy as np
import barrista.design as design
from barrista.design import (ConvolutionLayer, InnerProductLayer,
SoftmaxWithLossLayer, AccuracyLayer)
netspec = design.NetSpecification([[10, 3, 3, 3], [10]],
inputs=['data', 'annotations'],
phase=design.Phase.TRAIN)
layers = []
conv_params = {'Convolution_kernel_size': 3,
'Convolution_num_output': 3,
'Convolution_pad': 1}
layers.append(ConvolutionLayer(**conv_params))
layers.append(InnerProductLayer(InnerProduct_num_output=2,
tops=['out']))
layers.append(SoftmaxWithLossLayer(bottoms=['out', 'annotations']))
layers.append(AccuracyLayer(name='accuracy',
bottoms=['out', 'annotations']))
netspec.layers.extend(layers)
net = netspec.instantiate()
#######################################################################
# test sgd solver
#######################################################################
from barrista import solver as _solver
tmp = _solver.Get_solver_class('sgd')
self.assertTrue(issubclass(tmp, _solver.SGDSolver))
tmp = _solver.Get_caffe_solver_class(_solver.SolverType.SGD)
self.assertTrue(issubclass(tmp, _solver.SGDSolver))
with self.assertRaises(KeyError):
_ = _solver.Get_solver_class('test')
with self.assertRaises(TypeError):
tmp(2)
with self.assertRaises(Exception):
tmp(iter_size=2)
tmp_instance = tmp(base_lr=2)
solver_parameter_dict = tmp_instance.Get_parameter_dict()
self.assertEqual(solver_parameter_dict['base_lr'], 2)
if 'iter_size' in solver_parameter_dict.keys():
self.assertEqual(solver_parameter_dict['iter_size'], 1)
self.assertEqual(solver_parameter_dict['lr_policy'], 'fixed')
self.assertEqual(solver_parameter_dict['regularization_type'], 'L2')
self.assertNotIn('weight_decay', list(solver_parameter_dict.keys()))
self.assertNotIn('power', list(solver_parameter_dict.keys()))
params = {'net': net, 'base_lr': 2}
if 'iter_size' in solver_parameter_dict.keys():
params['iter_size'] = 2
tmp_instance = tmp(**params)
solver_parameter_dict = tmp_instance.Get_parameter_dict()
self.assertEqual(solver_parameter_dict['base_lr'], 2)
if 'iter_size' in solver_parameter_dict.keys():
self.assertEqual(solver_parameter_dict['iter_size'], 2)
self.assertEqual(solver_parameter_dict['lr_policy'], 'fixed')
self.assertEqual(solver_parameter_dict['regularization_type'], 'L2')
self.assertNotIn('weight_decay', list(solver_parameter_dict.keys()))
self.assertNotIn('power', list(solver_parameter_dict.keys()))
params['regularization_type'] = 'L1'
tmp_instance = tmp(**params)
solver_parameter_dict = tmp_instance.Get_parameter_dict()
self.assertEqual(solver_parameter_dict['base_lr'], 2)
if 'iter_size' in solver_parameter_dict.keys():
self.assertEqual(solver_parameter_dict['iter_size'], 2)
self.assertEqual(solver_parameter_dict['lr_policy'], 'fixed')
self.assertEqual(solver_parameter_dict['regularization_type'], 'L1')
self.assertNotIn('weight_decay', list(solver_parameter_dict.keys()))
self.assertNotIn('power', list(solver_parameter_dict.keys()))
if 'iter_size' in solver_parameter_dict.keys():
params['iter_size'] = 3
params['regularization_type'] = '--'
with self.assertRaises(AssertionError):
_ = tmp(**params).Get_parameter_dict()
with self.assertRaises(AssertionError):
_ = tmp(net=net,
base_lr=2,
lr_policy='step').Get_parameter_dict()
with self.assertRaises(AssertionError):
_ = tmp(net=net,
base_lr=2,
lr_policy='xx').Get_parameter_dict()
with self.assertRaises(AssertionError):
_ = tmp(net=net,
base_lr=2,
lr_policy='exp').Get_parameter_dict()
with self.assertRaises(AssertionError):
_ = tmp(net=net,
base_lr=2,
lr_policy='inv').Get_parameter_dict()
with self.assertRaises(AssertionError):
_ = tmp(net=net,
base_lr=2,
lr_policy='multistep').Get_parameter_dict()
with self.assertRaises(AssertionError):
_ = tmp(net=net,
base_lr=2,
lr_policy='poly').Get_parameter_dict()
with self.assertRaises(AssertionError):
_ = tmp(net=net, # noqa
base_lr=2,
lr_policy='sigmoid').Get_parameter_dict()
X = {'data': np.zeros((10, 3, 3, 3), dtype='float32'),
'annotations': np.ones((10, 1), dtype='float32')}
solver = tmp(base_lr=0.01)
solver.fit(20,
X,
net=net)
accy = net.predict(X,
allow_train_phase_for_test=True)['accuracy'][0]
self.assertEqual(accy, 1.0)
solver = tmp(net=net,
base_lr=0.01)
X = {'data': np.zeros((10, 3, 3, 3), dtype='float32'),
'annotations': np.ones((10, 1), dtype='float32')}
net.fit(20,
solver,
X)
accy = net.predict(X,
allow_train_phase_for_test=True)['accuracy'][0]
self.assertEqual(accy, 1.0)
def test_nesterov(self):
"""Test the nesterov solver."""
import numpy as np
import barrista.design as design
from barrista.design import (ConvolutionLayer, InnerProductLayer,
SoftmaxWithLossLayer, AccuracyLayer)
netspec = design.NetSpecification([[10, 3, 3, 3], [10]],
inputs=['data', 'annotations'],
phase=design.Phase.TRAIN)
layers = []
conv_params = {'Convolution_kernel_size': 3,
'Convolution_num_output': 3,
'Convolution_pad': 1}
layers.append(ConvolutionLayer(**conv_params))
layers.append(InnerProductLayer(InnerProduct_num_output=2,
tops=['out']))
layers.append(SoftmaxWithLossLayer(bottoms=['out', 'annotations']))
layers.append(AccuracyLayer(name='accuracy',
bottoms=['out', 'annotations']))
netspec.layers.extend(layers)
net = netspec.instantiate()
#######################################################################
# test nesterov solver
#######################################################################
from barrista import solver as _solver
tmp = _solver.Get_solver_class('nesterov')
self.assertTrue(issubclass(tmp, _solver.NesterovSolver))
tmp = _solver.Get_caffe_solver_class(_solver.SolverType.NESTEROV)
self.assertTrue(issubclass(tmp, _solver.NesterovSolver))
with self.assertRaises(TypeError):
tmp(2)
with self.assertRaises(Exception):
tmp(iter_size=2)
tmp_instance = tmp(base_lr=2)
solver_parameter_dict = tmp_instance.Get_parameter_dict()
self.assertEqual(solver_parameter_dict['base_lr'], 2)
if 'iter_size' in solver_parameter_dict.keys():
self.assertEqual(solver_parameter_dict['iter_size'], 1)
self.assertEqual(solver_parameter_dict['lr_policy'], 'fixed')
self.assertEqual(solver_parameter_dict['regularization_type'], 'L2')
self.assertEqual(solver_parameter_dict['momentum'], 0.0)
self.assertNotIn('weight_decay', list(solver_parameter_dict.keys()))
self.assertNotIn('power', list(solver_parameter_dict.keys()))
params = {'net': net, 'base_lr': 2}
if 'iter_size' in solver_parameter_dict.keys():
params['iter_size'] = 2
tmp_instance = tmp(**params)
solver_parameter_dict = tmp_instance.Get_parameter_dict()
self.assertEqual(solver_parameter_dict['base_lr'], 2)
if 'iter_size' in solver_parameter_dict.keys():
self.assertEqual(solver_parameter_dict['iter_size'], 2)
self.assertEqual(solver_parameter_dict['lr_policy'], 'fixed')
self.assertEqual(solver_parameter_dict['regularization_type'], 'L2')
self.assertEqual(solver_parameter_dict['momentum'], 0.0)
self.assertNotIn('weight_decay', list(solver_parameter_dict.keys()))
self.assertNotIn('power', list(solver_parameter_dict.keys()))
params['regularization_type'] = 'L1'
tmp_instance = tmp(**params)
solver_parameter_dict = tmp_instance.Get_parameter_dict()
self.assertEqual(solver_parameter_dict['base_lr'], 2)
if 'iter_size' in solver_parameter_dict.keys():
self.assertEqual(solver_parameter_dict['iter_size'], 2)
self.assertEqual(solver_parameter_dict['lr_policy'], 'fixed')
self.assertEqual(solver_parameter_dict['regularization_type'], 'L1')
self.assertEqual(solver_parameter_dict['momentum'], 0.0)
self.assertNotIn('weight_decay', list(solver_parameter_dict.keys()))
self.assertNotIn('power', list(solver_parameter_dict.keys()))
params['momentum'] = 1.
tmp_instance = tmp(**params)
solver_parameter_dict = tmp_instance.Get_parameter_dict()
self.assertEqual(solver_parameter_dict['base_lr'], 2)
if 'iter_size' in solver_parameter_dict.keys():
self.assertEqual(solver_parameter_dict['iter_size'], 2)
self.assertEqual(solver_parameter_dict['lr_policy'], 'fixed')
self.assertEqual(solver_parameter_dict['regularization_type'], 'L1')
self.assertEqual(solver_parameter_dict['momentum'], 1.0)
self.assertNotIn('weight_decay', list(solver_parameter_dict.keys()))
self.assertNotIn('power', list(solver_parameter_dict.keys()))
if 'iter_size' in solver_parameter_dict.keys():
params['iter_size'] = 3
params['regularization_type'] = '--'
del params['momentum']
with self.assertRaises(AssertionError):
_ = tmp(**params).Get_parameter_dict()
del params['regularization_type']
params['lr_policy'] = 'step'
with self.assertRaises(AssertionError):
_ = tmp(**params).Get_parameter_dict()
params['lr_policy'] = 'xx'
with self.assertRaises(AssertionError):
_ = tmp(**params).Get_parameter_dict()
params['lr_policy'] = 'exp'
with self.assertRaises(AssertionError):
_ = tmp(**params).Get_parameter_dict()
with self.assertRaises(AssertionError):
_ = tmp(net=net,
base_lr=2,
lr_policy='inv').Get_parameter_dict()
with self.assertRaises(AssertionError):
_ = tmp(net=net,
base_lr=2,
lr_policy='multistep').Get_parameter_dict()
with self.assertRaises(AssertionError):
_ = tmp(net=net,
base_lr=2,
lr_policy='poly').Get_parameter_dict()
with self.assertRaises(AssertionError):
_ = tmp(net=net, # noqa
base_lr=2,
lr_policy='sigmoid').Get_parameter_dict()
solver = tmp(base_lr=0.01,
momentum=0.95)
X = {'data': np.zeros((10, 3, 3, 3), dtype='float32'),
'annotations': np.ones((10, 1), dtype='float32')}
net.fit(20,
solver,
X)
accy = net.predict(X,
allow_train_phase_for_test=True)['accuracy'][0]
self.assertEqual(accy, 1.0)
def test_rmsprop(self):
"""Test the RMSProp solver."""
import numpy as np
import barrista.design as design
from barrista.design import (ConvolutionLayer, InnerProductLayer,
SoftmaxWithLossLayer, AccuracyLayer)
netspec = design.NetSpecification([[10, 3, 3, 3], [10]],
inputs=['data', 'annotations'],
phase=design.Phase.TRAIN)
layers = []
conv_params = {'Convolution_kernel_size': 3,
'Convolution_num_output': 3,
'Convolution_pad': 1}
layers.append(ConvolutionLayer(**conv_params))
layers.append(InnerProductLayer(InnerProduct_num_output=2,
tops=['out']))
layers.append(SoftmaxWithLossLayer(bottoms=['out', 'annotations']))
layers.append(AccuracyLayer(name='accuracy',
bottoms=['out', 'annotations']))
netspec.layers.extend(layers)
net = netspec.instantiate()
#######################################################################
# test rmsprop solver
#######################################################################
from barrista import solver as _solver
if not hasattr(_solver.SolverType, 'RMSPROP'):
return
tmp = _solver.Get_solver_class('rmsprop')
self.assertTrue(issubclass(tmp, _solver.RMSPropSolver))
tmp = _solver.Get_caffe_solver_class(_solver.SolverType.RMSPROP)
self.assertTrue(issubclass(tmp, _solver.RMSPropSolver))
with self.assertRaises(TypeError):
tmp(2)
with self.assertRaises(Exception):
tmp(iter_size=2)
with self.assertRaises(Exception):
tmp(base_lr=2)
with self.assertRaises(Exception):
tmp(base_lr=2,
delta=0.1)
tmp_instance = tmp(base_lr=2,
delta=0.1,
rms_decay=0.9)
solver_parameter_dict = tmp_instance.Get_parameter_dict()
self.assertEqual(solver_parameter_dict['base_lr'], 2)
self.assertEqual(solver_parameter_dict['iter_size'], 1)
self.assertEqual(solver_parameter_dict['lr_policy'], 'fixed')
self.assertEqual(solver_parameter_dict['regularization_type'], 'L2')
self.assertEqual(solver_parameter_dict['rms_decay'], 0.9)
self.assertEqual(solver_parameter_dict['delta'], 0.1)
self.assertNotIn('weight_decay', list(solver_parameter_dict.keys()))
self.assertNotIn('power', list(solver_parameter_dict.keys()))
tmp_instance = tmp(net=net,
base_lr=2,
delta=0.1,
rms_decay=0.9,
iter_size=2)
solver_parameter_dict = tmp_instance.Get_parameter_dict()
self.assertEqual(solver_parameter_dict['base_lr'], 2)
self.assertEqual(solver_parameter_dict['iter_size'], 2)
self.assertEqual(solver_parameter_dict['lr_policy'], 'fixed')
self.assertEqual(solver_parameter_dict['regularization_type'], 'L2')
self.assertEqual(solver_parameter_dict['rms_decay'], 0.9)
self.assertEqual(solver_parameter_dict['delta'], 0.1)
self.assertNotIn('weight_decay', list(solver_parameter_dict.keys()))
self.assertNotIn('power', list(solver_parameter_dict.keys()))
tmp_instance = tmp(net=net,
base_lr=2,
delta=0.1,
rms_decay=0.9,
iter_size=2,
regularization_type='L1')
solver_parameter_dict = tmp_instance.Get_parameter_dict()
self.assertEqual(solver_parameter_dict['base_lr'], 2)
self.assertEqual(solver_parameter_dict['iter_size'], 2)
self.assertEqual(solver_parameter_dict['lr_policy'], 'fixed')
self.assertEqual(solver_parameter_dict['regularization_type'], 'L1')
self.assertEqual(solver_parameter_dict['rms_decay'], 0.9)
self.assertEqual(solver_parameter_dict['delta'], 0.1)
self.assertNotIn('weight_decay', list(solver_parameter_dict.keys()))
self.assertNotIn('power', list(solver_parameter_dict.keys()))
with self.assertRaises(AssertionError):
_ = tmp(net=net,
base_lr=2,
delta=0.1,
rms_decay=0.9,
iter_size=3,
regularization_type='--').Get_parameter_dict()
with self.assertRaises(AssertionError):
_ = tmp(net=net,
base_lr=2,
delta=0.1,
rms_decay=0.9,
lr_policy='step').Get_parameter_dict()
with self.assertRaises(AssertionError):
_ = tmp(net=net,
base_lr=2,
delta=0.1,
rms_decay=0.9,
lr_policy='xx').Get_parameter_dict()
with self.assertRaises(AssertionError):
_ = tmp(net=net,
base_lr=2,
delta=0.1,
rms_decay=0.9,
lr_policy='exp').Get_parameter_dict()
with self.assertRaises(AssertionError):
_ = tmp(net=net,
base_lr=2,
delta=0.1,
rms_decay=0.9,
lr_policy='inv').Get_parameter_dict()
with self.assertRaises(AssertionError):
_ = tmp(net=net,
base_lr=2,
delta=0.1,
rms_decay=0.9,
lr_policy='multistep').Get_parameter_dict()
with self.assertRaises(AssertionError):
_ = tmp(net=net,
base_lr=2,
delta=0.1,
rms_decay=0.9,
lr_policy='poly').Get_parameter_dict()
with self.assertRaises(AssertionError):
_ = tmp(net=net, # noqa
base_lr=2,
delta=0.1,
rms_decay=0.9,
lr_policy='sigmoid').Get_parameter_dict()
solver = tmp(base_lr=2,
delta=0.1,
rms_decay=0.9)
X = {'data': np.zeros((10, 3, 3, 3), dtype='float32'),
'annotations': np.ones((10, 1), dtype='float32')}
net.fit(20,
solver,
X)
accy = net.predict(X,
allow_train_phase_for_test=True)['accuracy'][0]
self.assertEqual(accy, 1.0)
def test_adadelta(self):
"""Test the Adadelta solver."""
import numpy as np
import barrista.design as design
from barrista.design import (ConvolutionLayer, InnerProductLayer,
SoftmaxWithLossLayer, AccuracyLayer)
netspec = design.NetSpecification([[10, 3, 3, 3], [10]],
inputs=['data', 'annotations'],
phase=design.Phase.TRAIN)
layers = []
conv_params = {'Convolution_kernel_size': 3,
'Convolution_num_output': 3,
'Convolution_pad': 1}
layers.append(ConvolutionLayer(**conv_params))
layers.append(InnerProductLayer(InnerProduct_num_output=2,
tops=['out']))
layers.append(SoftmaxWithLossLayer(bottoms=['out', 'annotations']))
layers.append(AccuracyLayer(name='accuracy',
bottoms=['out', 'annotations']))
netspec.layers.extend(layers)
net = netspec.instantiate()
#######################################################################
# test AdaDelta solver
#######################################################################
from barrista import solver as _solver
if not hasattr(_solver.SolverType, 'ADADELTA'):
return
tmp = _solver.Get_solver_class('adadelta')
self.assertTrue(issubclass(tmp, _solver.AdaDeltaSolver))
tmp = _solver.Get_caffe_solver_class(_solver.SolverType.ADADELTA)
self.assertTrue(issubclass(tmp, _solver.AdaDeltaSolver))
with self.assertRaises(TypeError):
tmp(2)
with self.assertRaises(Exception):
tmp(iter_size=2)
with self.assertRaises(Exception):
tmp(base_lr=2)
with self.assertRaises(Exception):
tmp(base_lr=2,
delta=0.1)
tmp_instance = tmp(base_lr=2,
momentum=0.9)
solver_parameter_dict = tmp_instance.Get_parameter_dict()
self.assertEqual(solver_parameter_dict['base_lr'], 2)
if 'iter_size' in solver_parameter_dict.keys():
self.assertEqual(solver_parameter_dict['iter_size'], 1)
self.assertEqual(solver_parameter_dict['lr_policy'], 'fixed')
self.assertEqual(solver_parameter_dict['regularization_type'], 'L2')
self.assertEqual(solver_parameter_dict['momentum'], 0.9)
self.assertEqual(solver_parameter_dict['delta'], 1E-8)
self.assertNotIn('weight_decay', list(solver_parameter_dict.keys()))
self.assertNotIn('power', list(solver_parameter_dict.keys()))
params = {'net': net, 'base_lr': 2, 'momentum': 0.9, 'delta': 0.1}
if 'iter_size' in solver_parameter_dict.keys():
params['iter_size'] = 2
tmp_instance = tmp(**params)
solver_parameter_dict = tmp_instance.Get_parameter_dict()
self.assertEqual(solver_parameter_dict['base_lr'], 2)
if 'iter_size' in solver_parameter_dict.keys():
self.assertEqual(solver_parameter_dict['iter_size'], 2)
self.assertEqual(solver_parameter_dict['lr_policy'], 'fixed')
self.assertEqual(solver_parameter_dict['regularization_type'], 'L2')
self.assertEqual(solver_parameter_dict['momentum'], 0.9)
self.assertEqual(solver_parameter_dict['delta'], 0.1)
self.assertNotIn('weight_decay', list(solver_parameter_dict.keys()))
self.assertNotIn('power', list(solver_parameter_dict.keys()))
solver = tmp(base_lr=0.001,
momentum=0.9)
X = {'data': np.zeros((10, 3, 3, 3), dtype='float32'),
'annotations': np.ones((10, 1), dtype='float32')}
net.fit(20,
solver,
X)
accy = net.predict(X,
allow_train_phase_for_test=True)['accuracy'][0]
self.assertEqual(accy, 1.0)
def test_adagrad(self):
"""Test the AdaGrad solver."""
import numpy as np
import barrista.design as design
from barrista.design import (ConvolutionLayer, InnerProductLayer,
SoftmaxWithLossLayer, AccuracyLayer)
netspec = design.NetSpecification([[10, 3, 3, 3], [10]],
inputs=['data', 'annotations'],
phase=design.Phase.TRAIN)
layers = []
conv_params = {'Convolution_kernel_size': 3,
'Convolution_num_output': 3,
'Convolution_pad': 1}
layers.append(ConvolutionLayer(**conv_params))
layers.append(InnerProductLayer(InnerProduct_num_output=2,
tops=['out']))
layers.append(SoftmaxWithLossLayer(bottoms=['out', 'annotations']))
layers.append(AccuracyLayer(name='accuracy',
bottoms=['out', 'annotations']))
netspec.layers.extend(layers)
net = netspec.instantiate()
#######################################################################
# test AdaGrad solver
#######################################################################
from barrista import solver as _solver
if not hasattr(_solver.SolverType, 'ADAGRAD'):
return
tmp = _solver.Get_solver_class('adagrad')
self.assertTrue(issubclass(tmp, _solver.AdagradSolver))
tmp = _solver.Get_caffe_solver_class(_solver.SolverType.ADAGRAD)
self.assertTrue(issubclass(tmp, _solver.AdagradSolver))
with self.assertRaises(TypeError):
tmp(2)
with self.assertRaises(Exception):
tmp(iter_size=2)
with self.assertRaises(Exception):
tmp(base_lr=2)
tmp_instance = tmp(base_lr=2,
delta=0.1)
solver_parameter_dict = tmp_instance.Get_parameter_dict()
self.assertEqual(solver_parameter_dict['base_lr'], 2)
if 'iter_size' in solver_parameter_dict.keys():
self.assertEqual(solver_parameter_dict['iter_size'], 1)
self.assertEqual(solver_parameter_dict['lr_policy'], 'fixed')
self.assertEqual(solver_parameter_dict['regularization_type'], 'L2')
self.assertEqual(solver_parameter_dict['delta'], 0.1)
self.assertNotIn('weight_decay', list(solver_parameter_dict.keys()))
self.assertNotIn('power', list(solver_parameter_dict.keys()))
params = {'net': net, 'base_lr': 2, 'delta': 0.1}
if 'iter_size' in solver_parameter_dict.keys():
params['iter_size'] = 2
tmp_instance = tmp(**params)
solver_parameter_dict = tmp_instance.Get_parameter_dict()
self.assertEqual(solver_parameter_dict['base_lr'], 2)
if 'iter_size' in solver_parameter_dict.keys():
self.assertEqual(solver_parameter_dict['iter_size'], 2)
self.assertEqual(solver_parameter_dict['lr_policy'], 'fixed')
self.assertEqual(solver_parameter_dict['regularization_type'], 'L2')
self.assertEqual(solver_parameter_dict['delta'], 0.1)
self.assertNotIn('weight_decay', list(solver_parameter_dict.keys()))
self.assertNotIn('power', list(solver_parameter_dict.keys()))
solver = tmp(base_lr=0.001,
delta=0.1)
X = {'data': np.zeros((10, 3, 3, 3), dtype='float32'),
'annotations': np.ones((10, 1), dtype='float32')}
net.fit(20,
solver,
X)
accy = net.predict(X,
allow_train_phase_for_test=True)['accuracy'][0]
self.assertEqual(accy, 1.0)
def test_adam(self):
"""Test the ADAM solver."""
import numpy as np
import barrista.design as design
from barrista.design import (ConvolutionLayer, InnerProductLayer,
SoftmaxWithLossLayer, AccuracyLayer)
netspec = design.NetSpecification([[10, 3, 3, 3], [10]],
inputs=['data', 'annotations'],
phase=design.Phase.TRAIN)
layers = []
conv_params = {'Convolution_kernel_size': 3,
'Convolution_num_output': 3,
'Convolution_pad': 1}
layers.append(ConvolutionLayer(**conv_params))
layers.append(InnerProductLayer(InnerProduct_num_output=2,
tops=['out']))
layers.append(SoftmaxWithLossLayer(bottoms=['out', 'annotations']))
layers.append(AccuracyLayer(name='accuracy',
bottoms=['out', 'annotations']))
netspec.layers.extend(layers)
net = netspec.instantiate()
#######################################################################
# test adam solver
#######################################################################
from barrista import solver as _solver
if not hasattr(_solver.SolverType, 'ADAM'):
return
tmp = _solver.Get_solver_class('adam')
self.assertTrue(issubclass(tmp, _solver.AdamSolver))
tmp = _solver.Get_caffe_solver_class(_solver.SolverType.ADAM)
self.assertTrue(issubclass(tmp, _solver.AdamSolver))
with self.assertRaises(TypeError):
tmp(2)
with self.assertRaises(Exception):
tmp(iter_size=2)
tmp_instance = tmp(base_lr=2)
solver_parameter_dict = tmp_instance.Get_parameter_dict()
self.assertEqual(solver_parameter_dict['base_lr'], 2)
if 'iter_size' in solver_parameter_dict.keys():
self.assertEqual(solver_parameter_dict['iter_size'], 1)
self.assertEqual(solver_parameter_dict['lr_policy'], 'fixed')
self.assertEqual(solver_parameter_dict['regularization_type'], 'L2')
self.assertEqual(solver_parameter_dict['momentum'], 0.9)
self.assertEqual(solver_parameter_dict['momentum2'], 0.999)
self.assertEqual(solver_parameter_dict['delta'], 1E-8)
self.assertNotIn('weight_decay', list(solver_parameter_dict.keys()))
self.assertNotIn('power', list(solver_parameter_dict.keys()))
params = {'net': net, 'base_lr': 2, 'delta': 0.1}
if 'iter_size' in solver_parameter_dict.keys():
params['iter_size'] = 2
tmp_instance = tmp(**params)
solver_parameter_dict = tmp_instance.Get_parameter_dict()
self.assertEqual(solver_parameter_dict['base_lr'], 2)
if 'iter_size' in solver_parameter_dict.keys():
self.assertEqual(solver_parameter_dict['iter_size'], 2)
self.assertEqual(solver_parameter_dict['lr_policy'], 'fixed')
self.assertEqual(solver_parameter_dict['regularization_type'], 'L2')
self.assertEqual(solver_parameter_dict['momentum'], 0.9)
self.assertEqual(solver_parameter_dict['momentum2'], 0.999)
self.assertEqual(solver_parameter_dict['delta'], 0.1)
self.assertNotIn('weight_decay', list(solver_parameter_dict.keys()))
self.assertNotIn('power', list(solver_parameter_dict.keys()))
params['momentum2'] = 1.
params['regularization_type'] = 'L1'
tmp_instance = tmp(**params)
solver_parameter_dict = tmp_instance.Get_parameter_dict()
self.assertEqual(solver_parameter_dict['base_lr'], 2)
if 'iter_size' in solver_parameter_dict.keys():
self.assertEqual(solver_parameter_dict['iter_size'], 2)
self.assertEqual(solver_parameter_dict['lr_policy'], 'fixed')
self.assertEqual(solver_parameter_dict['regularization_type'], 'L1')
self.assertEqual(solver_parameter_dict['momentum'], 0.9)
self.assertEqual(solver_parameter_dict['momentum2'], 1.0)
self.assertEqual(solver_parameter_dict['delta'], 0.1)
self.assertNotIn('weight_decay', list(solver_parameter_dict.keys()))
self.assertNotIn('power', list(solver_parameter_dict.keys()))
solver = tmp(base_lr=0.001)
X = {'data': np.zeros((10, 3, 3, 3), dtype='float32'),
'annotations': np.ones((10, 1), dtype='float32')}
net.fit(20,
solver,
X)
accy = net.predict(X,
allow_train_phase_for_test=True)['accuracy'][0]
self.assertEqual(accy, 1.0)
if __name__ == '__main__':
unittest.main()
| classner/barrista | tests.py | Python | mit | 108,780 | 0.000276 |
# coding=utf-8
import pprint
import config
import json
import urllib
import requests
class Driver(object):
def __init__(self):
self.driver_type = self.__class__.__name__
# Get credentials from conf files for CMDB
pass
def get_driver_type(self):
return self.driver_type
def get_ci(self, ci):
pass
def set_ci(self, ci):
pass
class Itop(Driver):
def get_ci(self, ci):
print("Get from itop")
return True
def set_ci(self, ci):
username = config.alexandria.conf_file.get_driver_parameters("itop", "loginItop")
password = config.alexandria.conf_file.get_driver_parameters("itop", "passwordItop")
config.logger.debug("login : {}, password : {}".format(
username,
password
)
)
# Craft request body and header
urlbase = config.alexandria.conf_file.get_driver_parameters("itop", "endpoint")
request = '{"operation":"core/create","comment":"Synchronization from Alexandria","class":"Server","output_fields":"id,name,ram", "fields":{"org_id": "3","name":"' + ci.data["Name"] + '","ram":"' + format((ci.data["MemorySummary"])["TotalSystemMemoryGiB"]) + '","serialnumber":"' + ci.data["SerialNumber"] + '"}}'
urlparam = {'version' : '1.0',
'auth_user' : username,
'auth_pwd' : password,
'json_data' : request
}
#header = {'Content-type': 'application/json'}
url = urlbase + '?' + urllib.urlencode(urlparam)
config.logger.debug(url)
#=======================================================================
# answer = requests.post(url,
# headers=header,
# verify="False"
# )
#=======================================================================
answer = requests.post(url,
auth=(username,password)
)
config.logger.debug(answer.status_code)
config.logger.debug(answer.text)
class Redfish(Driver):
def get_ci(self,ci):
print("Get from redfish")
import redfish
print(ci.ip_mgmt + " - " + ci.login + " - " + ci.password)
#remote_mgmt = redfish.connect(ci.ip_mgmt, ci.login, ci.password, verify_cert=False)
remote_mgmt = redfish.connect(ci.ip_mgmt, ci.login, ci.password, simulator=True, enforceSSL=False)
ci.ci_type = remote_mgmt.Systems.systems_list[0].get_parameter("@odata.type")
ci.data = remote_mgmt.Systems.systems_list[0].get_parameters()
#print("Redfish API version : {} \n".format(remote_mgmt.get_api_version()))
return True
def set_ci(self, ci):
print "Push to Redfish"
return True
class Ironic(Driver):
pass
class Mondorescue(Driver):
pass
class Fakecmdb(Driver):
def set_ci(self, ci):
# Determine ci type so we can do the proper action.
pp = pprint.PrettyPrinter(indent=4)
if ci.ci_type == "Manager":
print("We are in Fakecmdb driver !")
pp.pprint(ci.data)
# Simply write a json file with ci.data content.
with open("Fakecmdb.json", "w") as jsonfile:
json.dump(ci.data, jsonfile, indent=4)
jsonfile.close()
#
#=======================================================================
class Fakeprovider(Driver):
def get_ci(self, ci):
# Simulate a driver that will provide Manager data.
# TODO a connect method must be implemented
# Assuming the connection is ok.
# Now create a copy of manager model from reference model.
#ci.ci_type = "Manager"
#ci.data = config.alexandria.model.get_model("Manager")
# Update the structure with data
# TODO : think to encapsulate to not edit ci.data directly.
# This could be also a way to check source of truth.
# If data provided by our driver is not the source of truth
# then discard it.
#ci.data["ManagerType"] = "BMC"
#ci.data["Model"] = "Néné Manager"
#ci.data["FirmwareVersion"] = "1.00"
#if ci.data is config.alexandria.model.Manager:
# print "identical"
pp = pprint.PrettyPrinter(indent=4)
pp.pprint(ci.ci_type)
class DriverCollection(list):
pass
| uggla/alexandria | alexandria/drivers.py | Python | apache-2.0 | 4,848 | 0.010111 |
from gtk import *
import sys
import gview
import string
import gvutils
import GtkExtra
import time
import os
import gviewapp
import gvplot
import gdal
sys.path.append('.')
import ev_profile
from quality_hist_tool import QualityHistogramROITool
LYR_GENERIC = 0
LYR_LANDSAT8 = 1
LYR_SOURCE_TRACE = 2
LYR_QUALITY = 3
LYR_NOT_RASTER = 4
def layer_class(layer):
try:
dataset = layer.get_parent().get_dataset()
except:
return LYR_NOT_RASTER
if (dataset.GetRasterBand(1).DataType == gdal.GDT_UInt16 or dataset.GetRasterBand(1).DataType == gdal.GDT_Int16) and dataset.RasterCount == 4:
return LYR_LANDSAT8
if dataset.GetDescription().find('st-') != -1:
return LYR_SOURCE_TRACE
if dataset.RasterCount > 1 and dataset.GetRasterBand(1).DataType == gdal.GDT_Float32:
return LYR_QUALITY
return LYR_GENERIC
class MosaicViewerTool(gviewapp.Tool_GViewApp):
def __init__(self,app=None):
gviewapp.Tool_GViewApp.__init__(self,app)
self.init_menu()
self.hist_tool = QualityHistogramROITool(app)
self.graphing = False
def launch_dialog(self,*args):
self.win = MosaicDialog(app=gview.app, tool=self)
self.win.show()
self.win.rescale_landsat_cb()
self.win.gui_refresh()
self.track_view_activity()
def key_down_cb(self, viewarea, event):
try:
print 'down %s/%d' % (chr(event.keyval), event.keyval)
except:
print 'down <undefined>/%d' % event.keyval
if event.keyval == ord('g'):
if not self.graphing:
print 'enable graphing'
self.graphing = True
else:
print 'disable graphing'
self.graphing = False
def key_up_cb(self, viewarea, event):
try:
print 'up %s/%d' % (chr(event.keyval), event.keyval)
except:
print 'up <undefined>/%d' % event.keyval
def mouse_cb(self, viewarea, event):
#print 'mouse event:', event.type
if event.type == 4:
print event.type, event.button, event.state, event.x, event.y
if self.graphing and event.button == 1:
ev_profile.graph(viewarea.map_pointer((event.x, event.y)))
elif event.type == 3:
#print event.x, event.y
#print viewarea.map_pointer((event.x, event.y))
#if self.graphing:
# ev_profile.graph(viewarea.map_pointer((event.x, event.y)))
pass
def track_view_activity(self):
view = gview.app.view_manager.get_active_view_window()
view.viewarea.connect('key-press-event', self.key_down_cb)
view.viewarea.connect('key-release-event', self.key_up_cb)
view.viewarea.connect('motion-notify-event', self.mouse_cb)
view.viewarea.connect('button-press-event', self.mouse_cb)
def init_menu(self):
self.menu_entries.set_entry("Tools/Mosaic Viewer",2,
self.launch_dialog)
class MosaicDialog(GtkWindow):
def __init__(self,app=None, tool=None):
self.tool = tool
self.updating = False
GtkWindow.__init__(self)
self.quality_layer = None
self.set_title('Mosaic Viewer')
self.create_gui()
self.show()
self.gui_refresh()
def show(self):
GtkWindow.show_all(self)
def close(self, *args):
self.hide()
self.visibility_flag = 0
return TRUE
def set_quality_band_cb(self,*args):
if self.updating or self.quality_layer is None:
return
try:
scale_min = float(self.min_entry.get_text())
except:
scale_min = 0.0
try:
scale_max = float(self.max_entry.get_text())
except:
scale_max = 1.0;
dataset = self.quality_layer.get_parent().get_dataset()
new_select = None
new_text = self.band_combo.entry.get_text()
for i in range(len(self.quality_band_names)):
if new_text == self.quality_band_names[i]:
new_select = i+1
raster = gview.manager.get_dataset_raster( dataset, new_select)
for isrc in range(3):
self.quality_layer.set_source(isrc, raster, scale_min, scale_max)
self.tool.hist_tool.analyze_cb()
def quality_refresh(self):
assert self.quality_layer is not None
dataset = self.quality_layer.get_parent().get_dataset()
self.quality_band_names = []
for band_num in range(1,dataset.RasterCount+1):
self.quality_band_names.append(
dataset.GetRasterBand(band_num).GetMetadata()['DESCRIPTION'])
self.band_combo.set_popdown_strings( self.quality_band_names)
def gui_refresh(self):
if self.quality_layer is not None:
self.quality_refresh()
def adjustment_cb(self,adjustment,*args):
if self.updating or self.quality_layer is None:
return
value = adjustment.value
if adjustment == self.min_adjustment:
self.min_entry.set_text(str(value))
else:
self.max_entry.set_text(str(value))
self.set_quality_band_cb()
def entry_cb(self,entry,*args):
if self.updating:
return
self.set_quality_band_cb()
def find_tool(self, tool_name):
for (name, tool_inst) in gview.app.Tool_List:
if name == tool_name:
return tool_inst
return None
def create_gui(self):
vbox = GtkVBox(spacing=5)
vbox.set_border_width(10)
self.add(vbox)
# Add the Quality Band Selection Combo
hbox = GtkHBox(spacing=5)
vbox.pack_start(hbox,expand=FALSE)
hbox.pack_start(GtkLabel('Quality:'), expand=FALSE)
self.band_combo = GtkCombo()
hbox.pack_start(self.band_combo)
self.band_combo.entry.connect('changed', self.set_quality_band_cb)
self.band_combo.set_popdown_strings(
['XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX'])
band_list = ['inactive']
self.band_combo.set_popdown_strings( band_list )
# ------ Quality Scale Min -------
hbox = GtkHBox(spacing=5)
vbox.pack_start(hbox)
hbox.pack_start(GtkLabel('Scale Min:'),expand=FALSE)
self.min_adjustment = GtkAdjustment(0.0, 0.0, 1.0, 0.05, 0.05, 0.05)
self.min_adjustment.connect('value-changed',self.adjustment_cb)
self.min_slider = GtkHScale(self.min_adjustment)
self.min_slider.set_digits(3)
hbox.pack_start(self.min_slider)
self.min_entry = GtkEntry(maxlen=8)
self.min_entry.connect('activate',self.entry_cb)
self.min_entry.connect('leave-notify-event',self.entry_cb)
self.min_entry.set_text('0.0')
hbox.pack_start(self.min_entry,expand=FALSE)
# ------ Quality Scale Max -------
hbox = GtkHBox(spacing=5)
vbox.pack_start(hbox)
hbox.pack_start(GtkLabel('Scale Max:'),expand=FALSE)
self.max_adjustment = GtkAdjustment(1.0, 0.0, 1.0, 0.05, 0.05, 0.05)
self.max_adjustment.connect('value-changed',self.adjustment_cb)
self.max_slider = GtkHScale(self.max_adjustment)
self.max_slider.set_digits(3)
hbox.pack_start(self.max_slider)
self.max_entry = GtkEntry(maxlen=8)
self.max_entry.connect('activate',self.entry_cb)
self.max_entry.connect('leave-notify-event',self.entry_cb)
self.max_entry.set_text('1.0')
hbox.pack_start(self.max_entry,expand=FALSE)
# Add the Rescale and Close action buttons.
box2 = GtkHBox(spacing=10)
vbox.add(box2)
box2.show()
execute_btn = GtkButton("Histogram")
execute_btn.connect("clicked", self.tool.hist_tool.roipoitool_cb)
box2.pack_start(execute_btn)
execute_btn = GtkButton("Rescale")
execute_btn.connect("clicked", self.rescale_landsat_cb)
box2.pack_start(execute_btn)
execute_btn = GtkButton("Reload")
execute_btn.connect("clicked", self.reload_cb)
box2.pack_start(execute_btn)
close_btn = GtkButton("Close")
close_btn.connect("clicked", self.close)
box2.pack_start(close_btn)
def reload_cb(self, *args):
import quality_hist_tool
import ev_mosaic_viewer
        print 'Attempting reload:'
reload(ev_profile)
reload(quality_hist_tool)
reload(ev_mosaic_viewer)
print 'Reload apparently successful.'
def rescale_landsat_cb( self, *args ):
view = gview.app.view_manager.get_active_view_window()
layers = view.viewarea.list_layers()
for layer in layers:
lclass = layer_class(layer)
if lclass == LYR_LANDSAT8:
for isrc in range(3):
layer.set_source(isrc, layer.get_data(isrc),
4000, 12000)
dataset = layer.get_parent().get_dataset()
alpha_raster = gview.manager.get_dataset_raster( dataset, 4 )
layer.set_source(3, alpha_raster, 0 , 255)
if lclass == LYR_QUALITY:
if self.quality_layer is None:
self.quality_layer = layer
layer.set_source(0, layer.get_data(0), 0.0, 1.0)
layer.set_source(1, layer.get_data(0), 0.0, 1.0)
layer.set_source(2, layer.get_data(0), 0.0, 1.0)
self.gui_refresh()
TOOL_LIST = ['MosaicViewerTool']
| warmerdam/plcompositor | oev_tools/ev_mosaic_viewer.py | Python | apache-2.0 | 9,675 | 0.007649 |
try:
from urllib.parse import urljoin
except ImportError:
from urlparse import urljoin
try:
import cPickle as pickle
except ImportError:
import pickle
# Handle the case where the requests module has been patched to not have
# urllib3 bundled as part of its source.
try:
from pip._vendor.requests.packages.urllib3.response import HTTPResponse
except ImportError:
from pip._vendor.urllib3.response import HTTPResponse
try:
from pip._vendor.requests.packages.urllib3.util import is_fp_closed
except ImportError:
from pip._vendor.urllib3.util import is_fp_closed
# Replicate some six behaviour
try:
text_type = unicode
except NameError:
text_type = str
| ncos/lisa | src/lisa_drive/scripts/venv/lib/python3.5/site-packages/pip-10.0.1-py3.5.egg/pip/_vendor/cachecontrol/compat.py | Python | mit | 724 | 0 |
#!/usr/bin/env python
# Copyright (c) 2005-2009 Jaroslav Gresula
#
# Distributed under the MIT license (See accompanying file
# LICENSE.txt or copy at http://jagpdf.org/LICENSE.txt)
#
import jagpdf
import jag.testlib as testlib
def test_main(argv=None):
doc = testlib.create_test_doc(argv, 'defaultfont2.pdf')
doc.page_start(200, 36)
doc.page().canvas().text(10, 10, 'written in the default font')
doc.page_end()
doc.page_start(200, 48)
canvas = doc.page().canvas()
canvas.state_save()
courier = doc.font_load('standard;name=Courier;size=10')
canvas.text_font(courier)
canvas.text(10, 10, 'written in Courier')
canvas.state_restore()
doc.page().canvas().text(10, 30, 'written in the default font')
doc.page_end()
doc.page_start(200, 36)
doc.page().canvas().text(10, 10, 'written in the default font')
doc.page_end()
doc.finalize()
if __name__ == "__main__":
test_main()
| jgresula/jagpdf | code/test/apitest/py/defaultfont2.py | Python | mit | 952 | 0.003151 |
import os
import pathlib
import locale
import time
from . import xn_logger
logger = xn_logger.get(__name__, debug=False)
# Encapsulates the storage of downloaded pages.
# All downloaded files are kept in ./cache.
# A page is first requested from this cache, and only if get_page() returns
# None is it downloaded over the network.
class XNovaPageCache:
def __init__(self):
self._pages = {}
self._mtimes = {}
self._page_cache_dir = './cache/page'
self._img_cache_dir = './cache/img'
self.save_load_encoding = locale.getpreferredencoding()
logger.info('Locale preferred encoding: {0}'.format(self.save_load_encoding))
# scan ./cache/page directory and load all files into memory
def load_from_disk_cache(self, clean=True):
if clean:
self._pages = {}
self._mtimes = {}
cache_dir = pathlib.Path(self._page_cache_dir)
if not cache_dir.exists():
try:
cache_dir.mkdir(parents=True)
logger.info('Created pages cache dir')
except OSError as ose:
logger.error('Cannot create page cache dir: {0}'.format(str(ose)))
num_loaded = 0
for subitem in cache_dir.iterdir():
if subitem.is_file():
try:
# get file last modification time
stt = subitem.stat()
mtime = int(stt.st_mtime)
with subitem.open(mode='rt', encoding=self.save_load_encoding) as f:
fname = subitem.name
contents = f.read()
self._pages[fname] = contents # save file contents
self._mtimes[fname] = mtime # save also modification time
num_loaded += 1
except IOError as ioe:
pass
except UnicodeDecodeError as ude:
logger.error('Encoding error in [{0}], skipped: {1}'.format(subitem.name, str(ude)))
logger.info('Loaded {0} cached pages.'.format(num_loaded))
# ensure that image cache dir also exists
cache_dir = pathlib.Path(self._img_cache_dir)
if not cache_dir.exists():
try:
cache_dir.mkdir(parents=True)
logger.info('Created images cache dir')
except OSError as ose:
                logger.error('Cannot create img cache dir: {0}'.format(str(ose)))
# save page into cache
def set_page(self, page_name, contents):
if page_name is None:
return
self._pages[page_name] = contents
self._mtimes[page_name] = int(time.time()) # also update modified time!
try:
fn = os.path.join(self._page_cache_dir, page_name)
f = open(fn, mode='wt', encoding=self.save_load_encoding)
# f = open(fn, mode='wt')
f.write(contents)
f.close()
except IOError as ioe:
logger.error('set_page("{0}", ...): IOError: {1}'.format(page_name, str(ioe)))
except UnicodeEncodeError as uee:
logger.critical('set_page("{0}", ...): UnicodeEncodeError: {1}'.format(page_name, str(uee)))
logger.critical(' self.save_load_encoding is "{0}"'.format(self.save_load_encoding))
def save_image(self, img_path: str, img_bytes: bytes):
img_path_plain = img_path.replace('/', '_')
filename = os.path.join(self._img_cache_dir, img_path_plain)
try:
with open(filename, mode='wb') as f:
f.write(img_bytes)
except IOError as ioe:
logger.error('image [{0}] save failed: [{1}]'.format(filename, str(ioe)))
    # Get a page from the cache.
    # A page is first requested from this cache; only if get_page() returns
    # None should it be downloaded over the network.
def get_page(self, page_name, max_cache_secs=None):
if page_name is None:
return None
if len(page_name) < 1:
return None
if page_name in self._pages:
# should we check file cache time?
if max_cache_secs is None:
# do not check cache time, just return
return self._pages[page_name]
# get current time
tm_now = int(time.time())
tm_cache = self._mtimes[page_name]
tm_diff = tm_now - tm_cache
if tm_diff <= max_cache_secs:
return self._pages[page_name]
logger.info('cache considered invalid for [{0}]: {1}s > {2}s'.format(page_name, tm_diff, max_cache_secs))
return None
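# Illustrative usage sketch of the flow described in the class comment above
# (not executed here; 'download_over_network' is a hypothetical placeholder for
# whatever network code calls this cache):
#
#     cache = XNovaPageCache()
#     cache.load_from_disk_cache()
#     content = cache.get_page('overview.html', max_cache_secs=300)
#     if content is None:
#         # cache miss, or the cached copy is older than 300 seconds
#         content = download_over_network('overview.html')
#         cache.set_page('overview.html', content)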
| minlexx/xnovacmd | ui/xnova/xn_page_cache.py | Python | gpl-2.0 | 4,643 | 0.002585 |
from decimal import Decimal
from django.conf import settings
from coupons.models import Coupon
from shop.models import Product
class Cart(object):
def __init__(self, request):
"""
initialize the cart.
"""
self.session = request.session
cart = self.session.get(settings.CART_SESSION_ID)
if not cart:
# save an empty cart in the session
cart = self.session[settings.CART_SESSION_ID] = {}
self.cart = cart
#store current applied coupon
self.coupon_id = self.session.get('coupon_id')
def add(self, product, quantity=1, update_quantity=False):
"""
Add a product to the cart or update it quantity
"""
product_id = str(product.id)
if product_id not in self.cart:
self.cart[product_id] = {'quantity': 0,
'price': str(product.price)}
if update_quantity:
print('quantity', quantity)
self.cart[product_id]['quantity'] = quantity
else:
self.cart[product_id]['quantity'] += quantity
def save(self):
# Update the session cart
self.session[settings.CART_SESSION_ID] = self.cart
#mark the session as "modified" to make sure it is saved
self.session.modified = True
def remove(self, product):
"""
Remove a product from the cart.
"""
product_id = str(product.id)
if product_id in self.cart:
del self.cart[product_id]
self.save()
def __iter__(self):
"""
Iterate over the items in the cart and get the products
from the database.
"""
product_ids = self.cart.keys()
# get the product objects and add them to the cart
products = Product.objects.filter(id__in=product_ids)
for product in products:
self.cart[str(product.id)]['product'] = product
for item in self.cart.values():
item['price'] = Decimal(item['price'])
item['total_price'] = item['price'] * item['quantity']
yield item
def __len__(self):
"""
Count all items in the cart.
"""
return sum(item['quantity'] for item in self.cart.values())
def get_total_price(self):
return sum(Decimal(item['price']) * item['quantity'] for item in
self.cart.values())
def clear(self):
# remove cart from session
del self.session[settings.CART_SESSION_ID]
self.session.modified = True
@property
def coupon(self):
if self.coupon_id:
return Coupon.objects.get(id=self.coupon_id)
return None
def get_discount(self):
if self.coupon:
return (self.coupon.discount / Decimal('100')) \
* self.get_total_price()
return Decimal('0')
def get_total_price_after_discount(self):
return self.get_total_price() - self.get_discount()
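# Illustrative usage sketch inside a Django view (assumes 'request' and
# 'product' are provided by the surrounding view code; not part of this module):
#
#     cart = Cart(request)
#     cart.add(product, quantity=2)    # add two units of the product
#     cart.save()                      # persist the change in the session
#     for item in cart:                # yields dicts with 'product', 'price', 'total_price', ...
#         line_total = item['total_price']
#     total = cart.get_total_price_after_discount()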
| spectrumone/online-shop-template | myshop/cart/cart.py | Python | mit | 3,017 | 0.001326 |
#
# Copyright (c) 2013-2014, Scott J Maddox
#
# This file is part of openbandparams.
#
# openbandparams is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# openbandparams is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with openbandparams. If not, see <http://www.gnu.org/licenses/>.
#
#############################################################################
# Make sure we import the local openbandparams version
import os
import sys
sys.path.insert(0,
os.path.abspath(os.path.join(os.path.dirname(__file__), '../../../..')))
from openbandparams import *
import matplotlib.pyplot as plt
import numpy
quaternary = GaInAsSb
T = 300
# initialize the plot
fig = plt.figure()
ax = fig.add_subplot(111)
plt.xlabel('Lattice Parameter at %g K ($\AA$)' % T)
plt.ylabel('Bandgap at %g K (eV)' % T)
# plot the binaries
xs = []
y_Gamma = []
y_X = []
y_L = []
labels = []
for b in quaternary.binaries:
xs.append(b.a(T=T))
y_Gamma.append(b.Eg_Gamma(T=T))
y_X.append(b.Eg_X(T=T))
y_L.append(b.Eg_L(T=T))
labels.append(b.name)
ax.plot(xs, y_Gamma, 'r.')
ax.plot(xs, y_X, 'b.')
ax.plot(xs, y_L, 'g.')
# label the binaries
for x, y, label in zip(xs, y_Gamma, labels):
ax.annotate(label, xy=(x, y), xytext=(-5, 5), ha='right', va='bottom',
bbox=dict(linewidth=0, fc='white', alpha=0.9),
textcoords='offset points')
for x, y, label in zip(xs, y_X, labels):
ax.annotate(label, xy=(x, y), xytext=(-5, 5), ha='right', va='bottom',
bbox=dict(linewidth=0, fc='white', alpha=0.9),
textcoords='offset points')
for x, y, label in zip(xs, y_L, labels):
ax.annotate(label, xy=(x, y), xytext=(-5, 5), ha='right', va='bottom',
bbox=dict(linewidth=0, fc='white', alpha=0.9),
textcoords='offset points')
# plot the quaternary
indices = numpy.arange(100)
fractions = numpy.linspace(0, 1, 100)
x = numpy.empty(100, dtype=numpy.float)
y_Gamma = numpy.empty(100, dtype=numpy.float)
y_X = numpy.empty(100, dtype=numpy.float)
y_L = numpy.empty(100, dtype=numpy.float)
first = True
for xfrac in numpy.linspace(0, 1, 10):
for i, yfrac in zip(indices, fractions):
instance = quaternary(x=xfrac, y=yfrac)
x[i] = instance.a(T=T)
y_Gamma[i] = instance.Eg_Gamma(T=T)
y_X[i] = instance.Eg_X(T=T)
y_L[i] = instance.Eg_L(T=T)
if first:
ax.plot(x, y_Gamma, 'r-', label='$\Gamma$')
ax.plot(x, y_X, 'b-', label='$X$')
ax.plot(x, y_L, 'g-', label='$L$')
first = False
else:
ax.plot(x, y_Gamma, 'r-')
ax.plot(x, y_X, 'b-')
ax.plot(x, y_L, 'g-')
for yfrac in numpy.linspace(0, 1, 10):
for i, xfrac in zip(indices, fractions):
instance = quaternary(x=xfrac, y=yfrac)
x[i] = instance.a(T=T)
y_Gamma[i] = instance.Eg_Gamma(T=T)
y_X[i] = instance.Eg_X(T=T)
y_L[i] = instance.Eg_L(T=T)
ax.plot(x, y_Gamma, 'r--')
ax.plot(x, y_X, 'b--')
ax.plot(x, y_L, 'g--')
plt.xlim(6, 6.5)
plt.ylim(0, 0.8)
plt.legend(loc='best')
if __name__ == '__main__':
import sys
if len(sys.argv) > 1:
output_filename = sys.argv[1]
plt.savefig(output_filename)
else:
        plt.show()
| scott-maddox/openbandparams | src/openbandparams/examples/advanced/GaInAsSb_on_GaSb/Plot_Bandgap_vs_Lattice_Constant_of_Quaternary3.py | Python | agpl-3.0 | 3,726 | 0.001879 |
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
from __future__ import absolute_import
# Standard imports
from future import standard_library
standard_library.install_aliases()
from builtins import *
from past.utils import old_div
import unittest
import json
import logging
import re
from datetime import datetime, timedelta
# Our imports
from emission.analysis.result import carbon
import emission.core.get_database as edb
from emission.core.get_database import get_mode_db, get_section_db
import emission.tests.common as etc
from emission.core import common
class TestCarbon(unittest.TestCase):
def setUp(self):
from copy import copy
self.testUsers = ["[email protected]", "[email protected]", "[email protected]",
"[email protected]", "[email protected]"]
self.serverName = 'localhost'
# Sometimes, we may have entries left behind in the database if one of the tests failed
# or threw an exception, so let us start by cleaning up all entries
etc.dropAllCollections(edb._get_current_db())
self.ModesColl = get_mode_db()
self.assertEquals(self.ModesColl.estimated_document_count(), 0)
etc.loadTable(self.serverName, "Stage_Modes", "emission/tests/data/modes.json")
etc.loadTable(self.serverName, "Stage_Sections", "emission/tests/data/testCarbonFile")
self.SectionsColl = get_section_db()
self.walkExpect = 1057.2524056424411
self.busExpect = 2162.668467546699
self.busCarbon = old_div(267.0,1609)
self.airCarbon = old_div(217.0,1609)
self.driveCarbon = old_div(278.0,1609)
self.busOptimalCarbon = old_div(92.0,1609)
self.now = datetime.now()
self.dayago = self.now - timedelta(days=1)
self.weekago = self.now - timedelta(weeks = 1)
for section in self.SectionsColl.find():
section['section_start_datetime'] = self.dayago
section['section_end_datetime'] = self.dayago + timedelta(hours = 1)
if section['confirmed_mode'] == 5:
airSection = copy(section)
airSection['confirmed_mode'] = 9
airSection['_id'] = section['_id'] + "_air"
self.SectionsColl.insert(airSection)
# print("Section start = %s, section end = %s" %
# (section['section_start_datetime'], section['section_end_datetime']))
self.SectionsColl.save(section)
def tearDown(self):
for testUser in self.testUsers:
etc.purgeSectionData(self.SectionsColl, testUser)
self.ModesColl.remove()
self.assertEquals(self.ModesColl.estimated_document_count(), 0)
def getMyQuerySpec(self, user, modeId):
return common.getQuerySpec(user, modeId, self.weekago, self.now)
def testGetModes(self):
modes = carbon.getAllModes()
for mode in modes:
print(mode['mode_id'], mode['mode_name'])
self.assertEquals(len(modes), 9)
def testGetDisplayModes(self):
modes = carbon.getDisplayModes()
for mode in modes:
print(mode['mode_id'], mode['mode_name'])
# skipping transport, underground and not a trip
self.assertEquals(len(modes), 8)
def testGetTripCountForMode(self):
modes = carbon.getDisplayModes()
# try different modes
self.assertEqual(carbon.getTripCountForMode("[email protected]", 1, self.weekago, self.now), 1) # walk
self.assertEqual(carbon.getTripCountForMode("[email protected]", 5, self.weekago, self.now), 1) # bus
self.assertEqual(carbon.getTripCountForMode("[email protected]", 9, self.weekago, self.now), 1) # bus
# try different users
self.assertEqual(carbon.getTripCountForMode("[email protected]", 1, self.weekago, self.now), 1) # walk
self.assertEqual(carbon.getTripCountForMode("[email protected]", 5, self.weekago, self.now), 1) # bus
# try to sum across users
# We have 5 users - best, fest, rest, nest and test
self.assertEqual(carbon.getTripCountForMode(None, 1, self.weekago, self.now), 5) # walk
self.assertEqual(carbon.getTripCountForMode(None, 5, self.weekago, self.now), 5) # bus
def testTotalModeShare(self):
modeshare = carbon.getModeShare(None, self.weekago, self.now)
self.assertEqual(modeshare['walking'], 5)
self.assertEqual(modeshare['bus'], 5)
self.assertEqual(modeshare['cycling'], 0)
self.assertEqual(modeshare['car'], 0)
self.assertEqual(modeshare['train'], 0)
# self.assertFalse(modeshare.keys() contains 'not a trip')
# self.assertFalse(modeshare.keys() contains 'transport')
def testMyModeShare(self):
modeshare = carbon.getModeShare('[email protected]', self.weekago, self.now)
print(modeshare)
self.assertEqual(modeshare['walking'], 1)
self.assertEqual(modeshare['bus'], 1)
self.assertEqual(modeshare['cycling'], 0)
self.assertEqual(modeshare['car'], 0)
self.assertEqual(modeshare['train'], 0)
# self.assertFalse(modeshare.keys() contains 'not a trip')
# self.assertFalse(modeshare.keys() contains 'transport')
def testDistanceForMode(self):
# try different modes
self.assertEqual(carbon.getDistanceForMode(self.getMyQuerySpec("[email protected]", 1)),
self.walkExpect) # walk
self.assertEqual(carbon.getDistanceForMode(self.getMyQuerySpec("[email protected]", 5)),
self.busExpect) # bus
# try different users
self.assertEqual(carbon.getDistanceForMode(self.getMyQuerySpec("[email protected]", 1)), self.walkExpect) # walk
self.assertEqual(carbon.getDistanceForMode(self.getMyQuerySpec("[email protected]", 5)), self.busExpect) # bus
# try to sum across users
# We have 5 users - best, fest, rest, nest and test
self.assertEqual(carbon.getDistanceForMode(self.getMyQuerySpec(None, 1)), len(self.testUsers) * self.walkExpect) # walk
self.assertEqual(carbon.getDistanceForMode(self.getMyQuerySpec(None, 5)), len(self.testUsers) * self.busExpect) # bus
def testMyModeDistance(self):
myModeDistance = carbon.getModeShareDistance('[email protected]', self.weekago, self.now)
self.assertEqual(myModeDistance['walking'], self.walkExpect)
self.assertEqual(myModeDistance['cycling'], 0)
self.assertEqual(myModeDistance['bus'], self.busExpect)
self.assertEqual(myModeDistance['train'], 0)
def testTotalModeDistance(self):
totalModeDistance = carbon.getModeShareDistance(None, self.weekago, self.now)
self.assertEqual(totalModeDistance['walking'], len(self.testUsers) * self.walkExpect)
self.assertEqual(totalModeDistance['cycling'], 0)
self.assertEqual(totalModeDistance['bus'], len(self.testUsers) * self.busExpect)
self.assertEqual(totalModeDistance['train'], 0)
def testMyCarbonFootprint(self):
myModeDistance = carbon.getModeCarbonFootprint('[email protected]', carbon.carbonFootprintForMode, self.weekago, self.now)
self.assertEqual(myModeDistance['walking'], 0)
self.assertEqual(myModeDistance['cycling'], 0)
self.assertEqual(myModeDistance['bus_short'], (self.busCarbon * self.busExpect/1000))
self.assertEqual(myModeDistance['train_short'], 0)
# We duplicate the bus trips to get air trips, so the distance should be the same
self.assertEqual(myModeDistance['air_short'], (self.airCarbon * self.busExpect/1000))
def testTotalCarbonFootprint(self):
totalModeDistance = carbon.getModeCarbonFootprint(None, carbon.carbonFootprintForMode, self.weekago, self.now)
self.assertEqual(totalModeDistance['walking'], 0)
self.assertEqual(totalModeDistance['cycling'], 0)
# We divide by 1000 to make it comprehensible in getModeCarbonFootprint
self.assertEqual(totalModeDistance['bus_short'], old_div((self.busCarbon * len(self.testUsers) * self.busExpect),1000))
self.assertEqual(totalModeDistance['air_short'], old_div((self.airCarbon * len(self.testUsers) * self.busExpect),1000))
self.assertEqual(totalModeDistance['train_short'], 0)
def testMySummary(self):
(myModeShareCount, avgModeShareCount,
myModeShareDistance, avgModeShareDistance,
myModeCarbonFootprint, avgModeCarbonFootprint,
myModeCarbonFootprintNoLongMotorized, avgModeCarbonFootprintNoLongMotorized,
myOptimalCarbonFootprint, avgOptimalCarbonFootprint,
myOptimalCarbonFootprintNoLongMotorized, avgOptimalCarbonFootprintNoLongMotorized) = carbon.getFootprintCompare('[email protected]')
# >>> m = {'air_long': 0, 'air_short': 0.2, 'bus_long': 0, 'bus_short': 0.3}
# >>> f = [(i, m[i]) for i in m if m[i] != 0]
# >>> f
# [('bus_short', 0.3), ('air_short', 0.2)]
# >>> dict(f)
# {'bus_short': 0.3, 'air_short': 0.2}
filterZero = lambda m: dict([(i, m[i]) for i in m if m[i] != 0])
self.assertEqual(len(myModeShareCount), len(carbon.getDisplayModes()))
self.assertEqual(len(myModeShareDistance), len(carbon.getDisplayModes()))
# We have duplicated the bus trip to get bus, air and unconfirmed trips.
# we ignore the unconfirmed trip, so only expect to get three values...
self.assertAlmostEqual(sum(myModeShareDistance.values()), 2 * self.busExpect + self.walkExpect, places = 4)
self.assertEqual(filterZero(myModeShareDistance),
{'bus': self.busExpect,
'walking': self.walkExpect,
'air': self.busExpect})
logging.debug(filterZero(myModeShareDistance))
self.assertEqual(filterZero(myModeCarbonFootprint),
{'bus_short': old_div((self.busExpect * self.busCarbon),1000),
'air_short': old_div((self.busExpect * self.airCarbon),1000)})
self.assertEqual(filterZero(myModeCarbonFootprintNoLongMotorized),
{'bus_short': old_div((self.busExpect * self.busCarbon),1000)})
self.assertEqual(filterZero(myOptimalCarbonFootprint),
{'air_short': old_div((self.busExpect * self.busOptimalCarbon),1000)})
self.assertEqual(filterZero(myOptimalCarbonFootprintNoLongMotorized),
{})
def testSummaryAllTrips(self):
summary = carbon.getSummaryAllTrips(self.weekago, self.now)
# *2 because the walking trips don't count, but we have doubled the bus
# trips to count as air trips
self.assertEqual(summary['current'], old_div((self.busCarbon * self.busExpect + self.airCarbon * self.busExpect),1000))
# No * 2 because the optimal value for short bus trips is to actually move to bikes :)
self.assertEqual(summary['optimal'], old_div((self.busOptimalCarbon * self.busExpect),1000))
# These are are without air, so will only count the bus trips
self.assertEqual(summary['current no air'], old_div((self.busCarbon * self.busExpect),1000))
self.assertEqual(summary['optimal no air'], 0)
self.assertAlmostEqual(summary['all drive'], old_div((self.driveCarbon * (self.busExpect * 2 + self.walkExpect)),1000), places = 4)
def testDistinctUserCount(self):
self.assertEqual(carbon.getDistinctUserCount({}), len(self.testUsers))
def testFilteredDistinctUserCount(self):
# Now, move all the sections before a week
# Now there should be no matches in the last week
for section in self.SectionsColl.find():
section['section_start_datetime'] = self.weekago + timedelta(days = -1)
section['section_end_datetime'] = self.weekago + timedelta(days = -1) + timedelta(hours = 1)
# print("Section start = %s, section end = %s" %
# (section['section_start_datetime'], section['section_end_datetime']))
self.SectionsColl.save(section)
print("About to check for distinct users from a week ago")
self.assertEqual(carbon.getDistinctUserCount(carbon.getQuerySpec(None, None,
self.weekago, self.now)), 0)
self.assertEqual(carbon.getDistinctUserCount(carbon.getQuerySpec(None, None,
self.weekago + timedelta(weeks = -1), self.now)), len(self.testUsers))
def testDelLongMotorizedModes(self):
testMap = {'bus': 1, 'air': 3}
carbon.delLongMotorizedModes(testMap)
self.assertEqual(len(testMap), 1)
self.assertEqual(testMap, {'bus': 1})
def testDelLongMotorizedModesShortLong(self):
testMap = {'bus_short': 1, 'bus_long': 2, 'air_short': 3, 'air_long': 4}
carbon.delLongMotorizedModes(testMap)
self.assertEqual(len(testMap), 2)
self.assertIn('bus_short', testMap)
self.assertIn('bus_long', testMap)
self.assertNotIn('air_short', testMap)
self.assertNotIn('air_long', testMap)
def testGetCarbonFootprintsForMap(self):
testDistanceMap = {'a': 1, 'b': 2, 'c': 3}
testModeFootprintMap = {'a': 1, 'b': 2, 'c': 3}
footprintMap = carbon.getCarbonFootprintsForMap(testDistanceMap, testModeFootprintMap)
self.assertEqual(footprintMap, {'a': 0.001, 'b': 0.004, 'c': 0.009})
def testAvgCalculation(self):
testMap = {'a': 5, 'b': 10, 'c': 15, 'd': 3, 'e': 7, 'f': 13}
avgTestMap = carbon.convertToAvg(testMap, 5)
self.assertEquals(avgTestMap['a'], 1)
self.assertEquals(avgTestMap['b'], 2)
self.assertEquals(avgTestMap['c'], 3)
self.assertEquals(avgTestMap['d'], 0.6)
self.assertEquals(avgTestMap['e'], 1.4)
self.assertEquals(avgTestMap['f'], 2.6)
if __name__ == '__main__':
etc.configLogging()
unittest.main()
| shankari/e-mission-server | emission/incomplete_tests/TestCarbon.py | Python | bsd-3-clause | 13,159 | 0.012387 |
from recon.core.module import BaseModule
import codecs
import os
class Module(BaseModule):
meta = {
'name': 'List Creator',
'author': 'Tim Tomes (@LaNMaSteR53)',
'description': 'Creates a file containing a list of records from the database.',
'options': (
('table', 'hosts', True, 'source table of data for the list'),
('column', 'ip_address', True, 'source column of data for the list'),
('unique', True, True, 'only return unique items from the dataset'),
('nulls', False, True, 'include nulls in the dataset'),
('filename', os.path.join(BaseModule.workspace, 'list.txt'), True, 'path and filename for output'),
),
}
def module_run(self):
filename = self.options['filename']
with codecs.open(filename, 'wb', encoding='utf-8') as outfile:
# handle the source of information for the report
column = self.options['column']
table = self.options['table']
nulls = ' WHERE "%s" IS NOT NULL' % (column) if not self.options['nulls'] else ''
unique = 'DISTINCT ' if self.options['unique'] else ''
values = (unique, column, table, nulls)
query = 'SELECT %s"%s" FROM "%s"%s ORDER BY 1' % values
rows = self.query(query)
for row in [x[0] for x in rows]:
row = row if row else ''
outfile.write('%s\n' % (row))
print(row)
self.output('%d items added to \'%s\'.' % (len(rows), filename))
| praetorian-inc/pentestly | modules/reporting/list.py | Python | gpl-3.0 | 1,569 | 0.003824 |
# da vs turns module
import numpy as np
from scipy import optimize
import matplotlib.pyplot as pl
import glob, sys, os, time
from deskdb import SixDeskDB,tune_dir,mk_dir
import matplotlib
# ------------- basic functions -----------
def get_divisors(n):
"""finds the divisors of an integer number"""
large_divisors = []
for i in xrange(1, int(np.sqrt(n) + 1)):
    if n % i == 0:
yield i
      if i != n / i:
large_divisors.insert(0, n / i)
for divisor in large_divisors:
yield divisor
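# Example (illustrative): list(get_divisors(12)) == [1, 2, 3, 4, 6, 12];
# small divisors are yielded first and the matching large divisors are
# prepended to large_divisors, so the overall order is ascending.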
def linear_fit(datx,daty,daterr):
'''Linear model fit with f(x)=p0+p1*x
(datx,daty): data, daterr: measurement error
return values (res,p0,p0err,p1,p1err):
- res: sum of residuals^2 normalized with the measurment error
- p0,p1: fit paramaeters
- p0err, p1err: error of fit parameters'''
fitfunc = lambda p,x: p[0]+p[1]*x#p[0]=Dinf, p[1]=b0
errfunc = lambda p,x,y,err: (y-fitfunc(p,x))/err
pinit = [0.1, 0.1]
#minimize
outfit=optimize.leastsq(errfunc, pinit,args=(datx,daty,daterr),full_output=1)
(p0,p1)=outfit[0]#(p[0],p[1])
var =outfit[1]#variance matrix
p0err =np.sqrt(var[0,0])#err p[0]
p1err =np.sqrt(var[1,1])#err p[1]
# res=sum((daty-fitfunc((p0,p1),datx))**2)/len(datx-2) #not weighted with error
res=sum((errfunc((p0,p1),datx,daty,daterr))**2)/len(datx)#weighted with error
return (res,p0,p0err,p1,p1err)
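# Illustrative example with synthetic data (not SixDesk output): fitting
# f(x)=p0+p1*x to four points with unit measurement errors
#
#     datx   = np.array([1., 2., 3., 4.])
#     daty   = np.array([3.1, 4.9, 7.2, 8.8])
#     daterr = np.ones(4)
#     res, p0, p0err, p1, p1err = linear_fit(datx, daty, daterr)
#
# gives p0 close to 1 and p1 close to 2; res is the error-weighted sum of
# squared residuals divided by the number of data points.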
# ----------- functions necessary for the analysis -----------
#@profile
def get_min_turn_ang(s,t,a,it):
"""returns array with (angle,minimum sigma,sturn) of particles with lost turn number < it.
check if there is a particle with angle ang with lost turn number <it
if true: lost turn number and amplitude of the last stable particle is saved = particle "before" the particle with the smallest amplitude with nturns<it
if false: the smallest lost turn number and the largest amplitude is saved
"""
# s,t,a are ordered by angle,amplitude
angles,sigmas=t.shape# angles = number of angles, sigmas = number of amplitudes
ftype=[('angle',float),('sigma',float),('sturn',float)]
mta=np.zeros(angles,dtype=ftype)
# enumerate(a[:,0]) returns (0, a[0]), (1, a[1]), (2, a[2]), ... = iang, ang where iang = index of the array (0,1,2,...) for ang = angle (e.g. [1.5, ... , 1.5] , [3.0, ... ,3.0])
for iang,ang in enumerate(a[:,0]):
tang = t[iang]
sang = s[iang]
iturn = tang<it # select lost turn number < it
if(any(tang[iturn])):
sangit=sang[iturn].min()
argminit=sang.searchsorted(sangit) # get index of smallest amplitude with sturn<it - amplitudes are ordered ascending
mta[iang]=(ang,sang[argminit-1],tang[argminit-1])#last stable amplitude -> index argminit-1
else:
mta[iang]=(ang,sang.max(),tang.min())
return mta
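# Worked example (illustrative) for a single angle: with amplitudes
# sang=[2,4,6] sigma and loss turns tang=[tmax*100, 5e4, 2e4] (tmax*100 marks
# a particle that was never lost), a query with it=6e4 finds losses at
# amplitudes 4 and 6 sigma; the smallest lost amplitude is 4, so the last
# stable point (2 sigma, tmax*100 turns) is stored for this angle.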
def select_ang_surv(data,seed,nang):
"""returns data reduced to ((angmax+1)/nang)-1 angles -> nang being the divisor of angmax"""
angmax=len(data['angle'][:,0])#number of angles
  print(nang)
  if((nang not in list(get_divisors(angmax+1))) or ((angmax+1)/nang-1<3)):
    print('%s is not a divisor of %s or too large (((angmax+1)/nang)-1<3)'%(nang,angmax+1))
sys.exit(0)
#define variables for only selection of angles
s,a,t=data['sigma'][nang::nang+1],data['angle'][nang::nang+1],data['sturn'][nang::nang+1]
ftype=[('angle',float),('sigma',float),('sturn',float)]
dataang=np.ndarray(np.shape(a),dtype=ftype)
dataang['sigma'],dataang['angle'],dataang['sturn']=s,a,t
return dataang
#@profile
def mk_da_vst(data,seed,tune,turnsl,turnstep):
"""returns 'seed','tunex','tuney','dawtrap','dastrap','dawsimp','dassimp',
'dawtraperr','dastraperr','dastraperrep','dastraperrepang',
'dastraperrepamp','dawsimperr','dassimperr','nturn','tlossmin',
'mtime'
the da is in steps of turnstep
das: integral over radius
das = 2/pi*int_0^(2pi)[r(theta)]dtheta=<r(theta)>
= 2/pi*dtheta*sum(a_i*r(theta_i))
daw: integral over phase space
daw = (int_0^(2pi)[(r(theta))^4*sin(2*theta)]dtheta)^1/4
= (dtheta*sum(a_i*r(theta_i)^4*sin(2*theta_i)))^1/4
trapezoidal rule (trap): a_i=(3/2,1, ... ,1,3/2)
simpson rule (simp): a_i=(55/24.,-1/6.,11/8.,1, ... 1,11/8.,-1/6.,55/24.)
numerical recipes open formulas 4.1.15 and 4.1.18
"""
mtime=time.time()
(tunex,tuney)=tune
s,a,t=data['sigma'],data['angle'],data['sturn']
tmax=np.max(t[s>0])#maximum number of turns
#set the 0 in t to tmax*100 in order to check if turnnumber<it (any(tang[tang<it])<it in get_min_turn_ang)
t[s==0]=tmax*100
angmax=len(a[:,0])#number of angles
angstep=np.pi/(2*(angmax+1))#step in angle in rad
ampstep=np.abs((s[s>0][1])-(s[s>0][0]))
ftype=[('seed',int),('tunex',float),('tuney',float),('turn_max',int),('dawtrap',float),('dastrap',float),('dawsimp',float),('dassimp',float),('dawtraperr',float),('dastraperr',float),('dastraperrep',float),('dastraperrepang',float),('dastraperrepamp',float),('dawsimperr',float),('dassimperr',float),('nturn',float),('tlossmin',float),('mtime',float)]
l_turnstep=len(np.arange(turnstep,tmax,turnstep))
daout=np.ndarray(l_turnstep,dtype=ftype)
for nm in daout.dtype.names:
daout[nm]=np.zeros(l_turnstep)
dacount=0
currentdawtrap=0
currenttlossmin=0
#define integration coefficients at beginning and end which are unequal to 1
  ajtrap_s=np.array([3/2.])#trapezoidal rule
ajtrap_e=np.array([3/2.])
ajsimp_s=np.array([55/24.,-1/6.,11/8.])#Simpson rule
ajsimp_e=np.array([11/8.,-1/6.,55/24.])
warnsimp=True
for it in np.arange(turnstep,tmax,turnstep):
mta=get_min_turn_ang(s,t,a,it)
mta_angle=mta['angle']*np.pi/180#convert to rad
l_mta_angle=len(mta_angle)
mta_sigma=mta['sigma']
if(l_mta_angle>2):
      # define coefficients for trapezoidal rule (trap)
# ajtrap = [3/2.,1,....1,3/2.]
ajtrap=np.concatenate((ajtrap_s,np.ones(l_mta_angle-2),ajtrap_e))
else:
print('WARNING! mk_da_vst - You need at least 3 angles to calculate the da vs turns! Aborting!!!')
sys.exit(0)
if(l_mta_angle>6):
# define coefficients for simpson rule (simp)
# ajsimp = [55/24.,-1/6.,11/8.,1,....1,11/8.,-1/6.,55/24. ]
ajsimp=np.concatenate((ajsimp_s,np.ones(l_mta_angle-6),ajsimp_e))
calcsimp=True
else:
if(warnsimp):
print('WARNING! mk_da_vst - You need at least 7 angles to calculate the da vs turns with the simpson rule! da*simp* will be set to 0.')
warnsimp=False
calcsimp=False
# ---- trapezoidal rule (trap)
# integral
dawtrapint = ((ajtrap*(mta_sigma**4*np.sin(2*mta_angle))).sum())*angstep
dawtrap = (dawtrapint)**(1/4.)
dastrap = (2./np.pi)*(ajtrap*(mta_sigma)).sum()*angstep
# error
dawtraperrint = np.abs(((ajtrap*(2*(mta_sigma**3)*np.sin(2*mta_angle))).sum())*angstep*ampstep)
dawtraperr = np.abs(1/4.*dawtrapint**(-3/4.))*dawtraperrint
dastraperr = ampstep/2
dastraperrepang = ((np.abs(np.diff(mta_sigma))).sum())/(2*(angmax+1))
dastraperrepamp = ampstep/2
dastraperrep = np.sqrt(dastraperrepang**2+dastraperrepamp**2)
# ---- simpson rule (simp)
if(calcsimp):
# int
dawsimpint = (ajsimp*((mta_sigma**4)*np.sin(2*mta_angle))).sum()*angstep
dawsimp = (dawsimpint)**(1/4.)
dassimpint = (ajsimp*mta_sigma).sum()*angstep
dassimp = (2./np.pi)*dassimpint
# error
dawsimperrint = (ajsimp*(2*(mta_sigma**3)*np.sin(2*mta_angle))).sum()*angstep*ampstep
dawsimperr = np.abs(1/4.*dawsimpint**(-3/4.))*dawsimperrint
dassimperr = ampstep/2#simplified
else:
(dawsimp,dassimp,dawsimperr,dassimperr)=np.zeros(4)
tlossmin=np.min(mta['sturn'])
if(dawtrap!=currentdawtrap and it-turnstep >= 0 and tlossmin!=currenttlossmin):
daout[dacount]=(seed,tunex,tuney,turnsl,dawtrap,dastrap,dawsimp,dassimp,dawtraperr,dastraperr,dastraperrep,dastraperrepang,dastraperrepamp,dawsimperr,dassimperr,it-turnstep,tlossmin,mtime)
dacount=dacount+1
currentdawtrap =dawtrap
currenttlossmin=tlossmin
return daout[daout['dawtrap']>0]#delete 0 from errors
# ----------- functions to calculate the fit -----------
def get_fit_data(data,fitdat,fitdaterr,fitndrop,fitkap,b1):
'''linearize data for da vs turns fit according to model:
D(N) = Dinf+b0/(log(N^(exp(-b1))))^kappa'''
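  # with x = 1/(log(N^exp(-b1)))^kappa the model above becomes the straight
  # line D(N) = Dinf + b0*x, so the fit reduces to a plain linear fit of
  # daty (with error daterr) versus datx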
datx=1/(np.log(data['tlossmin'][fitndrop::]**np.exp(-b1))**fitkap)
# print (fitdat,fitdaterr)
daty=data[fitdat][fitndrop::]
if fitdaterr=='none':#case of no errors
daterr=np.ones(len(datx))
else:
daterr=data[fitdaterr][fitndrop::]
return datx,daty,daterr
def get_b1mean(db,tune,fitdat,fitdaterr,fitndrop,fitskap,fitekap,fitdkap):
'''returns (mean(b1),errmean(b1),std(b1)) over the seeds
with b1 being the fit parameter in:
D(N) = Dinf+b0/(log(N^(exp(-b1))))^kappa
and a linear relation is assumed between:
log(|b|)=log(|b0|)+b1*kappa <=> b=b0*exp(b1*kappa)
  with b being the fit parameter in:
D(N) = Dinf+b/(log(N))^kappa
fitndrop=do not include first fitndrop data points
fitkap=kappa'''
if(not db.check_seeds()):
print('!!! Seeds are missing in database !!!')
ftype=[('seed',int),('res',float),('logb0',float),('logb0err',float),('b1',float),('b1err',float)]
lklog=np.zeros(len(db.get_db_seeds()),dtype=ftype)
ftype=[('kappa',float),('res',float),('dinf',float),('dinferr',float),('b',float),('berr',float)]
lkap=np.zeros(len(np.arange(fitskap,fitekap+fitdkap,fitdkap))-1,dtype=ftype)
ccs=0
for seed in db.get_db_seeds():
data=db.get_da_vst(seed,tune)
#start: scan over kappa
cck=0
for kap in np.arange(fitskap,fitekap+fitdkap,fitdkap):
if(abs(kap)>1.e-6):#for kappa=0: D(N)=Dinf+b/(log(N)^kappa)=D(N)=Dinf+b -> fit does not make sense
datx,daty,daterr=get_fit_data(data,fitdat,fitdaterr,fitndrop,kap,0)#fit D(N)=Dinf+b/(log(N)^kappa
lkap[cck]=(kap,)+linear_fit(datx,daty,daterr)
cck+=1
lklog[ccs]=(seed,)+linear_fit(lkap['kappa'],np.log(np.abs(lkap['b'])),1)#linear fit log(|b|)=log(|b0|)+b1*kappa for each seed
ccs+=1
return (np.mean(lklog['b1']),np.sqrt(np.mean(lklog['b1err']**2)),np.std(lklog['b1']))#error of mean value = sqrt(sum_i((1/n)*sigma_i**2))
def mk_da_vst_fit(db,tune,fitdat,fitdaterr,fitndrop,fitskap,fitekap,fitdkap):
'''1) a) fit D(N)=Dinf+b/(log(N))^kappa for all seeds and
scan range (skap,ekap,dkap)
b) assume linear dependence of b on kappa:
log(|b|)=log(|b0|)+b1*kappa
-> b1 for all seeds
c) calculate avg(b1) over all seeds
2) a) fit D(N)=Dinf+b0/(log(N)^(exp(-b1)))^kappa
for fixed b1=b1mean (obtained in 1))
and scan range (skap,ekap,dkap)
b) use (b0,kappa) with minimum residual'''
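  # the code below mirrors these two stages: get_b1mean() supplies the average
  # b1 over all seeds, then for each seed the (b0,kappa) pair with the smallest
  # fit residual is kept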
turnsl=db.env_var['turnsl']
mtime=time.time()
(tunex,tuney)=tune
print('calculating b1mean ...')
(b1mean,b1meanerr,b1std)=get_b1mean(db,tune,fitdat,fitdaterr,fitndrop,fitskap,fitekap,fitdkap)
print('average over %s seeds: b1mean=%s, b1meanerr=%s, b1std=%s'%(round(len(db.get_db_seeds())),round(b1mean,3),round(b1meanerr,3),round(b1std,3)))
print('start scan over kappa for fixed b1=%s to find kappa with minimum residual ...'%b1mean)
ftype=[('kappa',float),('dkappa',float),('res',float),('dinf',float),('dinferr',float),('b0',float),('b0err',float)]
lkap=np.zeros(len(np.arange(fitskap,fitekap+fitdkap,fitdkap))-1,dtype=ftype)#-1 as kappa=0 is not used
ftype=[('seed',float),('tunex',float),('tuney',float),('turn_max',int),('fitdat',np.str_, 30),('fitdaterr',np.str_, 30),('fitndrop',float),('kappa',float),('dkappa',float),('res',float),('dinf',float),('dinferr',float),('b0',float),('b0err',float),('b1mean',float),('b1meanerr',float),('b1std',float),('mtime',float)]
minkap=np.zeros(len(db.get_db_seeds()),dtype=ftype)
ccs=0
for seed in db.get_db_seeds():
data=db.get_da_vst(seed,tune)
#start: scan over kappa
cck=0
for kap in np.arange(fitskap,fitekap+fitdkap,fitdkap):
if(abs(kap)>1.e-6):#for kappa=0: D(N)=Dinf+b/(log(N)^kappa)=D(N)=Dinf+b -> fit does not make sense
datx,daty,daterr=get_fit_data(data,fitdat,fitdaterr,fitndrop,kap,b1mean)
lkap[cck]=(kap,fitdkap,)+linear_fit(datx,daty,daterr)
cck+=1
iminkap=np.argmin(lkap['res'])
minkap[ccs]=(seed,tunex,tuney,turnsl,fitdat,fitdaterr,fitndrop,)+tuple(lkap[iminkap])+(b1mean,b1meanerr,b1std,mtime,)
ccs+=1
print('... scan over kappa is finished!')
return minkap
# ----------- functions to reload and create DA.out files for previous scripts -----------
def save_daout_old(data,filename):
daoutold=data[['dawtrap','dastrap','dastraperrep','dastraperrepang','dastraperrepamp','nturn','tlossmin']]
np.savetxt(filename,daoutold,fmt='%.6f %.6f %.6f %.6f %.6f %d %d')
def reload_daout_old(filename):
ftype=[('dawtrap',float),('dastrap',float),('dastraperrep',float),('dastraperrepang',float),('dastraperrepamp',float),('nturn',float),('tlossmin',float)]
return np.loadtxt(filename,dtype=ftype,delimiter=' ')
def save_daout(data,filename):
daout=data[['seed','tunex','tuney','turn_max','dawtrap','dastrap','dawsimp','dassimp','dawtraperr','dastraperr','dastraperrep','dastraperrepang','dastraperrepamp','dawsimperr','dassimperr','nturn','tlossmin']]
np.savetxt(filename,daout,fmt='%d %.6f %.6f %d %.6f %.6f %.6f %.6f %.6f %.6f %.6f %.6f %.6f %.6f %.6f %d %d')
def save_davst_fit(data,filename):
fitdata=data[['seed','tunex','tuney','turn_max','fitdat','fitdaterr','fitndrop','kappa','dkappa','res','dinf','dinferr','b0','b0err','b1mean','b1meanerr','b1std']]
np.savetxt(filename,fitdata,fmt='%d %.5f %.5f %d %s %s %d %.5f %.5f %.5f %.5f %.5f %.5f %.5f %.5f %.5f %.5f')
def reload_daout(filename):
ftype=[('seed',int),('tunex',float),('tuney',float),('turn_max',int),('dawtrap',float),('dastrap',float),('dawsimp',float),('dassimp',float),('dawtraperr',float),('dastraperr',float),('dastraperrep',float),('dastraperrepang',float),('dastraperrepamp',float),('dawsimperr',float),('dassimperr',float),('nturn',float),('tlossmin',float),('mtime',float)]
return np.loadtxt(filename,dtype=ftype,delimiter=' ')
def save_dasurv(data,filename):
np.savetxt(filename,np.reshape(data,-1),fmt='%.8f %.8f %d')
def reload_dasurv(path):
ftype=[('angle', '<f8'), ('sigma', '<f8'), ('sturn', '<f8')]
data=np.loadtxt(glob.glob(path+'/dasurv.out*')[0],dtype=ftype,delimiter=' ')
angles=len(set(data['angle']))
return data.reshape(angles,-1)
def plot_surv_2d_stab(db,lbl,mksize,cl,seed,tune,ampmax):
'''survival plot: stable area of two studies'''
data=db.get_surv(seed,tune)
s,a,t=data['sigma'],data['angle'],data['sturn']
s,a,t=s[s>0],a[s>0],t[s>0]#delete 0 values
tmax=np.max(t)
sxstab=s[t==tmax]*np.cos(a[t==tmax]*np.pi/180)
systab=s[t==tmax]*np.sin(a[t==tmax]*np.pi/180)
pl.scatter(sxstab,systab,mksize,marker='o',color=cl,edgecolor='none',label=lbl)
pl.title('seed '+str(seed),fontsize=12)
pl.xlim([0,ampmax])
pl.ylim([0,ampmax])
pl.xlabel(r'Horizontal amplitude [$\sigma$]',labelpad=10,fontsize=12)
pl.ylabel(r'Vertical amplitude [$\sigma$]',labelpad=10,fontsize=12)
def plot_surv_2d_comp(db,dbcomp,lbl,complbl,seed,tune,ampmax):
'''survival plot: stable area of two studies'''
data=db.get_surv(seed,tune)
datacomp=dbcomp.get_surv(seed,tune)
pl.close('all')
pl.figure(figsize=(6,6))
plot_surv_2d_stab(db,lbl,10,'b',seed,tune,ampmax)
plot_surv_2d_stab(dbcomp,complbl,2,'r',seed,tune,ampmax)
pl.legend(loc='best')
def plot_comp_da_vst(db,dbcomp,ldat,ldaterr,lblname,complblname,seed,tune,ampmin,ampmax,tmax,slog,sfit,fitndrop):
"""plot dynamic aperture vs number of turns,
blue/green=simple average, red/orange=weighted average"""
pl.close('all')
pl.figure(figsize=(6,6))
for dbbb in [db,dbcomp]:
data=dbbb.get_da_vst(seed,tune)
if(dbbb.LHCDescrip==db.LHCDescrip):
lbl = lblname
fmtpl = 'bo'
fmtfit= 'b-'
if(dbbb.LHCDescrip==dbcomp.LHCDescrip):
lbl = complblname
fmtpl = 'ro'
fmtfit = 'r-'
# pl.errorbar(data[ldat[0]],data['tlossmin'],xerr=data[ldaterr[0]],fmt=fmtpl,markersize=2,label='%s %s'%(ldat[0],lbl))
pl.errorbar(data[ldat[0]],data['tlossmin'],xerr=data[ldaterr[0]],fmt=fmtpl,markersize=2,label='%s'%(lbl))
if(sfit):
fitdata=dbbb.get_da_vst_fit(seed,tune)
fitdata=fitdata[fitdata['fitdat']==ldat[0]]
fitdata=fitdata[fitdata['fitdaterr']==ldaterr[0]]
fitdata=fitdata[np.abs(fitdata['fitndrop']-float(fitndrop))<1.e-6]
if(len(fitdata)==1):
pl.plot(fitdata['dinf']+fitdata['b0']/(np.log(data['tlossmin']**np.exp(-fitdata['b1mean']))**fitdata['kappa']),data['tlossmin'],fmtfit)
else:
        print('Warning: no fit data available or data ambiguous!')
pl.title('seed '+str(seed),fontsize=16)
pl.xlim([ampmin,ampmax])
pl.xlabel(r'Dynamic aperture [$\sigma$]',labelpad=10,fontsize=16)
pl.ylabel(r'Number of turns',labelpad=15,fontsize=16)
plleg=pl.gca().legend(loc='best',fontsize=16)
for label in plleg.get_texts():
label.set_fontsize(12)
if(slog):
pl.ylim([5.e3,tmax])
pl.yscale('log')
else:
pl.ylim([0,tmax])
pl.gca().ticklabel_format(style='sci',axis='y',scilimits=(0,0))
def clean_dir_da_vst(db,files):
'''create directory structure and if force=true delete old files of da vs turns analysis'''
for seed in db.get_seeds():
for tune in db.get_db_tunes():
pp=db.mk_analysis_dir(seed,tune)# create directory
if(len(files)>0):#delete old plots and files
for filename in files:
ppf=os.path.join(pp,filename)
if(os.path.exists(ppf)): os.remove(ppf)
if(len(files)>0):
print('remove old {0} ... files in '+db.LHCDescrip).format(files)
# for error analysis - data is not saved in database but output files are generated
def RunDaVsTurnsAng(db,seed,tune,turnstep):
"""Da vs turns -- calculate da vs turns for divisors of angmax,
e.g. for angmax=29+1 for divisors [1, 2, 3, 5, 6] - last 2 [10,15] are omitted as the number of angles has to be larger than 3"""
# start analysis
try:
turnstep=int(float(turnstep))
  except (ValueError,NameError,TypeError):
    print('Error in RunDaVsTurnsAng: turnstep must be an integer value!')
sys.exit(0)
if(seed not in db.get_db_seeds()):
print('WARNING: Seed %s is missing in database !!!'%seed)
sys.exit(0)
if(tune not in db.get_db_tunes()):
print('WARNING: tune %s is missing in database !!!'%tune)
sys.exit(0)
turnsl=db.env_var['turnsl']#get turnsl for outputfile names
seed=int(seed)
print('analyzing seed {0} and tune {1}...').format(str(seed),str(tune))
dirname=db.mk_analysis_dir(seed,tune)#directory struct already created in clean_dir_da_vst, only get dir name (string) here
print('... get survival data')
dasurvtot= db.get_surv(seed,tune)
a=dasurvtot['angle']
angmax=len(a[:,0])#number of angles
#use only divisors nang with (angmax+1)/nang-1>=3 = minimum number of angles for trapezoidal rule
divsall=np.array(list(get_divisors(angmax+1)))
divs=divsall[(angmax+1)/divsall-1>2]
print('... number of angles: %s, divisors: %s'%(angmax,str(divs)))
for nang in divs:
dirnameang='%s/%s'%(dirname,nang)
mk_dir(dirnameang)
dasurv=select_ang_surv(dasurvtot,seed,nang)
print('... calculate da vs turns')
daout=mk_da_vst(dasurv,seed,tune,turnsl,turnstep)
    save_daout(daout,'%s/DA.out'%dirnameang)
print('... save da vs turns data in {0}/DA.out').format(dirnameang)
# in analysis - putting the pieces together
def RunDaVsTurns(db,force,outfile,outfileold,turnstep,davstfit,fitdat,fitdaterr,fitndrop,fitskap,fitekap,fitdkap,outfilefit):
'''Da vs turns -- calculate da vs turns for study dbname, if davstfit=True also fit the data'''
#---- calculate the da vs turns
try:
turnstep=int(float(turnstep))
  except (ValueError,NameError,TypeError):
    print('Error in RunDaVsTurns: turnstep must be an integer value!')
sys.exit(0)
if(not db.check_seeds()):
print('!!! Seeds are missing in database !!!')
turnsl=db.env_var['turnsl']#get turnsl for outputfile names
turnse=db.env_var['turnse']
for seed in db.get_db_seeds():
seed=int(seed)
print('analyzing seed {0} ...').format(str(seed))
for tune in db.get_db_tunes():
print('analyzing tune {0} ...').format(str(tune))
dirname=db.mk_analysis_dir(seed,tune)#directory struct already created in clean_dir_da_vst, only get dir name (string) here
print('... get survival data')
dasurv= db.get_surv(seed,tune)
if dasurv is None:
print("ERROR: survival data could not be retrieved due to "+
"and error in the database or tracking data. Skip "
"this seed %s"%(seed))
continue
print('... get da vs turns data')
daout = db.get_da_vst(seed,tune)
if(len(daout)>0):#reload data, if input data has changed redo the analysis
an_mtime=daout['mtime'].min()
res_mtime=db.execute('SELECT max(mtime) FROM six_results')[0][0]
if res_mtime>an_mtime or force is True:
files=('DA.%s.out DAsurv.%s.out DA.%s.png DAsurv.%s.png DAsurv_log.%s.png DAsurv_comp.%s.png DAsurv_comp_log.%s.png'%(turnse,turnse,turnse,turnse,turnse,turnse,turnse)).split()+['DA.out','DAsurv.out','DA.png','DAsurv.png','DAsurv_log.png','DAsurv_comp.png','DAsurv_comp_log.png']
clean_dir_da_vst(db,files)# create directory structure and delete old files
print('... input data has changed or force=True - recalculate da vs turns')
daout=mk_da_vst(dasurv,seed,tune,turnsl,turnstep)
print('.... save data in database')
#check if old table name da_vsturn exists, if yes delete it
if(db.check_table('da_vsturn')):
print('... delete old table da_vsturn - table will be substituted by new table da_vst')
db.execute("DROP TABLE da_vsturn")
db.st_da_vst(daout,recreate=True)
else:#create data
print('... calculate da vs turns')
daout=mk_da_vst(dasurv,seed,tune,turnsl,turnstep)
print('.... save data in database')
db.st_da_vst(daout,recreate=False)
if(outfile):# create dasurv.out and da.out files
fnsurv='%s/DAsurv.%s.out'%(dirname,turnse)
save_dasurv(dasurv,fnsurv)
print('... save survival data in {0}').format(fnsurv)
fndaout='%s/DA.%s.out'%(dirname,turnse)
save_daout(daout,fndaout)
print('... save da vs turns data in {0}').format(fndaout)
if(outfileold):
fndaoutold='%s/DAold.%s.out'%(dirname,turnse)
save_daout_old(daout,fndaoutold)
print('... save da vs turns (old data format) data in {0}').format(fndaoutold)
#---- fit the data
if(davstfit):
if(fitdat in ['dawtrap','dastrap','dawsimp','dassimp']):
if(fitdaterr in ['none','dawtraperr','dastraperr','dastraperrep','dastraperrepang','dastraperrepamp','dawsimperr','dassimperr']):
try:
fitndrop=int(float(fitndrop))
        except (ValueError,NameError,TypeError):
          print('Error in RunDaVsTurns: fitndrop must be an integer value! - Aborting!')
sys.exit(0)
try:
fitskap=float(fitskap)
fitekap=float(fitekap)
fitdkap=float(fitdkap)
        except (ValueError,NameError,TypeError):
          print('Error in RunDaVsTurns: fitskap, fitekap and fitdkap must be float values! - Aborting!')
sys.exit(0)
if((np.arange(fitskap,fitekap+fitdkap,fitdkap)).any()):
for tune in db.get_db_tunes():
print('fit da vs turns for tune {0} ...').format(str(tune))
fitdaout=mk_da_vst_fit(db,tune,fitdat,fitdaterr,fitndrop,fitskap,fitekap,fitdkap)
print('.... save fitdata in database')
db.st_da_vst_fit(fitdaout,recreate=False)
if(outfilefit):
(tunex,tuney)=tune
sixdesktunes="%g_%g"%(tunex,tuney)
fndot='%s/DAfit.%s.%s.%s.%s.%s.plot'%(db.mk_analysis_dir(),db.LHCDescrip,sixdesktunes,turnse,fitdat,fitdaterr)
save_davst_fit(fitdaout,fndot)
print('... save da vs turns fit data in {0}').format(fndot)
else:
print('Error in RunDaVsTurns: empty scan range for fitkap!')
else:
print("Error in -fitopt: <dataerr> has to be 'none','dawtraperr','dastraperr','dastraperrep','dastraperrepang','dastraperrepamp','dawsimperr' or 'dassimperr' - Aborting!")
sys.exit(0)
else:
print("Error in -fitopt: <data> has to be 'dawtrap','dastrap','dawsimp' or 'dassimp' - Aborting!")
sys.exit(0)
def PlotDaVsTurns(db,ldat,ldaterr,ampmaxsurv,ampmindavst,ampmaxdavst,tmax,plotlog,plotfit,fitndrop):
'''plot survival plots and da vs turns for list of data ldat and associated error ldaterr'''
turnsl=db.env_var['turnsl']
turnse=db.env_var['turnse']
print('Da vs turns -- create survival and da vs turns plots')
try:
ampmaxsurv =float(ampmaxsurv)
ampmindavst=float(ampmindavst)
ampmaxdavst=float(ampmaxdavst)
  except (ValueError,NameError,TypeError):
    print('Error in PlotDaVsTurns: ampmaxsurv and amprangedavst must be float values!')
sys.exit(0)
#remove all files
if(plotlog):
files=('DA_log.png DAsurv.png DA_log.%s.png DAsurv.%s.png'%(turnse,turnse)).split()
else:
files=('DA.png DAsurv.png DA.%s.png DAsurv.%s.png'%(turnse,turnse)).split()
clean_dir_da_vst(db,files)# create directory structure and delete old files if force=true
if(not db.check_seeds()):
print('!!! Seeds are missing in database !!!')
for seed in db.get_db_seeds():
seed=int(seed)
for tune in db.get_db_tunes():
dirname=db.mk_analysis_dir(seed,tune)#directory struct already created in clean_dir_da_vst, only get dir name (string) here
pl.close('all')
pl.figure(figsize=(6,6))
db.plot_surv_2d(seed,tune,ampmaxsurv)#suvival plot
pl.savefig('%s/DAsurv.%s.png'%(dirname,turnse))
print('... saving plot %s/DAsurv.%s.png'%(dirname,turnse))
db.plot_da_vst(seed,tune,ldat,ldaterr,ampmindavst,ampmaxdavst,tmax,plotlog,plotfit,fitndrop)#da vs turns plot
if(plotlog==True):
pl.savefig('%s/DA_log.%s.png'%(dirname,turnse))
print('... saving plot %s/DA_log.%s.png'%(dirname,turnse))
else:
pl.savefig('%s/DA.%s.png'%(dirname,turnse))
print('... saving plot %s/DA.%s.png'%(dirname,turnse))
def PlotCompDaVsTurns(db,dbcomp,ldat,ldaterr,lblname,complblname,ampmaxsurv,ampmindavst,ampmaxdavst,tmax,plotlog,plotfit,fitndrop):
'''Comparison of two studies: survival plots (area of stable particles) and Da vs turns plots'''
matplotlib.rcParams.update({'font.size': 16})
turnsldb =db.env_var['turnsl']
turnsedb =db.env_var['turnse']
turnsldbcomp=dbcomp.env_var['turnsl']
turnsedbcomp=dbcomp.env_var['turnse']
if(not turnsldb==turnsldbcomp):
print('Warning! Maximum turn number turn_max of %s and %s differ!'%(db.LHCDescrip,dbcomp.LHCDescrip))
try:
ampmaxsurv=float(ampmaxsurv)
ampmindavst=float(ampmindavst)
ampmaxdavst=float(ampmaxdavst)
tmax=int(float(tmax))
  except (ValueError,NameError):
    print('Error in PlotCompDaVsTurns: ampmaxsurv and amprangedavst must be float values and tmax an integer value!')
sys.exit(0)
#remove all files
if(plotlog):
files=('DA_comp_log.png DAsurv_comp.png DA_comp_log.%s.png DAsurv_comp.%s.png'%(turnsedb,turnsedb)).split()
else:
files=('DA_comp.png DAsurv_comp.png DA_comp.%s.png DAsurv_comp.%s.png'%(turnsedb,turnsedb)).split()
clean_dir_da_vst(db,files)# create directory structure and delete old files if force=true
# start analysis
if(not db.check_seeds()):
print('Seeds are missing in database!')
for seed in db.get_db_seeds():
seed=int(seed)
for tune in db.get_db_tunes():
if(seed in dbcomp.get_db_seeds() and tune in db.get_db_tunes()):
dirname=db.mk_analysis_dir(seed,tune)#directories already created with
pl.close('all')
plot_surv_2d_comp(db,dbcomp,lblname,complblname,seed,tune,ampmaxsurv)
pl.savefig('%s/DAsurv_comp.%s.png'%(dirname,turnsedb))
print('... saving plot %s/DAsurv_comp.%s.png'%(dirname,turnsedb))
plot_comp_da_vst(db,dbcomp,ldat,ldaterr,lblname,complblname,seed,tune,ampmindavst,ampmaxdavst,tmax,plotlog,plotfit,fitndrop)
if(plotlog==True):
pl.savefig('%s/DA_comp_log.%s.png'%(dirname,turnsedb),bbox_inches='tight')
print('... saving plot %s/DA_comp_log.%s.png'%(dirname,turnsedb))
else:
pl.savefig('%s/DA_comp.%s.png'%(dirname,turnsedb),bbox_inches='tight')
print('... saving plot %s/DA_comp.%s.png'%(dirname,turnsedb))
| mfittere/SixDeskDB | sixdeskdb/davsturns.py | Python | lgpl-2.1 | 28,687 | 0.049395 |
from JupJup.Present import collector
| karlin13/LzCoinJupJup | JupJup/Present/__init__.py | Python | mit | 37 | 0 |
from PySide import QtGui, QtCore
import os, struct, time
class ControlMainWindow(QtGui.QMainWindow):
_storeButtonUsed = False
_gv = None
_parser = None
_inter = None
_workpiecePos = [ 5, 5, 5 ]
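	# note: axis positions (machineStatus x/y/z and _workpiecePos) appear to be
	# in machine units of 1/1000 mm -- the UI divides by 1000 before showing mm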
_originOffset = [ 0, 0 ]
_polarCorrection = [ 1, 0 ]
_debounce = None
def __init__(self, chatBackend):
super(ControlMainWindow, self).__init__(None)
self._machine = MachineController(chatBackend);
self._machine.machineStatus().statusUpdated.connect(self.statusUpdated)
self._ui = Ui_MainWindow()
self._ui.setupUi(self)
self._ui.stop.clicked.connect(self._machine.stop)
self._ui.refMovement.clicked.connect(self.refMovement)
self._ui.importGCode.clicked.connect(self.importGCode)
self._ui.run.clicked.connect(self.run)
self._ui.resume.clicked.connect(self.resume)
self._ui.showGraphicsView.clicked.connect(self.showGraphicsView)
self._ui.gotoOther.setMenu(self._ui.menuGoto)
self._ui.storeOther.setMenu(self._ui.menuStore)
self._ui.menuBar.hide()
self._ui.storeXY.triggered.connect(self.storeXY)
self._ui.storeXYZ.triggered.connect(self.storeXYZ)
self._ui.storeX.triggered.connect(self.storeX)
self._ui.storeY.triggered.connect(self.storeY)
self._ui.storeZ.triggered.connect(self.storeZ)
self._ui.gotoXY.triggered.connect(self.gotoWorkpieceXY)
self._ui.gotoXYZ.triggered.connect(self.gotoWorkpieceXYZ)
self._ui.gotoX.triggered.connect(self.gotoWorkpieceX)
self._ui.gotoY.triggered.connect(self.gotoWorkpieceY)
self._ui.gotoZ.triggered.connect(self.gotoWorkpieceZ)
self._ui.driveXUp.clicked.connect(self.driveXUp)
self._ui.driveYUp.clicked.connect(self.driveYUp)
self._ui.driveZUp.clicked.connect(self.driveZUp)
self._ui.driveUUp.clicked.connect(self.driveUUp)
self._ui.driveXDown.clicked.connect(self.driveXDown)
self._ui.driveYDown.clicked.connect(self.driveYDown)
self._ui.driveZDown.clicked.connect(self.driveZDown)
self._ui.driveUDown.clicked.connect(self.driveUDown)
self._ui.feedRateOverride.valueChanged.connect(self.feedRateOverrideChanged)
self._machine.machineStatus().updateStatus()
@QtCore.Slot()
def refMovement(self):
# @fixme assert machine is not moving
self._machine.setAction(ReferenceMotionController(self._machine))
@QtCore.Slot()
def showGraphicsView(self):
if self._parser == None:
QtGui.QMessageBox.information(
self, 'PyPC-NC Graphics View',
'You need to import G-Code before visualizing it.')
return
if self._gv == None:
self._gv = ControlGraphicsView(self, self._machine)
self._gv.render(self._parser)
self._gv.show()
self._gv.closed.connect(self.graphicsViewClosed)
@QtCore.Slot()
def graphicsViewClosed(self):
self._gv = None
@QtCore.Slot()
def statusUpdated(self):
infos = []
if self._machine.machineStatus().status() & 0x10: infos.append('moving')
if self._machine.machineStatus().status() & 0x04: infos.append("ref'd")
if self._machine.machineStatus().status() & 0x08: infos.append("ref'ing")
status = hex(self._machine.machineStatus().status())
if infos:
status += ' (' + ', '.join(infos) + ')'
self._ui.statusX.setText(status)
self._ui.statusPx.setText("%.3f" % (self._machine.machineStatus().x() / 1000))
self._ui.statusPy.setText("%.3f" % (self._machine.machineStatus().y() / 1000))
self._ui.statusPz.setText("%.3f" % (self._machine.machineStatus().z() / 1000))
self._ui.statusPu.setText("%.3f" % (self._machine.machineStatus().u() / 1000))
self._ui.relX.setText("%.3f" % ((self._workpiecePos[0] - self._machine.machineStatus().x()) / 1000))
self._ui.relY.setText("%.3f" % ((self._workpiecePos[1] - self._machine.machineStatus().y()) / 1000))
self._ui.relZ.setText("%.3f" % ((self._workpiecePos[2] - self._machine.machineStatus().z()) / 1000))
if isinstance(self._machine.action(), ProgrammedMotionController):
self._ui.progress.setMaximum(self._machine.action().totalSteps())
self._ui.progress.setValue(self._machine.action().completedSteps())
elif self._inter and self._inter.pause:
if self._ui.progress.maximum():
QtGui.QMessageBox.information(
self, 'Tool Change',
'Insert tool %d now.' % self._inter.nextTool)
self._ui.progress.setMaximum(0)
else:
self._ui.progress.setMaximum(1)
self._ui.progress.setValue(0)
@QtCore.Slot()
def importGCode(self):
filename = QtGui.QFileDialog.getOpenFileName(self, 'Import G-Code', '.')
if filename[0] == '': return
self.importGCodeFromFile(filename[0])
def importGCodeFromFile(self, filename):
parser = GCode.GCodeParser()
parser.readFile(filename)
parser.removeTapeMarkers()
parser.removeComments()
parser.removeInlineComments()
parser.removeBlockSkipLines()
parser.normalizeAddressWhitespace()
parser.normalizeLeadingZeros()
parser.readSequenceNumbers()
self._parser = parser
@QtCore.Slot()
def run(self):
if not self._machine.machineStatus().status() & 0x04:
reply = QtGui.QMessageBox.question(self, 'G-Code Import',
				'Are you sure you want to import G-Code without a reference movement?',
QtGui.QMessageBox.Yes | QtGui.QMessageBox.No, QtGui.QMessageBox.No)
if reply == QtGui.QMessageBox.No:
return
if not self._storeButtonUsed:
reply = QtGui.QMessageBox.question(self, 'G-Code Import',
				'Are you sure you want to import G-Code without setting the workpiece location?',
QtGui.QMessageBox.Yes | QtGui.QMessageBox.No, QtGui.QMessageBox.No)
if reply == QtGui.QMessageBox.No:
return
filters = [
Filters.OffsetFilter([ -self._originOffset[0], -self._originOffset[1] ]),
Filters.PolarFixer(self._polarCorrection[0], self._polarCorrection[1]),
Filters.OffsetFilter(self._workpiecePos)
]
fc = Filters.FilterChain(filters, CNCCon.CNCConWriter())
self._inter = GCode.GCodeInterpreter(fc)
self._inter.position = [
self._machine.machineStatus().x() - self._workpiecePos[0] + self._originOffset[0],
self._machine.machineStatus().y() - self._workpiecePos[1] + self._originOffset[1],
self._machine.machineStatus().z() - self._workpiecePos[2]
]
self._inter.invertZ = self._ui.invertZ.isChecked()
self._inter.run(self._parser)
self._machine.setAction(ProgrammedMotionController(self._machine))
self._machine.action().setFeedRateOverride(self._ui.feedRateOverride.value())
self._machine.action().setCommands(self._inter.target.buffer)
@QtCore.Slot()
def resume(self):
if not self._inter:
QtGui.QMessageBox.information(
self, 'PyPC-NC',
'Interpreter not initialized. You need to open & "Run" first.')
return
if not self._inter.pause:
QtGui.QMessageBox.information(
self, 'PyPC-NC',
'Interpreter not currently paused. You may want to start over by clicking "Run".')
return
self._inter.target.buffer = [ ]
self._inter.position = [
self._machine.machineStatus().x() - self._workpiecePos[0] + self._originOffset[0],
self._machine.machineStatus().y() - self._workpiecePos[1] + self._originOffset[1],
self._machine.machineStatus().z() - self._workpiecePos[2]
]
self._inter.target.filters()[2].setOffsets(self._workpiecePos)
self._inter.resume(self._parser)
self._machine.setAction(ProgrammedMotionController(self._machine))
self._machine.action().setFeedRateOverride(self._ui.feedRateOverride.value())
self._machine.action().setCommands(self._inter.target.buffer)
@QtCore.Slot(int)
def feedRateOverrideChanged(self, value):
if isinstance(self._machine.action(), ProgrammedMotionController):
self._machine.action().setFeedRateOverride(self._ui.feedRateOverride.value())
@QtCore.Slot()
def storeXY(self):
self.storeX()
self.storeY()
@QtCore.Slot()
def storeXYZ(self):
self.storeXY()
self.storeZ()
@QtCore.Slot()
def storeX(self):
self._storeButtonUsed = True
self._workpiecePos[0] = self._machine.machineStatus().x()
@QtCore.Slot()
def storeY(self):
self._storeButtonUsed = True
self._workpiecePos[1] = self._machine.machineStatus().y()
@QtCore.Slot()
def storeZ(self):
self._storeButtonUsed = True
self._workpiecePos[2] = self._machine.machineStatus().z()
def gotoWorkpiece(self, x, y, z):
if isinstance(self._machine.action(), ProgrammedMotionController):
return
elif not isinstance(self._machine.action(), ManualMotionController):
self._machine.setAction(ManualMotionController(self._machine))
self._machine.action().gotoXYZ(x, y, z)
def workpiecePos(self):
return
def originOffset(self):
return self._originOffset
def setOriginOffset(self, x, y):
self._originOffset = (x, y)
def polarCorrection(self):
return self._polarCorrection
def setPolarCorrection(self, r, phi):
self._polarCorrection = (r, phi)
@QtCore.Slot()
def gotoWorkpieceXY(self):
self.gotoWorkpiece(self._workpiecePos[0], self._workpiecePos[1], None)
@QtCore.Slot()
def gotoWorkpieceXYZ(self):
self.gotoWorkpiece(self._workpiecePos[0], self._workpiecePos[1], self._workpiecePos[2])
@QtCore.Slot()
def gotoWorkpieceX(self):
self.gotoWorkpiece(self._workpiecePos[0], None, None)
@QtCore.Slot()
def gotoWorkpieceY(self):
self.gotoWorkpiece(None, self._workpiecePos[1], None)
@QtCore.Slot()
def gotoWorkpieceZ(self):
self.gotoWorkpiece(None, None, self._workpiecePos[2])
def workpiecePos(self):
return self._workpiecePos
@QtCore.Slot()
def driveXUp(self):
self.manualMove('X', True)
@QtCore.Slot()
def driveYUp(self):
self.manualMove('Y', True)
@QtCore.Slot()
def driveZUp(self):
self.manualMove('Z', True)
@QtCore.Slot()
def driveUUp(self):
self.manualMove('U', True)
@QtCore.Slot()
def driveXDown(self):
self.manualMove('X', False)
@QtCore.Slot()
def driveYDown(self):
self.manualMove('Y', False)
@QtCore.Slot()
def driveZDown(self):
self.manualMove('Z', False)
@QtCore.Slot()
def driveUDown(self):
self.manualMove('U', False)
def manualMove(self, axis, positive):
if isinstance(self._machine.action(), ProgrammedMotionController):
return
elif not isinstance(self._machine.action(), ManualMotionController):
self._machine.setAction(ManualMotionController(self._machine))
fast = self._ui.driveFast.isChecked()
if self._ui.drive1Step.isChecked():
self._machine.action().singleStep(axis, positive, fast)
elif self._ui.drive001mm.isChecked():
self._machine.action().manualMove(axis, positive, 10, fast)
elif self._ui.drive01mm.isChecked():
self._machine.action().manualMove(axis, positive, 100, fast)
elif self._ui.drive1mm.isChecked():
self._machine.action().manualMove(axis, positive, 1000, fast)
elif self._ui.drive10mm.isChecked():
self._machine.action().manualMove(axis, positive, 10000, fast)
elif self._ui.drive100mm.isChecked():
self._machine.action().manualMove(axis, positive, 100000, fast)
@QtCore.Slot(int)
def readControlEvent(self, fd):
ev = os.read(fd, 4)
if len(ev) != 4: return
button, x, y, z = struct.unpack('bbbb', ev)
print 'control event: button=%d, x=%d, y=%d, z=%d; time=%f' % (button, x, y, z, time.time())
if self._debounce != None and time.time() - self._debounce < .1:
if x != self._debounceX or y != self._debounceY:
print 'discarding event, bounce detected'
return
if x == -1:
self.manualMove('X', False)
elif x == 1:
self.manualMove('X', True)
if y == -1:
self.manualMove('Y', False)
elif y == 1:
self.manualMove('Y', True)
self._debounce = None
else:
self._debounce = time.time()
self._debounceX = x
self._debounceY = y
if z == -1:
self.manualMove('Z', False)
elif z == 1:
self.manualMove('Z', True)
@QtCore.Slot(int)
def pollStatus(self, fd):
self._machine.cts()
from Converters import GCode
from Control.MachineStatus import *
from Control.GraphicsView import ControlGraphicsView
from ui.MainWindow import Ui_MainWindow
| stesie/PyPC-NC | Control/MainWindow.py | Python | gpl-3.0 | 11,785 | 0.02919 |
import os
import sys
import re
import yaml
import uuid
import glob
from lib.tarantool_server import TarantoolServer
## Get cluster uuid
cluster_uuid = ''
try:
cluster_uuid = yaml.load(server.admin("box.space._schema:get('cluster')",
silent = True))[0][1]
uuid.UUID('{' + cluster_uuid + '}')
print 'ok - cluster uuid'
except Exception as e:
print 'not ok - invalid cluster uuid', e
server.iproto.reconnect() # re-connect with new permissions
print '-------------------------------------------------------------'
print ' gh-696: Check global READ permissions for replication'
print '-------------------------------------------------------------'
# Generate replica cluster UUID
replica_uuid = str(uuid.uuid4())
## Universal read permission is required to perform JOIN/SUBSCRIBE
rows = list(server.iproto.py_con.join(replica_uuid))
print len(rows) == 1 and rows[0].return_message.find('Read access') >= 0 and \
'ok' or 'not ok', '-', 'join without read permissions on universe'
rows = list(server.iproto.py_con.subscribe(cluster_uuid, replica_uuid))
print len(rows) == 1 and rows[0].return_message.find('Read access') >= 0 and \
'ok' or 'not ok', '-', 'subscribe without read permissions on universe'
## Write permission to space `_cluster` is required to perform JOIN
server.admin("box.schema.user.grant('guest', 'read', 'universe')")
server.iproto.reconnect() # re-connect with new permissions
rows = list(server.iproto.py_con.join(replica_uuid))
print len(rows) == 1 and rows[0].return_message.find('Write access') >= 0 and \
'ok' or 'not ok', '-', 'join without write permissions to _cluster'
def check_join(msg):
ok = True
for resp in server.iproto.py_con.join(replica_uuid):
if resp.completion_status != 0:
print 'not ok', '-', msg, resp.return_message
ok = False
server.iproto.reconnect() # the only way to stop JOIN
if not ok:
return
tuples = server.iproto.py_con.space('_cluster').select(replica_uuid, index = 1)
if len(tuples) == 0:
print 'not ok', '-', msg, 'missing entry in _cluster'
return
server_id = tuples[0][0]
print 'ok', '-', msg
return server_id
## JOIN with permissions
server.admin("box.schema.user.grant('guest', 'write', 'space', '_cluster')")
server.iproto.reconnect() # re-connect with new permissions
server_id = check_join('join with granted permissions')
server.iproto.py_con.space('_cluster').delete(server_id)
# JOIN with granted role
server.admin("box.schema.user.revoke('guest', 'read', 'universe')")
server.admin("box.schema.user.revoke('guest', 'write', 'space', '_cluster')")
server.admin("box.schema.user.grant('guest', 'replication')")
server.iproto.reconnect() # re-connect with new permissions
server_id = check_join('join with granted role')
server.iproto.py_con.space('_cluster').delete(server_id)
print '-------------------------------------------------------------'
print 'gh-707: Master crashes on JOIN if it does not have snapshot files'
print 'gh-480: If socket is closed while JOIN, replica wont reconnect'
print '-------------------------------------------------------------'
data_dir = os.path.join(server.vardir, server.name)
for k in glob.glob(os.path.join(data_dir, '*.snap')):
os.unlink(k)
# remember the number of servers in _cluster table
server_count = len(server.iproto.py_con.space('_cluster').select(()))
rows = list(server.iproto.py_con.join(replica_uuid))
print len(rows) > 0 and rows[-1].return_message.find('.snap') >= 0 and \
'ok' or 'not ok', '-', 'join without snapshots'
res = server.iproto.py_con.space('_cluster').select(())
if server_count <= len(res):
print 'ok - _cluster did not change after unsuccessful JOIN'
else:
print 'not ok - _cluster did change after unsuccessful JOIN'
print res
server.admin("box.schema.user.revoke('guest', 'replication')")
server.admin('box.snapshot()')
print '-------------------------------------------------------------'
print 'gh-434: Assertion if replace _cluster tuple for local server'
print '-------------------------------------------------------------'
master_uuid = server.get_param('server')['uuid']
sys.stdout.push_filter(master_uuid, '<master uuid>')
# Invalid UUID
server.admin("box.space._cluster:replace{1, require('uuid').NULL:str()}")
# Update of UUID is not OK
server.admin("box.space._cluster:replace{1, require('uuid').str()}")
# Update of tail is OK
server.admin("box.space._cluster:update(1, {{'=', 3, 'test'}})")
print '-------------------------------------------------------------'
print 'gh-1140: Assertion if replace _cluster tuple for remote server'
print '-------------------------------------------------------------'
# Test that insert is OK
new_uuid = '0d5bd431-7f3e-4695-a5c2-82de0a9cbc95'
server.admin("box.space._cluster:insert{{5, '{0}'}}".format(new_uuid))
server.admin("box.info.vclock[5] == nil")
# Replace with the same UUID is OK
server.admin("box.space._cluster:replace{{5, '{0}'}}".format(new_uuid))
# Replace with a new UUID is not OK
new_uuid = 'a48a19a3-26c0-4f8c-a5b5-77377bab389b'
server.admin("box.space._cluster:replace{{5, '{0}'}}".format(new_uuid))
# Update of tail is OK
server.admin("box.space._cluster:update(5, {{'=', 3, 'test'}})")
# Delete is OK
server.admin("box.space._cluster:delete(5)")
# gh-1219: LSN must not be removed from vclock on unregister
server.admin("box.info.vclock[5] == nil")
# Cleanup
server.stop()
server.deploy()
print '-------------------------------------------------------------'
print 'Start a new replica and check box.info on the start'
print '-------------------------------------------------------------'
# master server
master = server
master_id = master.get_param('server')['id']
master.admin("box.schema.user.grant('guest', 'replication')")
replica = TarantoolServer(server.ini)
replica.script = 'replication-py/replica.lua'
replica.vardir = server.vardir
replica.rpl_master = master
replica.deploy()
replica_id = replica.get_param('server')['id']
replica_uuid = replica.get_param('server')['uuid']
sys.stdout.push_filter(replica_uuid, '<replica uuid>')
replica.admin('box.info.server.id == %d' % replica_id)
replica.admin('not box.info.server.ro')
replica.admin('box.info.server.lsn == 0')
replica.admin('box.info.vclock[%d] == nil' % replica_id)
print '-------------------------------------------------------------'
print 'Modify data to change LSN and check box.info'
print '-------------------------------------------------------------'
replica.admin('box.space._schema:insert{"test", 48}')
replica.admin('box.info.server.lsn == 1')
replica.admin('box.info.vclock[%d] == 1' % replica_id)
print '-------------------------------------------------------------'
print 'Unregister replica and check box.info'
print '-------------------------------------------------------------'
# gh-527: update vclock on delete from box.space._cluster'
master.admin('box.space._cluster:delete{%d} ~= nil' % replica_id)
replica.wait_lsn(master_id, master.get_lsn(master_id))
replica.admin('box.info.server.id ~= %d' % replica_id)
replica.admin('box.info.server.lsn == -1')
# gh-1219: LSN must not be removed from vclock on unregister
replica.admin('box.info.vclock[%d] == 1' % replica_id)
# gh-246: box.info.server.ro is controlled by box.cfg { read_only = xx }
# unregistration doesn't change box.info.server.ro
replica.admin('not box.info.server.ro')
# actually box is read-only if id is not registered
replica.admin('box.space._schema:replace{"test", 48}')
replica.admin('box.cfg { read_only = true }')
replica.admin('box.space._schema:replace{"test", 48}')
replica.admin('box.cfg { read_only = false }')
replica.admin('box.space._schema:replace{"test", 48}')
print '-------------------------------------------------------------'
print 'Re-register replica with the same server_id'
print '-------------------------------------------------------------'
replica.admin('box.cfg { read_only = true }')
master.admin('box.space._cluster:insert{%d, "%s"} ~= nil' %
(replica_id, replica_uuid))
replica.wait_lsn(master_id, master.get_lsn(master_id))
replica.admin('box.info.server.id == %d' % replica_id)
# gh-1219: LSN must not be removed from vclock on unregister
replica.admin('box.info.server.lsn == 1')
replica.admin('box.info.vclock[%d] == 1' % replica_id)
# gh-246: box.info.server.ro is controlled by box.cfg { read_only = xx }
# registration doesn't change box.info.server.ro
replica.admin('box.info.server.ro == true')
# is ro
replica.admin('box.space._schema:replace{"test", 48}')
replica.admin('box.cfg { read_only = false }')
# is not ro
#replica.admin('box.space._schema:replace{"test", 48}')
print '-------------------------------------------------------------'
print 'Re-register replica with a new server_id'
print '-------------------------------------------------------------'
master.admin('box.space._cluster:delete{%d} ~= nil' % replica_id)
replica.wait_lsn(master_id, master.get_lsn(master_id))
replica_id2 = 10
master.admin('box.space._cluster:insert{%d, "%s"} ~= nil' %
(replica_id2, replica_uuid))
replica.wait_lsn(master_id, master.get_lsn(master_id))
replica.admin('box.info.server.id == %d' % replica_id2)
replica.admin('not box.info.server.ro')
replica.admin('box.info.server.lsn == 0')
replica.admin('box.info.vclock[%d] == 1' % replica_id)
replica.admin('box.info.vclock[%d] == nil' % replica_id2)
print '-------------------------------------------------------------'
print 'Check that server_id can\'t be changed by UPDATE'
print '-------------------------------------------------------------'
replica_id3 = 11
server.admin("box.space._cluster:update(%d, {{'=', 1, %d}})" %
(replica_id2, replica_id3))
replica.wait_lsn(master_id, master.get_lsn(master_id))
replica.admin('box.info.server.id == %d' % replica_id2)
replica.admin('not box.info.server.ro')
replica.admin('box.info.server.lsn == 0')
replica.admin('box.info.vclock[%d] == 1' % replica_id)
replica.admin('box.info.vclock[%d] == nil' % replica_id2)
replica.admin('box.info.vclock[%d] == nil' % replica_id3)
print '-------------------------------------------------------------'
print 'Unregister replica and check box.info (second attempt)'
print '-------------------------------------------------------------'
# gh-527: update vclock on delete from box.space._cluster'
master.admin('box.space._cluster:delete{%d} ~= nil' % replica_id2)
replica.wait_lsn(master_id, master.get_lsn(master_id))
replica.admin('box.info.server.id ~= %d' % replica_id)
# Backward-compatibility: box.info.server.lsn is -1 instead of nil
replica.admin('box.info.server.lsn == -1')
replica.admin('box.info.vclock[%d] == nil' % replica_id2)
print '-------------------------------------------------------------'
print 'JOIN replica to read-only master'
print '-------------------------------------------------------------'
#gh-1230 Assertion vclock_has on attempt to JOIN read-only master
failed = TarantoolServer(server.ini)
failed.script = 'replication-py/failed.lua'
failed.vardir = server.vardir
failed.rpl_master = replica
failed.name = "failed"
try:
failed.deploy()
except Exception as e:
line = "ER_READONLY"
if failed.logfile_pos.seek_once(line) >= 0:
print "'%s' exists in server log" % line
print '-------------------------------------------------------------'
print 'Sync master with replica'
print '-------------------------------------------------------------'
# Sync master with replica
replication_source = yaml.load(replica.admin('box.cfg.listen', silent = True))[0]
sys.stdout.push_filter(replication_source, '<replication_source>')
master.admin("box.cfg{ replication_source = '%s' }" % replication_source)
master.wait_lsn(replica_id, replica.get_lsn(replica_id))
master.admin('box.info.vclock[%d] == 1' % replica_id)
master.admin('box.info.vclock[%d] == nil' % replica_id2)
master.admin('box.info.vclock[%d] == nil' % replica_id3)
master.admin("box.cfg{ replication_source = '' }")
replica.stop()
replica.cleanup(True)
print '-------------------------------------------------------------'
print 'Start a new replica and check that server_id, LSN is re-used'
print '-------------------------------------------------------------'
#
# gh-1219: Proper removal of servers with non-zero LSN from _cluster
#
# Snapshot is required. Otherwise a relay will skip records made by previous
# replica with the re-used id.
master.admin("box.snapshot()")
master.admin('box.info.vclock[%d] == 1' % replica_id)
replica = TarantoolServer(server.ini)
replica.script = 'replication-py/replica.lua'
replica.vardir = server.vardir
replica.rpl_master = master
replica.deploy()
replica.wait_lsn(master_id, master.get_lsn(master_id))
# Check that replica_id was re-used
replica.admin('box.info.server.id == %d' % replica_id)
replica.admin('not box.info.server.ro')
# All records were succesfully recovered.
# Replica should have the same vclock as master.
master.admin('box.info.vclock[%d] == 1' % replica_id)
replica.admin('box.info.vclock[%d] == 1' % replica_id)
master.admin('box.info.vclock[%d] == nil' % replica_id2)
replica.admin('box.info.vclock[%d] == nil' % replica_id2)
master.admin('box.info.vclock[%d] == nil' % replica_id3)
replica.admin('box.info.vclock[%d] == nil' % replica_id3)
print '-------------------------------------------------------------'
print 'Cleanup'
print '-------------------------------------------------------------'
replica.stop()
replica.cleanup(True)
# Cleanup
sys.stdout.pop_filter()
master.admin("box.schema.user.revoke('guest', 'replication')")
| mejedi/tarantool | test/replication-py/cluster.test.py | Python | bsd-2-clause | 13,538 | 0.001847 |
# -*- coding: utf-8 -*-
# __author__: Yixuan LI
# __email__: [email protected]
import os
import json
import re
from optparse import OptionParser
import tweepy
import time
class UserTimeline:
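	# Collects the timelines of the unique users found in a directory of
	# streamed tweets (one JSON object per line) and writes each user's
	# tweets to a separate {userID}.json file under outputDir.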
def __init__(self,inputDir,outputDir):
self.inputDir = inputDir
self.outputDir = outputDir
os.system("mkdir -p %s"%(outputDir))
# Get the names of the files under the input directory and save them in a list
self.fileList = os.listdir(inputDir)
print self.fileList
self.userHash = {} # [key,value] pair to record the unique users in the tweets
self.uniqueUserCount = 0 # count unique users in the dataset
self.tweetCount = 0 # total tweets processed
self.api = None
def authentication(self):
consumer_key="z86C8djY3bYOPD1WkYV73nVP6"
consumer_secret="BT8oKrcj955MKjv0qS8Kra2Iw91E3uSMTqEVurfTmKjXfG0hNm"
access_token="746349096-Bz1n8T6vNEFBAMG2YqVdJFOtrM321d5HeupxMlxM"
access_token_secret="ZZQZsjvJXnIlyl04Mg2vCxS8g122b3AljpiytiKCKRFPL"
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
self.api = tweepy.API(auth)
print "authentication finished"
def get_user_id(self):
written = 0
if os.path.exists(self.outputDir + "/" + "uniqueUserID.txt"):
pass
else:
for tweetFile in self.fileList[1:]:
with open(self.inputDir+"/"+tweetFile,'r') as fin:
for line in fin:
try:
lineContents = json.loads(line) # load a line
self.tweetCount += 1
print self.tweetCount # for debugging
except:
continue
try:
if lineContents["coordinates"] is not None:
continue
else:
# extract user's id
userID = lineContents["user"]["id"]
# extract tweet text and convert the string to lower case (http://stackoverflow.com/questions/6797984/how-to-convert-string-to-lowercase-in-python)
#tweet = lineContents["text"].lower()
if not self.userHash.has_key(userID): # if the user has not been counted
self.uniqueUserCount += 1 # count the number of unique users
self.userHash[userID] = True
fileNum = int(self.uniqueUserCount/7250 + 1)
with open(self.outputDir + "/" + "uniqueUserID_"+str(fileNum)+".txt","a") as fileout:
written += 1
fileout.write(str(userID))
fileout.write("\n")
print written," written"
except:
continue
print "There are ", self.uniqueUserCount, "unique users"
print self.tweetCount, " tweets processed"
def get_user_timeline(self):
with open(self.outputDir + "/" + "uniqueUserID_6.txt",'r') as fin:
for userID in fin:
# store the tweets of each user in a single file named by the {userID}.json
filePath = self.outputDir + "/" + str(userID[:-1])+".json"
print userID
if os.path.exists(filePath):
with open(filePath,'r') as myfile:
count = sum(1 for line in myfile)
if count > 900:
continue
else:
# http://stackoverflow.com/questions/6996603/how-do-i-delete-a-file-or-folder-in-python
os.remove(filePath)
pageCount = 1
trialTime = 0
# get user timeline tweets
while pageCount < 6:
print "Collecting", pageCount, " -th page"
# open the output file in append mode
self.fout = open(filePath,"a")
try:
tweets = self.api.user_timeline(id=userID,count=200,page=pageCount)
pageCount += 1
except:
time.sleep(70)
trialTime += 1
if trialTime == 2:
pageCount = 8
continue
# write to file
# Note that data returned by api.user_timeline is status object
for tweet in tweets:
print tweet.text
# convert tweepy status object to json format
# http://stackoverflow.com/questions/27900451/convert-tweepy-status-object-into-json
self.fout.write(json.dumps(tweet._json))
self.fout.write('\n')
time.sleep(70) # rate limit (15 requests per 15 minutes window)
if __name__=='__main__':
#########################################################################################
# Parse the arguments
class MyParser(OptionParser):
def format_epilog(self, formatter):
return self.epilog
usage = "usage: python plot_stats.py [options]"
description = """
"""
epilog = """
"""
parser = MyParser(usage, description=description,epilog=epilog)
parser.add_option("--inputDir", "--input file of twitter data", dest="input_path", default=None,
help="input directory of twitter streaming data in JSON format [default: None]")
parser.add_option("--outputDir", "--output directory of twitter user timeline data", dest="output_path", default=None,
help="output directory of twitter user timeline data [default: None]")
(options, args) = parser.parse_args()
# input directory
inputDir = options.input_path
# output directory
outputDir = options.output_path
########################################################################
getter = UserTimeline(inputDir,outputDir)
getter.authentication()
#getter.get_user_id()
getter.get_user_timeline()
| YixuanLi/geo-tweet | twitter-timeline/get_non_locator_timeline.py | Python | gpl-2.0 | 5,282 | 0.029156 |
# Copyright (C) 2014 Pierre de Buyl
# Copyright (C) 2012,2013
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
r"""
*************************************
**espressopp.analysis.TotalVelocity**
*************************************
.. function:: espressopp.analysis.TotalVelocity(system)
:param system: The system object.
:type system: espressopp.System
.. function:: espressopp.analysis.TotalVelocity.compute()
Compute the total velocity of the system.
:rtype: float
.. function:: espressopp.analysis.TotalVelocity.reset()
Subtract the total velocity of the system from every particle.
Examples
---------
Reset the velocity
++++++++++++++++++++
>>> total_velocity = espressopp.analysis.TotalVelocity(system)
>>> total_velocity.reset()
Extension to integrator
++++++++++++++++++++++++++++++++++++++++++++
This extension can also be attached to integrator and run `reset()` every `n-th` steps.
>>> total_velocity = espressopp.analysis.TotalVelocity(system)
>>> ext_remove_com = espressopp.analysis.ExtAnalyze(total_velocity, 10)
>>> integrator.addExtension(ext_remove_com)
"""
from espressopp.esutil import cxxinit
from espressopp import pmi
from espressopp.analysis.Observable import *
from _espressopp import analysis_TotalVelocity
class TotalVelocityLocal(ObservableLocal, analysis_TotalVelocity):
def __init__(self, system):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
cxxinit(self, analysis_TotalVelocity, system)
def compute(self):
return self.cxxclass.compute(self)
def reset(self):
return self.cxxclass.reset(self)
if pmi.isController :
class TotalVelocity(Observable):
__metaclass__ = pmi.Proxy
pmiproxydefs = dict(
cls = 'espressopp.analysis.TotalVelocityLocal',
pmicall = [ "compute", "reset" ],
pmiproperty = ["v"]
)
| capoe/espressopp.soap | src/analysis/TotalVelocity.py | Python | gpl-3.0 | 2,733 | 0.011343 |
# -*- coding: utf-8 -*-
# This file is part of wger Workout Manager.
#
# wger Workout Manager is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# wger Workout Manager is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from tastypie import fields
from tastypie.authentication import (
ApiKeyAuthentication,
MultiAuthentication,
SessionAuthentication
)
from tastypie.resources import ModelResource
from tastypie.constants import ALL, ALL_WITH_RELATIONS
from wger.core.api.resources import DaysOfWeekResource
from wger.exercises.api.resources import ExerciseResource
from wger.utils.resources import UserObjectsOnlyAuthorization
from wger.manager.models import (
WorkoutSession,
Workout,
Schedule,
ScheduleStep,
Day,
Set,
Setting,
WorkoutLog
)
class WorkoutResource(ModelResource):
'''
Resource for workouts
'''
days = fields.ToManyField('wger.manager.api.resources.DayResource', 'day_set')
def authorized_read_list(self, object_list, bundle):
'''
Filter to own objects
'''
return object_list.filter(user=bundle.request.user)
class Meta:
queryset = Workout.objects.all()
authentication = ApiKeyAuthentication()
authorization = UserObjectsOnlyAuthorization()
filtering = {'id': ALL,
"comment": ALL,
"creation_date": ALL}
class WorkoutSessionResource(ModelResource):
'''
Resource for workout sessions
'''
workout = fields.ToOneField('wger.manager.api.resources.WorkoutResource', 'workout')
def authorized_read_list(self, object_list, bundle):
'''
Filter to own objects
'''
return object_list.filter(user=bundle.request.user)
class Meta:
queryset = WorkoutSession.objects.all()
authentication = MultiAuthentication(SessionAuthentication(), ApiKeyAuthentication())
authorization = UserObjectsOnlyAuthorization()
filtering = {'id': ALL,
"date": ALL,
"time_start": ALL,
"time_end": ALL}
class ScheduleStepResource(ModelResource):
'''
Resource for schedule steps
'''
workout = fields.ToOneField(WorkoutResource, 'workout')
schedule = fields.ToOneField('wger.manager.api.resources.ScheduleResource', 'schedule')
def authorized_read_list(self, object_list, bundle):
'''
Filter to own objects
'''
return object_list.filter(schedule__user=bundle.request.user)
class Meta:
queryset = ScheduleStep.objects.all()
authentication = ApiKeyAuthentication()
authorization = UserObjectsOnlyAuthorization()
filtering = {'id': ALL,
'schedule': ALL_WITH_RELATIONS,
'workout': ALL_WITH_RELATIONS}
class ScheduleResource(ModelResource):
'''
Resource for schedules
'''
steps = fields.ToManyField(ScheduleStepResource, 'schedulestep_set')
def authorized_read_list(self, object_list, bundle):
'''
Filter to own objects
'''
return object_list.filter(user=bundle.request.user)
class Meta:
queryset = Schedule.objects.all()
authentication = ApiKeyAuthentication()
authorization = UserObjectsOnlyAuthorization()
filtering = {'id': ALL,
'is_active': ALL,
'is_loop': ALL,
'name': ALL}
class DayResource(ModelResource):
'''
Resource for training days
'''
workout = fields.ToOneField(WorkoutResource, 'training')
days_of_week = fields.ToManyField(DaysOfWeekResource, 'day')
def authorized_read_list(self, object_list, bundle):
'''
Filter to own objects
'''
return object_list.filter(training__user=bundle.request.user)
class Meta:
queryset = Day.objects.all()
authentication = ApiKeyAuthentication()
authorization = UserObjectsOnlyAuthorization()
filtering = {'id': ALL,
'description': ALL,
'workout': ALL_WITH_RELATIONS}
class SetResource(ModelResource):
'''
Resource for training sets
'''
day = fields.ToOneField(DayResource, 'exerciseday')
exercises = fields.ToManyField(ExerciseResource, 'exercises')
def authorized_read_list(self, object_list, bundle):
'''
Filter to own objects
'''
return object_list.filter(exerciseday__training__user=bundle.request.user)
class Meta:
queryset = Set.objects.all()
authentication = ApiKeyAuthentication()
authorization = UserObjectsOnlyAuthorization()
filtering = {'id': ALL,
'day': ALL_WITH_RELATIONS,
'order': ALL,
'sets': ALL}
class SettingResource(ModelResource):
'''
Resource for training settings
'''
set = fields.ToOneField(SetResource, 'set')
exercise = fields.ToOneField(ExerciseResource, 'exercise')
def authorized_read_list(self, object_list, bundle):
'''
Filter to own objects
'''
return object_list.filter(set__exerciseday__training__user=bundle.request.user)
class Meta:
queryset = Setting.objects.all()
authentication = ApiKeyAuthentication()
authorization = UserObjectsOnlyAuthorization()
filtering = {'id': ALL,
'exercise': ALL_WITH_RELATIONS,
'order': ALL,
'reps': ALL,
'set': ALL_WITH_RELATIONS}
class WorkoutLogResource(ModelResource):
'''
Resource for a workout log
'''
exercise = fields.ToOneField(ExerciseResource, 'exercise')
workout = fields.ToOneField(WorkoutResource, 'workout')
def authorized_read_list(self, object_list, bundle):
'''
Filter to own objects
'''
return object_list.filter(user=bundle.request.user)
class Meta:
queryset = WorkoutLog.objects.all()
authentication = ApiKeyAuthentication()
authorization = UserObjectsOnlyAuthorization()
filtering = {'id': ALL,
'date': ALL,
'exercise': ALL_WITH_RELATIONS,
'reps': ALL,
'weight': ALL,
'workout': ALL_WITH_RELATIONS}
| DeveloperMal/wger | wger/manager/api/resources.py | Python | agpl-3.0 | 6,864 | 0.000874 |
# Multiple Linear Regression
# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Importing the dataset
dataset = pd.read_csv('50_Startups.csv')
X = dataset.iloc[:, :-1].values
y = dataset.iloc[:, 4].values
# One hot encoding
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
labelencoder_X = LabelEncoder()
X[:, 3] = labelencoder_X.fit_transform(X[:, 3])
onehotencoder = OneHotEncoder(categorical_features = [3])
X = onehotencoder.fit_transform(X).toarray()
# Avoiding the Dummy Variable Trap
X = X[:, 1:]
# scikit-learn's LinearRegression adds the intercept (constant) term automatically, but statsmodels' OLS does not,
# so we prepend a column of ones to X to act as the constant:
X = np.append(arr = np.ones((50,1)).astype(int), values = X, axis=1)
# Building the optimal model using Backwards Elimination
import statsmodels.formula.api as sm
# Step 1: select the significance level to stay in the model (SL)
SL = 0.05
# Step 2, using Ordinary Least Squares from statsmodels (instead of Linear Regression from linear_model)
X_opt = X[:,[0,1,2,3,4,5]]
regressor_OLS = sm.OLS(endog = y, exog = X_opt).fit()
# Step 3: look at the summary and find the predictor with the highest P-value
regressor_OLS.summary()
# Step 4: if that P-value is above SL, remove the predictor
X_opt = X[:,[0,1,3,4,5]]
# Step 5: refit the model without it, then repeat from Step 3
regressor_OLS = sm.OLS(endog = y, exog = X_opt).fit()
# Step 3
regressor_OLS.summary()
# Step 4
X_opt = X[:,[0,3,4,5]]
# Step 5
regressor_OLS = sm.OLS(endog = y, exog = X_opt).fit()
# Step 3
regressor_OLS.summary()
# Step 4
X_opt = X[:,[0,3,5]]
# Step 5
regressor_OLS = sm.OLS(endog = y, exog = X_opt).fit()
# Step 3
regressor_OLS.summary()
# Step 4
X_opt = X[:,[0,3]]
# Step 5
regressor_OLS = sm.OLS(endog = y, exog = X_opt).fit()
# Step 3
regressor_OLS.summary()
# Finished
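# The cycle above repeats the same fit / inspect / remove steps by hand. Below is a
# sketch (not part of the original course material) that automates the same backward
# elimination; it assumes X already contains the column of ones added earlier and
# reuses the SL threshold defined above.
def backward_elimination(X, y, significance_level=0.05):
    columns = list(range(X.shape[1]))
    while True:
        regressor = sm.OLS(endog=y, exog=X[:, columns]).fit()
        worst = int(np.argmax(regressor.pvalues))
        if regressor.pvalues[worst] <= significance_level:
            break
        # drop the predictor with the highest P-value and refit
        del columns[worst]
    return X[:, columns], regressor

# Example call (commented out so the manual walkthrough above stays the main result):
# X_modeled, final_model = backward_elimination(X, y, SL)
# final_model.summary()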
| balazssimon/ml-playground | udemy/Machine Learning A-Z/Part 2 - Regression/Section 5 - Multiple Linear Regression/backward_elimination_manual.py | Python | apache-2.0 | 1,681 | 0.030934 |
from UI import UI
from View import View
import Elements
import Colors
| v4nz666/7drl2017 | RoguePy/UI/__init__.py | Python | gpl-3.0 | 70 | 0 |
import asyncio
from aio_pika import connect, IncomingMessage, ExchangeType
loop = asyncio.get_event_loop()
async def on_message(message: IncomingMessage):
async with message.process():
print("[x] %r" % message.body)
async def main():
# Perform connection
connection = await connect(
"amqp://guest:guest@localhost/", loop=loop
)
# Creating a channel
channel = await connection.channel()
await channel.set_qos(prefetch_count=1)
logs_exchange = await channel.declare_exchange(
"logs", ExchangeType.FANOUT
)
# Declaring queue
queue = await channel.declare_queue(exclusive=True)
# Binding the queue to the exchange
await queue.bind(logs_exchange)
    # Start consuming messages from the exclusive queue bound to the 'logs' exchange
await queue.consume(on_message)
if __name__ == "__main__":
loop = asyncio.get_event_loop()
loop.create_task(main())
# we enter a never-ending loop that waits for data
# and runs callbacks whenever necessary.
print(" [*] Waiting for logs. To exit press CTRL+C")
loop.run_forever()
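# Companion sketch (assumption, not part of this example file): the matching publisher
# declares the same fanout exchange and publishes without a routing key, so every
# queue bound to "logs" receives a copy of the message.
#
#   from aio_pika import Message
#
#   async def emit_log(text: str):
#       connection = await connect("amqp://guest:guest@localhost/")
#       channel = await connection.channel()
#       logs_exchange = await channel.declare_exchange("logs", ExchangeType.FANOUT)
#       await logs_exchange.publish(Message(text.encode()), routing_key="")
#       await connection.close()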
| mosquito/aio-pika | docs/source/rabbitmq-tutorial/examples/3-publish-subscribe/receive_logs.py | Python | apache-2.0 | 1,094 | 0 |
# DFF -- An Open Source Digital Forensics Framework
# Copyright (C) 2009-2011 ArxSys
# This program is free software, distributed under the terms of
# the GNU General Public License Version 2. See the LICENSE file
# at the top of the source tree.
#
# See http://www.digital-forensic.org for more information about this
# project. Please do not directly contact any of the maintainers of
# DFF for assistance; the project provides a web site, mailing lists
# and IRC channels for your use.
#
# Author(s):
# Solal Jacob <[email protected]>
#
__dff_module_info_version__ = "1.0.0"
from api.vfs import *
from api.module.script import *
from api.loader import *
from api.module.module import *
from api.taskmanager.taskmanager import *
from api.types.libtypes import Parameter, Variant, Argument, typeId, ConfigManager
from datetime import timedelta, datetime
import time  # used by getmodinfo() for elapsed-time computation
from ui.console.utils import VariantTreePrinter
class INFO(Script, VariantTreePrinter):
def __init__(self):
Script.__init__(self, "info")
VariantTreePrinter.__init__(self)
self.loader = loader.loader()
self.tm = TaskManager()
self.cm = ConfigManager.Get()
def show_config(self, modname):
conf = self.cm.configByName(modname)
res = "\n\tConfig:"
arguments = conf.arguments()
for argument in arguments:
res += "\n\t\tname: " + str(argument.name())
res += "\n\t\tdescription: " + str(argument.description())
if argument.inputType() == Argument.Empty:
res += "\n\t\tno input parameters"
else:
res += "\n\t\ttype: " + str(typeId.Get().typeToName(argument.type()))
res += "\n\t\trequirement: "
if argument.requirementType() == Argument.Optional:
res += "optional"
else:
res += "mandatory"
res += "\n\t\tinput parameters: "
if argument.parametersType() == Parameter.NotEditable:
res += "not editable "
else:
res += "editable "
if argument.inputType() == Argument.List:
res += "list"
else:
res += "single"
pcount = argument.parametersCount()
if pcount != 0:
parameters = argument.parameters()
res += "\n\t\tpredefined parameters: "
for parameter in parameters:
if argument.type() == typeId.Node:
res += str(parameter.value().absolute())
else:
res += parameter.toString()
pcount -= 1
if pcount != 0:
res += ", "
res += "\n"
constants = conf.constants()
if len(constants) > 0:
res += "\n\tConstant: \t"
for constant in constants:
res += "\n\t\tname: " + str(constant.name())
res += "\n\t\tdescription: " + str(constant.description())
res += "\n\t\ttype: " + str(typeId.Get().typeToName(constant.type()))
cvalues = constant.values()
cvallen = len(cvalues)
if cvallen > 0:
res += "\n\t\tvalues: "
for cvalue in cvalues:
if cvalue.type() == typeId.Node:
res += str(cvalue.value().absolute())
else:
res += cvalue.toString()
cvallen -= 1
if cvallen != 0:
res += ", "
res += "\n"
return res
def show_arg(self, args):
res = ""
if len(args):
res += "\n\n\t\tArguments: \t"
for argname in args.keys():
res += "\n\t\t\tname: " + argname
res += "\n\t\t\tparameters: "
val = args[argname]
if val.type() == typeId.List:
vlist = val.value()
vlen = len(vlist)
for item in vlist:
if item.type == typeId.Node:
res += str(val.value().absolute())
else:
res += item.toString()
vlen -= 1
if vlen != 0:
res += ", "
elif val.type() == typeId.Node:
res += str(val.value().absolute())
return res
def show_res(self, results):
res = self.fillMap(3, results, "\n\n\t\tResults:")
return res
def c_display(self):
print self.info
def getmodinfo(self, modname):
conf = self.cm.configByName(modname)
if conf == None:
return
self.lproc = self.tm.lprocessus
self.info += "\n" + modname + self.show_config(modname)
for proc in self.lproc:
if proc.mod.name == modname:
self.info += "\n\tProcessus " + str(proc.pid)
stime = datetime.fromtimestamp(proc.timestart)
self.info += "\n\t\texecution started at : " + str(stime)
if proc.timeend:
etime = datetime.fromtimestamp(proc.timeend)
self.info += "\n\t\texecution finished at : " + str(etime)
else:
etime = datetime.fromtimestamp(time.time())
delta = etime - stime
self.info += "\n\t\texecution time: " + str(delta)
self.info += self.show_arg(proc.args)
self.info += self.show_res(proc.res)
def start(self, args):
self.info = ""
if args.has_key("modules"):
modnames = args['modules'].value()
for modname in modnames:
self.getmodinfo(modname.value())
else:
self.modules = self.loader.modules
for modname in self.modules:
self.getmodinfo(modname)
class info(Module):
"""Show info on loaded drivers: configuration, arguments, results
"""
def __init__(self):
Module.__init__(self, "info", INFO)
self.tags = "builtins"
self.conf.addArgument({"name": "modules",
"description": "Display information concerning provided modules",
"input": Argument.Optional|Argument.List|typeId.String})
| halbbob/dff | modules/builtins/info.py | Python | gpl-2.0 | 5,653 | 0.012383 |
#!/usr/bin/env python
import os
import sys
import django
from django.conf import settings
DEFAULT_SETTINGS = dict(
INSTALLED_APPS=[
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sites",
"pinax.pinax_hello",
"pinax.pinax_hello.tests"
],
MIDDLEWARE_CLASSES=[],
DATABASES={
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": ":memory:",
}
},
SITE_ID=1,
ROOT_URLCONF="pinax.pinax_hello.tests.urls",
SECRET_KEY="notasecret",
)
def runtests(*test_args):
if not settings.configured:
settings.configure(**DEFAULT_SETTINGS)
django.setup()
parent = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, parent)
try:
from django.test.runner import DiscoverRunner
runner_class = DiscoverRunner
test_args = ["pinax.pinax_hello.tests"]
except ImportError:
from django.test.simple import DjangoTestSuiteRunner
runner_class = DjangoTestSuiteRunner
test_args = ["tests"]
failures = runner_class(verbosity=1, interactive=True, failfast=False).run_tests(test_args)
sys.exit(failures)
if __name__ == "__main__":
runtests(*sys.argv[1:])
| bennybauer/pinax-hello | runtests.py | Python | mit | 1,274 | 0.000785 |
# -*- coding: utf-8 -*-
from core.toad.generictask import GenericTask
from lib.images import Images
__author__ = "Mathieu Desrosiers, Arnaud Bore"
__copyright__ = "Copyright (C) 2016, TOAD"
__credits__ = ["Mathieu Desrosiers", "Arnaud Bore"]
class TensorMrtrix(GenericTask):
def __init__(self, subject):
GenericTask.__init__(self, subject, 'upsampling', 'registration', 'masking', 'qa')
def implement(self):
dwi = self.getUpsamplingImage('dwi', 'upsample')
bFile = self.getUpsamplingImage('grad', None, 'b')
mask = self.getRegistrationImage('mask', 'resample')
iterWLS = self.get('iter') # Number of iteration for tensor estimations
tensorsMrtrix = self.__produceTensors(dwi, bFile, iterWLS, mask)
self.__produceMetrics(tensorsMrtrix, mask, dwi)
# convert diffusion-weighted images to tensor images.
def __produceTensors(self, source, encodingFile, iterWLS, mask=None):
self.info("Starting DWI2Tensor from mrtrix using weighted linear least squares estimator.")
tmp = self.buildName(source, "tmp")
target = self.buildName(source, "tensor")
cmd = "dwi2tensor {} {} -iter {} -grad {} -nthreads {} -quiet ".format(source, tmp, iterWLS, encodingFile, self.getNTreadsMrtrix())
if mask is not None:
cmd += "-mask {}".format(mask)
self.launchCommand(cmd)
return self.rename(tmp, target)
def __produceMetrics(self, source, mask, target):
self.info("Launch tensor2metric from mrtrix.\n")
adc = self.buildName(target, "adc")
fa = self.buildName(target, "fa")
vector = self.buildName(target, "vector")
adImage = self.buildName(target, "ad")
rdImage = self.buildName(target, "rd")
mdImage = self.buildName(target, "md")
value2 = self.buildName(target, "l2")
value3 = self.buildName(target, "l3")
modulate = self.get('modulate')
cmd1 = "tensor2metric {} -adc {} -fa {} -num 1 -vector {} -value {} -modulate {} -nthreads {} -quiet "\
.format(source, adc, fa, vector, adImage , modulate, self.getNTreadsMrtrix())
cmd2 = "tensor2metric {} -num 2 -value {} -modulate {} -nthreads {} -quiet "\
.format(source, value2, modulate, self.getNTreadsMrtrix())
cmd3 = "tensor2metric {} -num 3 -value {} -modulate {} -nthreads {} -quiet "\
.format(source, value3, modulate, self.getNTreadsMrtrix())
for cmd in [cmd1, cmd2, cmd3]:
if mask is not None:
cmd += "-mask {} ".format(mask)
self.launchCommand(cmd)
cmd = "mrmath {} {} mean {} -nthreads {} -quiet ".format(value2, value3, rdImage, self.getNTreadsMrtrix())
self.launchCommand(cmd)
cmd = "mrmath {} {} {} mean {} -nthreads {} -quiet ".format(adImage, value2, value3, mdImage, self.getNTreadsMrtrix())
self.launchCommand(cmd)
def isIgnore(self):
return self.get("ignore")
def meetRequirement(self):
return Images((self.getUpsamplingImage('dwi', 'upsample'), "upsampled diffusion"),
(self.getUpsamplingImage('grad', None, 'b'), "gradient encoding b file"),
(self.getRegistrationImage('mask', 'resample'), 'brain mask'))
def isDirty(self):
return Images((self.getImage("dwi", "tensor"), "mrtrix tensor"),
(self.getImage('dwi', 'adc'), "mean apparent diffusion coefficient (ADC)"),
(self.getImage('dwi', 'vector'), "selected eigenvector(s)"),
(self.getImage('dwi', 'fa'), "fractional anisotropy"),
(self.getImage('dwi', 'ad'), "selected eigenvalue(s) AD" ),
(self.getImage('dwi', 'rd'), "selected eigenvalue(s) RD"),
(self.getImage('dwi', 'md'), "mean diffusivity"))
def qaSupplier(self):
"""Create and supply images for the report generated by qa task
"""
qaImages = Images()
softwareName = 'mrtrix'
#Set information
information = "Estimation using WLS with {} iteration(s)".format(self.get('iter'))
qaImages.setInformation(information)
#Get images
mask = self.getRegistrationImage('mask', 'resample')
#Build qa images
tags = (
('fa', 0.7, 'Fractional anisotropy'),
('ad', 0.005, 'Axial Diffusivity'),
('md', 0.005, 'Mean Diffusivity'),
('rd', 0.005, 'Radial Diffusivity'),
)
for postfix, vmax, description in tags:
image = self.getImage('dwi', postfix)
if image:
imageQa = self.plot3dVolume(
image, fov=mask, vmax=vmax,
colorbar=True, postfix=softwareName)
qaImages.append((imageQa, description))
return qaImages
| mathieudesro/toad | tasks/11-tensormrtrix.py | Python | gpl-2.0 | 4,911 | 0.007127 |
import logging
import os
import shutil
from fusesoc.provider.provider import Provider
from fusesoc.utils import Launcher
logger = logging.getLogger(__name__)
class Coregen(Provider):
def _checkout(self, local_dir):
script_file = self.config.get('script_file')
project_file = self.config.get('project_file')
extra_files = self.config.get('extra_files')
logger.info("Using Coregen to generate project " + project_file)
if not os.path.isdir(local_dir):
os.makedirs(local_dir)
src_files = [script_file, project_file]
if extra_files:
src_files += extra_files.split()
for f in src_files:
f_src = os.path.join(self.core_root, f)
f_dst = os.path.join(local_dir, f)
if os.path.exists(f_src):
d_dst = os.path.dirname(f_dst)
if not os.path.exists(d_dst):
os.makedirs(d_dst)
shutil.copyfile(f_src, f_dst)
else:
logger.error('Cannot find file %s' % f_src)
args = ['-r',
'-b', script_file,
'-p', project_file]
Launcher('coregen', args, cwd=local_dir).run()
| imphil/fusesoc | fusesoc/provider/coregen.py | Python | gpl-3.0 | 1,221 | 0.002457 |
# Copyright (C) 2008-2012 Joachim B. Haga and Fredrik Valdmanis
#
# This file is part of DOLFIN.
#
# DOLFIN is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# DOLFIN is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with DOLFIN. If not, see <http://www.gnu.org/licenses/>.
#
# Modified by Martin Sandve Alnaes, 2008.
# Modified by Anders Logg, 2008-2010.
#
# First added: 2008-03-05
# Last changed: 2013-05-29
import os
import dolfin.cpp as cpp
import ufl
__all__ = ['plot']
# Compatibility with book
def _VTKPlotter_write_ps(self, *args, **kwargs) :
print "*** Warning: VTKPlotter::write_ps() is not implemented -- use write_pdf instead"
def plot(object, *args, **kwargs):
"""
Plot given object.
*Arguments*
object
a :py:class:`Mesh <dolfin.cpp.Mesh>`, a :py:class:`MeshFunction
<dolfin.cpp.MeshFunction>`, a :py:class:`Function
            <dolfin.functions.function.Function>`, a :py:class:`Expression
            <dolfin.cpp.Expression>`, a :py:class:`DirichletBC
            <dolfin.cpp.DirichletBC>` or a :py:class:`FiniteElement
            <ufl.FiniteElement>`.
*Examples of usage*
In the simplest case, to plot only e.g. a mesh, simply use
.. code-block:: python
mesh = UnitSquare(4,4)
plot(mesh)
    Use the ``title`` argument to specify the title of the plot
    .. code-block:: python
        plot(mesh, title="Finite element mesh")
It is also possible to plot an element
.. code-block:: python
element = FiniteElement("BDM", tetrahedron, 3)
plot(element)
Vector valued functions can be visualized with an alternative mode
.. code-block:: python
plot(u, mode = "glyphs")
A more advanced example
.. code-block:: python
plot(u,
wireframe = True, # use wireframe rendering
interactive = False, # do not hold plot on screen
scalarbar = False, # hide the color mapping bar
hardcopy_prefix = "myplot", # default plotfile name
scale = 2.0 # scale the warping/glyphs
title = "Fancy plot" # Set your own title
)
"""
mesh = kwargs.get('mesh')
p = cpp.Parameters()
for key in kwargs:
# If there is a "mesh" kwarg it should not be added to the parameters
if key != "mesh":
try:
p.add(key, kwargs[key])
except TypeError:
cpp.warning("Incompatible type for keyword argument \"%s\". Ignoring." % key)
# Plot element
if isinstance(object, ufl.FiniteElementBase):
if os.environ.get("DOLFIN_NOPLOT", "0") != "0": return
import ffc
return ffc.plot(object, *args, **kwargs)
if mesh is None and len(args) == 1 and isinstance(args[0], cpp.Mesh):
mesh = args[0]
# Plot expression
if isinstance(object, cpp.Expression):
if mesh is None:
raise TypeError, "expected a mesh when plotting an expression."
return cpp.plot(object, mesh, p)
# Try to project if object is not a standard plottable type
if not isinstance(object, (cpp.Function, cpp.Expression, cpp.Mesh,
cpp.DirichletBC, cpp.MeshFunction, cpp.MeshFunctionBool,
cpp.MeshFunctionInt, cpp.MeshFunctionDouble,
cpp.MeshFunctionSizet, cpp.DirichletBC, cpp.CSGGeometry)):
from dolfin.fem.projection import project
try:
cpp.info("Object cannot be plotted directly, projecting to"\
" piecewise linears.")
object = project(object, mesh=mesh)
except Exception as e:
raise RuntimeError(("Don't know how to plot given object:\n %s\n"\
"and projection failed:\n %s") % (str(object), str(e)))
plot_object = cpp.plot(object, p)
plot_object.write_ps = _VTKPlotter_write_ps
# Avoid premature deletion of plotted objects if they go out of scope
# before the plot window is closed. The plotter itself is safe, since it's
# created in the plot() C++ function, not directly from Python. But the
# Python plotter proxy may disappear, so we can't store the references
# there.
global _objects_referenced_from_plot_windows
_objects_referenced_from_plot_windows[plot_object.key()] = (object, mesh, p)
return plot_object
_objects_referenced_from_plot_windows = {}
| maciekswat/dolfin_1.3.0 | site-packages/dolfin/common/plotting.py | Python | gpl-3.0 | 4,997 | 0.003602 |
from DIRAC import gLogger
from DIRAC.Core.Security.X509Chain import X509Chain
from DIRAC.Core.DISET.ThreadConfig import ThreadConfig
from DIRAC.ConfigurationSystem.Client.Helpers import Registry
from DIRAC.Core.DISET.AuthManager import AuthManager
from WebAppDIRAC.Lib.SessionData import SessionData
from WebAppDIRAC.Lib import Conf
import ssl
import functools
import sys
import types
import json
import traceback
import tornado.web
import tornado.ioloop
import tornado.gen
import tornado.stack_context
import tornado.websocket
from concurrent.futures import ThreadPoolExecutor
global gThreadPool
gThreadPool = ThreadPoolExecutor( 100 )
class WErr( tornado.web.HTTPError ):
def __init__( self, code, msg = "", **kwargs ):
super( WErr, self ).__init__( code, str( msg ) or None )
for k in kwargs:
setattr( self, k, kwargs[ k ] )
self.ok = False
self.msg = msg
self.kwargs = kwargs
def __str__( self ):
return super( tornado.web.HTTPError, self ).__str__()
@classmethod
def fromSERROR( cls, result ):
    #Prevent string-formatting errors caused by % in the message
return cls( 500, result[ 'Message' ].replace( "%", "" ) )
class WOK( object ):
def __init__( self, data = False, **kwargs ):
for k in kwargs:
setattr( self, k, kwargs[ k ] )
self.ok = True
self.data = data
def asyncWithCallback( method ):
return tornado.web.asynchronous( method )
def asyncGen( method ):
return tornado.gen.coroutine( method )
class WebHandler( tornado.web.RequestHandler ):
__disetConfig = ThreadConfig()
__log = False
#Auth requirements
AUTH_PROPS = None
#Location of the handler in the URL
LOCATION = ""
#URL Schema with holders to generate handler urls
URLSCHEMA = ""
#RE to extract group and setup
PATH_RE = ""
#Helper function to create threaded gen.Tasks with automatic callback and execption handling
def threadTask( self, method, *args, **kwargs ):
"""
Helper method to generate a gen.Task and automatically call the callback when the real
    method ends. The wrapped method is executed in a ThreadPoolExecutor worker, so blocking DISET calls do not block the IOLoop.
"""
#Save the task to access the runner
genTask = False
#This runs in the separate thread, calls the callback on finish and takes into account exceptions
def cbMethod( *cargs, **ckwargs ):
cb = ckwargs.pop( 'callback' )
method = cargs[0]
disetConf = cargs[1]
cargs = cargs[2]
self.__disetConfig.reset()
self.__disetConfig.load( disetConf )
ioloop = tornado.ioloop.IOLoop.instance()
try:
result = method( *cargs, **ckwargs )
ioloop.add_callback( functools.partial( cb, result ) )
except Exception as excp:
        gLogger.error( "Following exception occurred %s" % excp )
exc_info = sys.exc_info()
genTask.set_exc_info( exc_info )
ioloop.add_callback( lambda : genTask.exception() )
#Put the task in the thread :)
def threadJob( tmethod, *targs, **tkwargs ):
tkwargs[ 'callback' ] = tornado.stack_context.wrap( tkwargs[ 'callback' ] )
targs = ( tmethod, self.__disetDump, targs )
gThreadPool.submit( cbMethod, *targs, **tkwargs )
#Return a YieldPoint
genTask = tornado.gen.Task( threadJob, method, *args, **kwargs )
return genTask
def __disetBlockDecor( self, func ):
def wrapper( *args, **kwargs ):
raise RuntimeError( "All DISET calls must be made from inside a Threaded Task! Bad boy!" )
return wrapper
def __init__( self, *args, **kwargs ):
"""
Initialize the handler
"""
super( WebHandler, self ).__init__( *args, **kwargs )
if not WebHandler.__log:
WebHandler.__log = gLogger.getSubLogger( self.__class__.__name__ )
self.__credDict = {}
self.__setup = Conf.setup()
self.__processCredentials()
self.__disetConfig.reset()
self.__disetConfig.setDecorator( self.__disetBlockDecor )
self.__disetDump = self.__disetConfig.dump()
match = self.PATH_RE.match( self.request.path )
self._pathResult = self.__checkPath( *match.groups() )
self.__sessionData = SessionData( self.__credDict, self.__setup )
def __processCredentials( self ):
"""
Extract the user credentials based on the certificate or what comes from the balancer
"""
#NGINX
if Conf.balancer() == "nginx":
headers = self.request.headers
if headers[ 'X-Scheme' ] == "https" and headers[ 'X-Ssl_client_verify' ] == 'SUCCESS':
DN = headers[ 'X-Ssl_client_s_dn' ]
self.__credDict[ 'DN' ] = DN
self.__credDict[ 'issuer' ] = headers[ 'X-Ssl_client_i_dn' ]
result = Registry.getUsernameForDN( DN )
if not result[ 'OK' ]:
self.__credDict[ 'validDN' ] = False
else:
self.__credDict[ 'validDN' ] = True
self.__credDict[ 'username' ] = result[ 'Value' ]
return
#TORNADO
if not self.request.protocol == "https":
return
derCert = self.request.get_ssl_certificate( binary_form = True )
if not derCert:
return
pemCert = ssl.DER_cert_to_PEM_cert( derCert )
chain = X509Chain()
chain.loadChainFromString( pemCert )
result = chain.getCredentials()
if not result[ 'OK' ]:
self.log.error( "Could not get client credentials %s" % result[ 'Message' ] )
return
self.__credDict = result[ 'Value' ]
    #Hack. Data coming from OSSL directly and DISET differ in DN/subject
try:
self.__credDict[ 'DN' ] = self.__credDict[ 'subject' ]
except KeyError:
pass
def _request_summary( self ):
"""
Return a string returning the summary of the request
"""
summ = super( WebHandler, self )._request_summary()
cl = []
if self.__credDict.get( 'validDN', False ):
cl.append( self.__credDict[ 'username' ] )
if self.__credDict.get( 'validGroup', False ):
cl.append( "@%s" % self.__credDict[ 'group' ] )
cl.append( " (%s)" % self.__credDict[ 'DN' ] )
summ = "%s %s" % ( summ, "".join( cl ) )
return summ
@property
def log( self ):
return self.__log
@classmethod
def getLog( cls ):
return cls.__log
def getUserDN( self ):
return self.__credDict.get( 'DN', '' )
def getUserName( self ):
return self.__credDict.get( 'username', '' )
def getUserGroup( self ):
return self.__credDict.get( 'group', '' )
def getUserSetup( self ):
return self.__setup
def getUserProperties( self ):
return self.__sessionData.getData().properties
def isRegisteredUser( self ):
return self.__credDict.get( 'validDN', "" ) and self.__credDict.get( 'validGroup', "" )
def getSessionData( self ):
return self.__sessionData.getData()
def actionURL( self, action = "" ):
"""
Given an action name for the handler, return the URL
"""
if action == "index":
action = ""
group = self.getUserGroup()
if group:
group = "/g:%s" % group
setup = self.getUserSetup()
if setup:
setup = "/s:%s" % setup
location = self.LOCATION
if location:
location = "/%s" % location
ats = dict( action = action, group = group, setup = setup, location = location )
return self.URLSCHEMA % ats
def __auth( self, handlerRoute, group ):
"""
Authenticate request
"""
userDN = self.getUserDN()
if group:
self.__credDict[ 'group' ] = group
else:
if userDN:
result = Registry.findDefaultGroupForDN( userDN )
if result[ 'OK' ]:
self.__credDict[ 'group' ] = result[ 'Value' ]
self.__credDict[ 'validGroup' ] = False
if type( self.AUTH_PROPS ) not in ( types.ListType, types.TupleType ):
self.AUTH_PROPS = [ p.strip() for p in self.AUTH_PROPS.split( "," ) if p.strip() ]
allAllowed = False
for p in self.AUTH_PROPS:
if p.lower() in ( 'all', 'any' ):
allAllowed = True
auth = AuthManager( Conf.getAuthSectionForHandler( handlerRoute ) )
ok = auth.authQuery( "", self.__credDict, self.AUTH_PROPS )
if ok:
if userDN:
self.__credDict[ 'validGroup' ] = True
self.log.info( "AUTH OK: %s by %s@%s (%s)" % ( handlerRoute, self.__credDict[ 'username' ], self.__credDict[ 'group' ], userDN ) )
else:
self.__credDict[ 'validDN' ] = False
self.log.info( "AUTH OK: %s by visitor" % ( handlerRoute ) )
elif allAllowed:
self.log.info( "AUTH ALL: %s by %s" % ( handlerRoute, userDN ) )
ok = True
else:
self.log.info( "AUTH KO: %s by %s@%s" % ( handlerRoute, userDN, group ) )
return ok
def __checkPath( self, setup, group, route ):
"""
Check the request, auth, credentials and DISET config
"""
if route[-1] == "/":
methodName = "index"
handlerRoute = route
else:
iP = route.rfind( "/" )
methodName = route[ iP + 1: ]
handlerRoute = route[ :iP ]
if setup:
self.__setup = setup
if not self.__auth( handlerRoute, group ):
return WErr( 401, "Unauthorized, bad boy!" )
DN = self.getUserDN()
if DN:
self.__disetConfig.setDN( DN )
group = self.getUserGroup()
if group:
self.__disetConfig.setGroup( group )
self.__disetConfig.setSetup( setup )
self.__disetDump = self.__disetConfig.dump()
return WOK( methodName )
def get( self, setup, group, route ):
if not self._pathResult.ok:
raise self._pathResult
methodName = "web_%s" % self._pathResult.data
try:
mObj = getattr( self, methodName )
except AttributeError as e:
self.log.fatal( "This should not happen!! %s" % e )
raise tornado.web.HTTPError( 404 )
return mObj()
def post( self, *args, **kwargs ):
return self.get( *args, **kwargs )
def write_error( self, status_code, **kwargs ):
self.set_status( status_code )
cType = "text/plain"
data = self._reason
if 'exc_info' in kwargs:
ex = kwargs[ 'exc_info' ][1]
trace = traceback.format_exception( *kwargs["exc_info"] )
if not isinstance( ex, WErr ):
data += "\n".join( trace )
else:
if self.settings.get("debug"):
self.log.error( "Request ended in error:\n %s" % "\n ".join( trace ) )
data = ex.msg
if type( data ) == types.DictType:
cType = "application/json"
data = json.dumps( data )
self.set_header( 'Content-Type', cType )
self.finish( data )
class WebSocketHandler( tornado.websocket.WebSocketHandler, WebHandler ):
def __init__( self, *args, **kwargs ):
WebHandler.__init__( self, *args, **kwargs )
tornado.websocket.WebSocketHandler.__init__( self, *args, **kwargs )
def open( self, setup, group, route ):
if not self._pathResult.ok:
raise self._pathResult
return self.on_open()
def on_open( self ):
pass
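# Minimal handler sketch (hypothetical names, not part of this module): a concrete
# handler subclasses WebHandler, declares AUTH_PROPS, exposes web_* methods resolved
# by get(), and wraps blocking DISET calls in threadTask so they run on the thread
# pool instead of the IOLoop.
#
#   class ExampleHandler( WebHandler ):
#     AUTH_PROPS = "authenticated"
#
#     @asyncGen
#     def web_getData( self ):
#       result = yield self.threadTask( someBlockingRPCCall, "some-arg" )
#       self.finish( json.dumps( result ) )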
| zmathe/WebAppDIRAC | Lib/WebHandler.py | Python | gpl-3.0 | 10,812 | 0.045135 |
from functools import wraps
from flask import current_app, request
from standup.errors import api_error
def api_key_required(view):
@wraps(view)
def wrapper(*args, **kwargs):
data = request.args if request.method == 'GET' else request.form
api_key = data.get('api_key', '')
if api_key != current_app.config.get('API_KEY'):
return api_error(403, 'Forbidden: Invalid API key.')
return view(*args, **kwargs)
return wrapper
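# Usage sketch (hypothetical endpoint, for illustration only): apply the decorator
# below the route decorator so the key check runs before the view body.
#
#   @app.route('/api/v2/feed/', methods=['GET'])
#   @api_key_required
#   def feed():
#       ...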
| rehandalal/standup | standup/apps/api2/decorators.py | Python | bsd-3-clause | 480 | 0 |
# -*- coding:utf-8 -*-
# Author: hankcs
# Date: 2020-06-20 19:55
import functools
import itertools
import logging
import os
from collections import defaultdict
from copy import copy
from itertools import chain
from typing import Union, List, Callable, Dict, Optional, Any, Iterable, Tuple
import numpy as np
import torch
from hanlp_common.constant import IDX, BOS, EOS
from hanlp_common.document import Document
from hanlp_common.util import merge_locals_kwargs, topological_sort, reorder, prefix_match
from hanlp_common.visualization import markdown_table
from toposort import toposort
from torch.utils.data import DataLoader
import hanlp.utils.torch_util
from hanlp.common.dataset import PadSequenceDataLoader, PrefetchDataLoader, CachedDataLoader
from hanlp.common.structure import History
from hanlp.common.torch_component import TorchComponent
from hanlp.common.transform import FieldLength, TransformList
from hanlp.components.mtl.tasks import Task
from hanlp.layers.embeddings.contextual_word_embedding import ContextualWordEmbedding, ContextualWordEmbeddingModule
from hanlp.layers.embeddings.embedding import Embedding
from hanlp.layers.transformers.pt_imports import optimization
from hanlp.layers.transformers.utils import pick_tensor_for_each_token
from hanlp.metrics.metric import Metric
from hanlp.metrics.mtl import MetricDict
from hanlp.transform.transformer_tokenizer import TransformerSequenceTokenizer
from hanlp.utils.time_util import CountdownTimer
from hanlp.utils.torch_util import clip_grad_norm
class MultiTaskModel(torch.nn.Module):
def __init__(self,
encoder: torch.nn.Module,
scalar_mixes: torch.nn.ModuleDict,
decoders: torch.nn.ModuleDict,
use_raw_hidden_states: dict) -> None:
super().__init__()
self.use_raw_hidden_states = use_raw_hidden_states
self.encoder: ContextualWordEmbeddingModule = encoder
self.scalar_mixes = scalar_mixes
self.decoders = decoders
class MultiTaskDataLoader(DataLoader):
def __init__(self, training=True, tau: float = 0.8, **dataloaders) -> None:
# noinspection PyTypeChecker
super().__init__(None)
self.tau = tau
self.training = training
self.dataloaders: Dict[str, DataLoader] = dataloaders if dataloaders else {}
# self.iterators = dict((k, iter(v)) for k, v in dataloaders.items())
def __len__(self) -> int:
if self.dataloaders:
return sum(len(x) for x in self.dataloaders.values())
return 0
def __iter__(self):
if self.training:
sampling_weights, total_size = self.sampling_weights
task_names = list(self.dataloaders.keys())
iterators = dict((k, itertools.cycle(v)) for k, v in self.dataloaders.items())
for i in range(total_size):
task_name = np.random.choice(task_names, p=sampling_weights)
yield task_name, next(iterators[task_name])
else:
for task_name, dataloader in self.dataloaders.items():
for batch in dataloader:
yield task_name, batch
@property
def sampling_weights(self):
sampling_weights = self.sizes
total_size = sum(sampling_weights)
Z = sum(pow(v, self.tau) for v in sampling_weights)
sampling_weights = [pow(v, self.tau) / Z for v in sampling_weights]
return sampling_weights, total_size
@property
def sizes(self):
return [len(v) for v in self.dataloaders.values()]
class MultiTaskLearning(TorchComponent):
def __init__(self, **kwargs) -> None:
""" A multi-task learning (MTL) framework. It shares the same encoder across multiple decoders. These decoders
can have dependencies on each other which will be properly handled during decoding. To integrate a component
into this MTL framework, a component needs to implement the :class:`~hanlp.components.mtl.tasks.Task` interface.
This framework mostly follows the architecture of :cite:`clark-etal-2019-bam` and :cite:`he-choi-2021-stem`, with additional scalar mix
tricks (:cite:`kondratyuk-straka-2019-75`) allowing each task to attend to any subset of layers. We also
        experimented with knowledge distillation on single tasks, but the performance gain was not significant on a large
        dataset. In the near future, we have no plan to invest more effort in distillation, since most datasets HanLP
uses are relatively large, and our hardware is relatively powerful.
Args:
**kwargs: Arguments passed to config.
"""
super().__init__(**kwargs)
self.model: Optional[MultiTaskModel] = None
self.tasks: Dict[str, Task] = None
self.vocabs = None
def build_dataloader(self,
data,
batch_size,
shuffle=False,
device=None,
logger: logging.Logger = None,
gradient_accumulation=1,
tau: float = 0.8,
prune=None,
prefetch=None,
tasks_need_custom_eval=None,
cache=False,
debug=False,
**kwargs) -> DataLoader:
# This method is only called during training or evaluation but not prediction
dataloader = MultiTaskDataLoader(training=shuffle, tau=tau)
for i, (task_name, task) in enumerate(self.tasks.items()):
encoder_transform, transform = self.build_transform(task)
training = None
if data == 'trn':
if debug:
_data = task.dev
else:
_data = task.trn
training = True
elif data == 'dev':
_data = task.dev
training = False
elif data == 'tst':
_data = task.tst
training = False
else:
_data = data
if isinstance(data, str):
logger.info(f'[yellow]{i + 1} / {len(self.tasks)}[/yellow] Building [blue]{data}[/blue] dataset for '
f'[cyan]{task_name}[/cyan] ...')
# Adjust Tokenizer according to task config
config = copy(task.config)
config.pop('transform', None)
task_dataloader: DataLoader = task.build_dataloader(_data, transform, training, device, logger,
tokenizer=encoder_transform.tokenizer,
gradient_accumulation=gradient_accumulation,
cache=isinstance(data, str), **config)
# if prune:
# # noinspection PyTypeChecker
# task_dataset: TransformDataset = task_dataloader.dataset
# size_before = len(task_dataset)
# task_dataset.prune(prune)
# size_after = len(task_dataset)
# num_pruned = size_before - size_after
# logger.info(f'Pruned [yellow]{num_pruned} ({num_pruned / size_before:.1%})[/yellow] '
# f'samples out of {size_before}.')
if cache and data in ('trn', 'dev'):
task_dataloader: CachedDataLoader = CachedDataLoader(
task_dataloader,
f'{cache}/{os.getpid()}-{data}-{task_name.replace("/", "-")}-cache.pt' if isinstance(cache,
str) else None
)
dataloader.dataloaders[task_name] = task_dataloader
if data == 'trn':
sampling_weights, total_size = dataloader.sampling_weights
headings = ['task', '#batches', '%batches', '#scaled', '%scaled', '#epoch']
matrix = []
min_epochs = []
for (task_name, dataset), weight in zip(dataloader.dataloaders.items(), sampling_weights):
epochs = len(dataset) / weight / total_size
matrix.append(
[f'{task_name}', len(dataset), f'{len(dataset) / total_size:.2%}', int(total_size * weight),
f'{weight:.2%}', f'{epochs:.2f}'])
min_epochs.append(epochs)
longest = int(torch.argmax(torch.tensor(min_epochs)))
table = markdown_table(headings, matrix)
rows = table.splitlines()
cells = rows[longest + 2].split('|')
cells[-2] = cells[-2].replace(f'{min_epochs[longest]:.2f}',
f'[bold][red]{min_epochs[longest]:.2f}[/red][/bold]')
rows[longest + 2] = '|'.join(cells)
logger.info(f'[bold][yellow]{"Samples Distribution": ^{len(rows[0])}}[/yellow][/bold]')
logger.info('\n'.join(rows))
if prefetch and (data == 'trn' or not tasks_need_custom_eval):
dataloader = PrefetchDataLoader(dataloader, prefetch=prefetch)
return dataloader
def build_transform(self, task: Task) -> Tuple[TransformerSequenceTokenizer, TransformList]:
encoder: ContextualWordEmbedding = self.config.encoder
encoder_transform: TransformerSequenceTokenizer = task.build_tokenizer(encoder.transform())
length_transform = FieldLength('token', 'token_length')
transform = TransformList(encoder_transform, length_transform)
extra_transform = self.config.get('transform', None)
if extra_transform:
transform.insert(0, extra_transform)
return encoder_transform, transform
def build_optimizer(self,
trn,
epochs,
adam_epsilon,
weight_decay,
warmup_steps,
lr,
encoder_lr,
**kwargs):
model = self.model_
encoder = model.encoder
num_training_steps = len(trn) * epochs // self.config.get('gradient_accumulation', 1)
encoder_parameters = list(encoder.parameters())
parameter_groups: List[Dict[str, Any]] = []
decoders = model.decoders
decoder_optimizers = dict()
for k, task in self.tasks.items():
decoder: torch.nn.Module = decoders[k]
decoder_parameters = list(decoder.parameters())
if task.separate_optimizer:
decoder_optimizers[k] = task.build_optimizer(decoder=decoder, **kwargs)
else:
task_lr = task.lr or lr
parameter_groups.append({"params": decoder_parameters, 'lr': task_lr})
parameter_groups.append({"params": encoder_parameters, 'lr': encoder_lr})
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
no_decay_parameters = set()
for n, p in model.named_parameters():
if any(nd in n for nd in no_decay):
no_decay_parameters.add(p)
no_decay_by_lr = defaultdict(list)
for group in parameter_groups:
_lr = group['lr']
ps = group['params']
group['params'] = decay_parameters = []
group['weight_decay'] = weight_decay
for p in ps:
if p in no_decay_parameters:
no_decay_by_lr[_lr].append(p)
else:
decay_parameters.append(p)
for _lr, ps in no_decay_by_lr.items():
parameter_groups.append({"params": ps, 'lr': _lr, 'weight_decay': 0.0})
# noinspection PyTypeChecker
encoder_optimizer = optimization.AdamW(
parameter_groups,
lr=lr,
weight_decay=weight_decay,
eps=adam_epsilon,
)
encoder_scheduler = optimization.get_linear_schedule_with_warmup(encoder_optimizer,
num_training_steps * warmup_steps,
num_training_steps)
return encoder_optimizer, encoder_scheduler, decoder_optimizers
def build_criterion(self, **kwargs):
return dict((k, v.build_criterion(decoder=self.model_.decoders[k], **kwargs)) for k, v in self.tasks.items())
def build_metric(self, **kwargs):
metrics = MetricDict()
for key, task in self.tasks.items():
metric = task.build_metric(**kwargs)
assert metric, f'Please implement `build_metric` of {type(task)} to return a metric.'
metrics[key] = metric
return metrics
def execute_training_loop(self, trn: DataLoader, dev: DataLoader, epochs, criterion, optimizer, metric, save_dir,
logger: logging.Logger, devices, patience=0.5, **kwargs):
if isinstance(patience, float):
patience = int(patience * epochs)
best_epoch, best_metric = 0, -1
timer = CountdownTimer(epochs)
ratio_width = len(f'{len(trn)}/{len(trn)}')
epoch = 0
history = History()
for epoch in range(1, epochs + 1):
logger.info(f"[yellow]Epoch {epoch} / {epochs}:[/yellow]")
self.fit_dataloader(trn, criterion, optimizer, metric, logger, history, ratio_width=ratio_width,
**self.config)
if dev:
self.evaluate_dataloader(dev, criterion, metric, logger, ratio_width=ratio_width, input='dev')
report = f'{timer.elapsed_human}/{timer.total_time_human}'
dev_score = metric.score
if dev_score > best_metric:
self.save_weights(save_dir)
best_metric = dev_score
best_epoch = epoch
report += ' [red]saved[/red]'
else:
report += f' ({epoch - best_epoch})'
if epoch - best_epoch >= patience:
report += ' early stop'
break
timer.log(report, ratio_percentage=False, newline=True, ratio=False)
for d in [trn, dev]:
self._close_dataloader(d)
if best_epoch != epoch:
logger.info(f'Restoring best model saved [red]{epoch - best_epoch}[/red] epochs ago')
self.load_weights(save_dir)
return best_metric
def _close_dataloader(self, d):
if isinstance(d, PrefetchDataLoader):
d.close()
if hasattr(d.dataset, 'close'):
self._close_dataloader(d.dataset)
elif isinstance(d, CachedDataLoader):
d.close()
elif isinstance(d, MultiTaskDataLoader):
for d in d.dataloaders.values():
self._close_dataloader(d)
# noinspection PyMethodOverriding
def fit_dataloader(self,
trn: DataLoader,
criterion,
optimizer,
metric,
logger: logging.Logger,
history: History,
ratio_width=None,
gradient_accumulation=1,
encoder_grad_norm=None,
decoder_grad_norm=None,
patience=0.5,
eval_trn=False,
**kwargs):
self.model.train()
encoder_optimizer, encoder_scheduler, decoder_optimizers = optimizer
timer = CountdownTimer(len(trn))
total_loss = 0
self.reset_metrics(metric)
model = self.model_
encoder_parameters = model.encoder.parameters()
decoder_parameters = model.decoders.parameters()
for idx, (task_name, batch) in enumerate(trn):
decoder_optimizer = decoder_optimizers.get(task_name, None)
output_dict, _ = self.feed_batch(batch, task_name)
loss = self.compute_loss(batch, output_dict[task_name]['output'], criterion[task_name],
self.tasks[task_name])
if gradient_accumulation and gradient_accumulation > 1:
loss /= gradient_accumulation
loss.backward()
total_loss += float(loss.item())
if history.step(gradient_accumulation):
if self.config.get('grad_norm', None):
clip_grad_norm(model, self.config.grad_norm)
if encoder_grad_norm:
torch.nn.utils.clip_grad_norm_(encoder_parameters, encoder_grad_norm)
if decoder_grad_norm:
torch.nn.utils.clip_grad_norm_(decoder_parameters, decoder_grad_norm)
encoder_optimizer.step()
encoder_optimizer.zero_grad()
encoder_scheduler.step()
if decoder_optimizer:
if isinstance(decoder_optimizer, tuple):
decoder_optimizer, decoder_scheduler = decoder_optimizer
else:
decoder_scheduler = None
decoder_optimizer.step()
decoder_optimizer.zero_grad()
if decoder_scheduler:
decoder_scheduler.step()
if eval_trn:
self.decode_output(output_dict, batch, task_name)
self.update_metrics(batch, output_dict, metric, task_name)
timer.log(self.report_metrics(total_loss / (timer.current + 1), metric if eval_trn else None),
ratio_percentage=None,
ratio_width=ratio_width,
logger=logger)
del loss
del output_dict
return total_loss / timer.total
def report_metrics(self, loss, metrics: MetricDict):
return f'loss: {loss:.4f} {metrics.cstr()}' if metrics else f'loss: {loss:.4f}'
# noinspection PyMethodOverriding
@torch.no_grad()
def evaluate_dataloader(self,
data: MultiTaskDataLoader,
criterion,
metric: MetricDict,
logger,
ratio_width=None,
input: str = None,
**kwargs):
self.model.eval()
self.reset_metrics(metric)
tasks_need_custom_eval = self.config.get('tasks_need_custom_eval', None)
tasks_need_custom_eval = tasks_need_custom_eval or {}
tasks_need_custom_eval = dict((k, None) for k in tasks_need_custom_eval)
for each in tasks_need_custom_eval:
tasks_need_custom_eval[each] = data.dataloaders.pop(each)
timer = CountdownTimer(len(data) + len(tasks_need_custom_eval))
total_loss = 0
for idx, (task_name, batch) in enumerate(data):
output_dict, _ = self.feed_batch(batch, task_name)
loss = self.compute_loss(batch, output_dict[task_name]['output'], criterion[task_name],
self.tasks[task_name])
total_loss += loss.item()
self.decode_output(output_dict, batch, task_name)
self.update_metrics(batch, output_dict, metric, task_name)
timer.log(self.report_metrics(total_loss / (timer.current + 1), metric), ratio_percentage=None,
logger=logger,
ratio_width=ratio_width)
del loss
del output_dict
for task_name, dataset in tasks_need_custom_eval.items():
task = self.tasks[task_name]
decoder = self.model_.decoders[task_name]
task.evaluate_dataloader(
dataset, task.build_criterion(decoder=decoder),
metric=metric[task_name],
input=task.dev if input == 'dev' else task.tst,
split=input,
decoder=decoder,
h=functools.partial(self._encode, task_name=task_name,
cls_is_bos=task.cls_is_bos, sep_is_eos=task.sep_is_eos)
)
data.dataloaders[task_name] = dataset
timer.log(self.report_metrics(total_loss / (timer.current + 1), metric), ratio_percentage=None,
logger=logger,
ratio_width=ratio_width)
return total_loss / timer.total, metric, data
def build_model(self, training=False, **kwargs) -> torch.nn.Module:
tasks = self.tasks
encoder: ContextualWordEmbedding = self.config.encoder
transformer_module = encoder.module(training=training)
encoder_size = transformer_module.get_output_dim()
scalar_mixes = torch.nn.ModuleDict()
decoders = torch.nn.ModuleDict()
use_raw_hidden_states = dict()
for task_name, task in tasks.items():
decoder = task.build_model(encoder_size, training=training, **task.config)
assert decoder, f'Please implement `build_model` of {type(task)} to return a decoder.'
decoders[task_name] = decoder
if task.scalar_mix:
scalar_mix = task.scalar_mix.build()
scalar_mixes[task_name] = scalar_mix
# Activate scalar mix starting from 0-th layer
encoder.scalar_mix = 0
use_raw_hidden_states[task_name] = task.use_raw_hidden_states
encoder.ret_raw_hidden_states = any(use_raw_hidden_states.values())
return MultiTaskModel(transformer_module, scalar_mixes, decoders, use_raw_hidden_states)
def predict(self,
data: Union[str, List[str]],
tasks: Optional[Union[str, List[str]]] = None,
skip_tasks: Optional[Union[str, List[str]]] = None,
resolved_tasks=None,
**kwargs) -> Document:
"""Predict on data.
Args:
data: A sentence or a list of sentences.
tasks: The tasks to predict.
skip_tasks: The tasks to skip.
resolved_tasks: The resolved tasks to override ``tasks`` and ``skip_tasks``.
**kwargs: Not used.
Returns:
A :class:`~hanlp_common.document.Document`.
"""
doc = Document()
if not data:
return doc
target_tasks = resolved_tasks or self.resolve_tasks(tasks, skip_tasks)
flatten_target_tasks = [self.tasks[t] for group in target_tasks for t in group]
cls_is_bos = any([x.cls_is_bos for x in flatten_target_tasks])
sep_is_eos = any([x.sep_is_eos for x in flatten_target_tasks])
# Now build the dataloaders and execute tasks
first_task_name: str = list(target_tasks[0])[0]
first_task: Task = self.tasks[first_task_name]
encoder_transform, transform = self.build_transform(first_task)
# Override the tokenizer config of the 1st task
encoder_transform.sep_is_eos = sep_is_eos
encoder_transform.cls_is_bos = cls_is_bos
average_subwords = self.model.encoder.average_subwords
flat = first_task.input_is_flat(data)
if flat:
data = [data]
device = self.device
samples = first_task.build_samples(data, cls_is_bos=cls_is_bos, sep_is_eos=sep_is_eos)
dataloader = first_task.build_dataloader(samples, transform=transform, device=device)
results = defaultdict(list)
order = []
for batch in dataloader:
order.extend(batch[IDX])
# Run the first task, let it make the initial batch for the successors
output_dict = self.predict_task(first_task, first_task_name, batch, results, run_transform=True,
cls_is_bos=cls_is_bos, sep_is_eos=sep_is_eos)
# Run each task group in order
for group_id, group in enumerate(target_tasks):
# We could parallelize this in the future
for task_name in group:
if task_name == first_task_name:
continue
output_dict = self.predict_task(self.tasks[task_name], task_name, batch, results, output_dict,
run_transform=True, cls_is_bos=cls_is_bos, sep_is_eos=sep_is_eos)
if group_id == 0:
# We are kind of hard coding here. If the first task is a tokenizer,
# we need to convert the hidden and mask to token level
if first_task_name.startswith('tok'):
spans = []
tokens = []
output_spans = first_task.config.get('output_spans', None)
for span_per_sent, token_per_sent in zip(output_dict[first_task_name]['prediction'],
results[first_task_name][-len(batch[IDX]):]):
if output_spans:
token_per_sent = [x[0] for x in token_per_sent]
if cls_is_bos:
span_per_sent = [(-1, 0)] + span_per_sent
token_per_sent = [BOS] + token_per_sent
if sep_is_eos:
span_per_sent = span_per_sent + [(span_per_sent[-1][0] + 1, span_per_sent[-1][1] + 1)]
token_per_sent = token_per_sent + [EOS]
# The offsets start with 0 while [CLS] is zero
if average_subwords:
span_per_sent = [list(range(x[0] + 1, x[1] + 1)) for x in span_per_sent]
else:
span_per_sent = [x[0] + 1 for x in span_per_sent]
spans.append(span_per_sent)
tokens.append(token_per_sent)
spans = PadSequenceDataLoader.pad_data(spans, 0, torch.long, device=device)
output_dict['hidden'] = pick_tensor_for_each_token(output_dict['hidden'], spans,
average_subwords)
batch['token_token_span'] = spans
batch['token'] = tokens
# noinspection PyTypeChecker
batch['token_length'] = torch.tensor([len(x) for x in tokens], dtype=torch.long, device=device)
batch.pop('mask', None)
# Put results into doc in the order of tasks
for k in self.config.task_names:
v = results.get(k, None)
if v is None:
continue
doc[k] = reorder(v, order)
# Allow task to perform finalization on document
for group in target_tasks:
for task_name in group:
task = self.tasks[task_name]
task.finalize_document(doc, task_name)
# If no tok in doc, use raw input as tok
if not any(k.startswith('tok') for k in doc):
doc['tok'] = data
if flat:
for k, v in list(doc.items()):
doc[k] = v[0]
# If there is only one field, don't bother to wrap it
# if len(doc) == 1:
# return list(doc.values())[0]
return doc
def resolve_tasks(self, tasks, skip_tasks) -> List[Iterable[str]]:
# Now we decide which tasks to perform and their orders
tasks_in_topological_order = self._tasks_in_topological_order
task_topological_order = self._task_topological_order
computation_graph = self._computation_graph
target_tasks = self._resolve_task_name(tasks)
if not target_tasks:
target_tasks = tasks_in_topological_order
else:
target_topological_order = defaultdict(set)
for task_name in target_tasks:
for dependency in topological_sort(computation_graph, task_name):
target_topological_order[task_topological_order[dependency]].add(dependency)
target_tasks = [item[1] for item in sorted(target_topological_order.items())]
if skip_tasks:
skip_tasks = self._resolve_task_name(skip_tasks)
target_tasks = [x - skip_tasks for x in target_tasks]
target_tasks = [x for x in target_tasks if x]
assert target_tasks, f'No task to perform due to `tasks = {tasks}`.'
# Sort target tasks within the same group in a defined order
target_tasks = [sorted(x, key=lambda _x: self.config.task_names.index(_x)) for x in target_tasks]
return target_tasks
def predict_task(self, task: Task, output_key, batch, results, output_dict=None, run_transform=True,
cls_is_bos=True, sep_is_eos=True):
output_dict, batch = self.feed_batch(batch, output_key, output_dict, run_transform, cls_is_bos, sep_is_eos,
results)
self.decode_output(output_dict, batch, output_key)
results[output_key].extend(task.prediction_to_result(output_dict[output_key]['prediction'], batch))
return output_dict
def _resolve_task_name(self, dependencies):
resolved_dependencies = set()
if isinstance(dependencies, str):
if dependencies in self.tasks:
resolved_dependencies.add(dependencies)
elif dependencies.endswith('*'):
resolved_dependencies.update(x for x in self.tasks if x.startswith(dependencies[:-1]))
else:
prefix_matched = prefix_match(dependencies, self.config.task_names)
assert prefix_matched, f'No prefix matching for {dependencies}. ' \
f'Check your dependencies definition: {list(self.tasks.values())}'
resolved_dependencies.add(prefix_matched)
elif isinstance(dependencies, Iterable):
resolved_dependencies.update(set(chain.from_iterable(self._resolve_task_name(x) for x in dependencies)))
return resolved_dependencies
def fit(self,
encoder: Embedding,
tasks: Dict[str, Task],
save_dir,
epochs,
patience=0.5,
lr=1e-3,
encoder_lr=5e-5,
adam_epsilon=1e-8,
weight_decay=0.0,
warmup_steps=0.1,
gradient_accumulation=1,
grad_norm=5.0,
encoder_grad_norm=None,
decoder_grad_norm=None,
tau: float = 0.8,
transform=None,
# prune: Callable = None,
eval_trn=True,
prefetch=None,
tasks_need_custom_eval=None,
_device_placeholder=False,
cache=False,
devices=None,
logger=None,
seed=None,
**kwargs):
trn_data, dev_data, batch_size = 'trn', 'dev', None
task_names = list(tasks.keys())
return super().fit(**merge_locals_kwargs(locals(), kwargs, excludes=('self', 'kwargs', '__class__', 'tasks')),
**tasks)
# noinspection PyAttributeOutsideInit
def on_config_ready(self, **kwargs):
self.tasks = dict((key, task) for key, task in self.config.items() if isinstance(task, Task))
computation_graph = dict()
for task_name, task in self.tasks.items():
dependencies = task.dependencies
resolved_dependencies = self._resolve_task_name(dependencies)
computation_graph[task_name] = resolved_dependencies
# We can cache this order
tasks_in_topological_order = list(toposort(computation_graph))
task_topological_order = dict()
for i, group in enumerate(tasks_in_topological_order):
for task_name in group:
task_topological_order[task_name] = i
self._tasks_in_topological_order = tasks_in_topological_order
self._task_topological_order = task_topological_order
self._computation_graph = computation_graph
@staticmethod
def reset_metrics(metrics: Dict[str, Metric]):
for metric in metrics.values():
metric.reset()
def feed_batch(self,
batch: Dict[str, Any],
task_name,
output_dict=None,
run_transform=False,
cls_is_bos=False,
sep_is_eos=False,
results=None) -> Tuple[Dict[str, Any], Dict[str, Any]]:
h, output_dict = self._encode(batch, task_name, output_dict, cls_is_bos, sep_is_eos)
task = self.tasks[task_name]
if run_transform:
batch = task.transform_batch(batch, results=results, cls_is_bos=cls_is_bos, sep_is_eos=sep_is_eos)
batch['mask'] = mask = hanlp.utils.torch_util.lengths_to_mask(batch['token_length'])
output_dict[task_name] = {
'output': task.feed_batch(h,
batch=batch,
mask=mask,
decoder=self.model.decoders[task_name]),
'mask': mask
}
return output_dict, batch
def _encode(self, batch, task_name, output_dict=None, cls_is_bos=False, sep_is_eos=False):
model = self.model
if output_dict:
hidden, raw_hidden = output_dict['hidden'], output_dict['raw_hidden']
else:
hidden = model.encoder(batch)
if isinstance(hidden, tuple):
hidden, raw_hidden = hidden
else:
raw_hidden = None
output_dict = {'hidden': hidden, 'raw_hidden': raw_hidden}
hidden_states = raw_hidden if model.use_raw_hidden_states[task_name] else hidden
if task_name in model.scalar_mixes:
scalar_mix = model.scalar_mixes[task_name]
h = scalar_mix(hidden_states)
else:
if model.scalar_mixes: # If any task enables scalar_mix, hidden_states will be a 4d tensor
hidden_states = hidden_states[-1, :, :, :]
h = hidden_states
# If the task doesn't need cls while h has cls, remove cls
task = self.tasks[task_name]
if cls_is_bos and not task.cls_is_bos:
h = h[:, 1:, :]
if sep_is_eos and not task.sep_is_eos:
h = h[:, :-1, :]
return h, output_dict
def decode_output(self, output_dict, batch, task_name=None):
if not task_name:
for task_name, task in self.tasks.items():
output_per_task = output_dict.get(task_name, None)
if output_per_task is not None:
output_per_task['prediction'] = task.decode_output(
output_per_task['output'],
output_per_task['mask'],
batch, self.model.decoders[task_name])
else:
output_per_task = output_dict[task_name]
output_per_task['prediction'] = self.tasks[task_name].decode_output(
output_per_task['output'],
output_per_task['mask'],
batch,
self.model.decoders[task_name])
def update_metrics(self, batch: Dict[str, Any], output_dict: Dict[str, Any], metrics: MetricDict, task_name):
task = self.tasks[task_name]
output_per_task = output_dict.get(task_name, None)
if output_per_task:
output = output_per_task['output']
prediction = output_per_task['prediction']
metric = metrics.get(task_name, None)
task.update_metrics(batch, output, prediction, metric)
def compute_loss(self,
batch: Dict[str, Any],
output: Union[torch.Tensor, Dict[str, torch.Tensor], Iterable[torch.Tensor], Any],
criterion: Callable,
task: Task) -> torch.FloatTensor:
return task.compute_loss(batch, output, criterion)
def evaluate(self, save_dir=None, logger: logging.Logger = None, batch_size=None, output=False, **kwargs):
rets = super().evaluate('tst', save_dir, logger, batch_size, output, **kwargs)
tst = rets[-1]
self._close_dataloader(tst)
return rets
def save_vocabs(self, save_dir, filename='vocabs.json'):
for task_name, task in self.tasks.items():
task.save_vocabs(save_dir, f'{task_name}_{filename}')
def load_vocabs(self, save_dir, filename='vocabs.json'):
for task_name, task in self.tasks.items():
task.load_vocabs(save_dir, f'{task_name}_{filename}')
def parallelize(self, devices: List[Union[int, torch.device]]):
raise NotImplementedError('Parallelization is not implemented yet.')
def __call__(self, data, **kwargs) -> Document:
return super().__call__(data, **kwargs)
def __getitem__(self, task_name: str) -> Task:
return self.tasks[task_name]
def __delitem__(self, task_name: str):
"""Delete a task (and every resource it owns) from this component.
Args:
task_name: The name of the task to be deleted.
Examples:
>>> del mtl['dep'] # Delete dep from MTL
"""
del self.config[task_name]
self.config.task_names.remove(task_name)
del self.tasks[task_name]
del self.model.decoders[task_name]
del self._computation_graph[task_name]
self._task_topological_order.pop(task_name)
for group in self._tasks_in_topological_order:
group: set = group
group.discard(task_name)
def __repr__(self):
return repr(self.config)
def items(self):
yield from self.tasks.items()
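# Usage sketch (identifiers below are illustrative assumptions, not shipped with this
# module): a trained MTL component is restored through the inherited TorchComponent
# loading machinery and then called on raw text; predict() accepts task prefixes and
# '*' wildcards via `tasks` / `skip_tasks`, and __delitem__ drops a task together
# with its decoder.
#
#   mtl = MultiTaskLearning()
#   mtl.load('path/or/identifier/of/a/trained/mtl/model')
#   doc = mtl(['An example sentence.'], tasks='tok*')
#   del mtl['dep']  # remove dependency parsing and the resources it owns
#   doc.pretty_print()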
| hankcs/HanLP | hanlp/components/mtl/multi_task_learning.py | Python | apache-2.0 | 38,042 | 0.00276 |
import numpy as np
from scipy import signal
import math
def norm_matrix(matrix):
    for i in range(np.shape(matrix)[0]):
        row_max = np.max(matrix[i])
        # only rescale rows with a positive maximum; rows that are all non-positive are left unchanged
        if row_max > 0:
            matrix[i] /= row_max
    return matrix
def pool_boundaries(boundaries, filter_size, coeff):
"""
Parameters
----------
    boundaries : matrix containing the weights of the boundaries
filter_size : define the size of the pooling
coeff : define the strength coefficient of the pooling
Returns : new matrix of boundaries
-------
"""
pool = np.zeros(np.shape(boundaries))
size_filters = np.arange(filter_size) + 1
weight_pooling = [0.1, 0.08, 0.05, 0.05, 0.05, 0.05, 0.05, 0.05, 0.05, 0.05] * coeff
for i,size_filter in enumerate(size_filters):
#vertical pooling
pool[:, :-size_filter,:] += weight_pooling[i] * boundaries[:, size_filter:, :]
pool[:, size_filter:, :] += weight_pooling[i] * boundaries[:, :-size_filter,:]
#horizontal pooling
pool[:, :, :-size_filter] += weight_pooling[i] * boundaries[:, :, size_filter:]
pool[:, :, size_filter:] += weight_pooling[i] * boundaries[:, :, :-size_filter]
pool[pool < 0] = 0
return boundaries + pool
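# Illustrative usage sketch (an addition; argument values are assumptions):
# boundaries = get_boundaries(img)                         # (4, H, W) edge maps, see get_boundaries() below
# pooled = pool_boundaries(boundaries, filter_size=3, coeff=1)
# Pooling reinforces edges that have aligned neighbours up to `filter_size` pixels away.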
def pool_shade_boundaries(boundaries):
pool = np.zeros(np.shape(boundaries))
size_filters = [1, 2, 3]
weight_pooling = [1.5, 1, 1]
# weight_pooling = [.5, .7, .3]
# size_filters = [1]
# weight_pooling = [1]
for k, size_filter in enumerate(size_filters):
for i in range(size_filter, np.shape(boundaries)[1] - size_filter):
for j in range(size_filter, np.shape(boundaries)[2] - size_filter):
pool[0, i, j] += weight_pooling[k] * (np.abs(
np.mean([boundaries[2, i - size_filter + 1, j - 1], boundaries[2, i + size_filter, j]]) -
np.mean([boundaries[2, i - size_filter + 1, j], boundaries[2, i + size_filter, j - 1]])
))
pool[0, i, j] += weight_pooling[k] * (np.abs(
np.mean([boundaries[3, i - size_filter + 1, j], boundaries[3, i + size_filter, j + 1]]) -
np.mean([boundaries[3, i - size_filter + 1, j + 1], boundaries[3, i + size_filter, j]])
))
pool[1, i, j] += weight_pooling[k] * (np.abs(
np.mean([boundaries[2, i - size_filter, j - 1], boundaries[2, i + size_filter - 1, j]]) -
np.mean([boundaries[2, i - size_filter, j], boundaries[2, i + size_filter - 1, j - 1]])
))
pool[1, i, j] += weight_pooling[k] * (np.abs(
np.mean([boundaries[3, i - size_filter, j], boundaries[3, i + size_filter - 1, j + 1]]) -
np.mean([boundaries[3, i - size_filter, j + 1], boundaries[3, i + size_filter - 1, j]])
))
pool[2, i, j] += weight_pooling[k] * (np.abs(
np.mean([boundaries[0, i - 1, j - size_filter + 1], boundaries[0, i, j + size_filter]]) -
np.mean([boundaries[0, i - 1, j + size_filter], boundaries[0, i, j - size_filter + 1]])
))
pool[2, i, j] += weight_pooling[k] * (np.abs(
np.mean([boundaries[1, i, j - size_filter + 1], boundaries[1, i + 1, j + size_filter]]) -
np.mean([boundaries[1, i, j + size_filter], boundaries[1, i + 1, j - size_filter + 1]])
))
pool[3, i, j] += weight_pooling[k] * (np.abs(
np.mean([boundaries[0, i - 1, j - size_filter + 1], boundaries[0, i, j + size_filter]]) -
np.mean([boundaries[0, i - 1, j + size_filter], boundaries[0, i, j - size_filter + 1]])
))
pool[3, i, j] += weight_pooling[k] * (np.abs(
np.mean([boundaries[1, i, j - size_filter], boundaries[1, i + 1, j + size_filter - 1]]) -
np.mean([boundaries[1, i, j + size_filter - 1], boundaries[1, i + 1, j - size_filter]])
))
return boundaries + pool
def rem_iner_bound(input, boundaries, tresh):
"""
Parameters
----------
input : image input
boundaries : matrix containing the weights of the boundaries
    tresh : threshold used as a sensitivity coefficient; a small threshold means high sensitivity
Returns the matrix of boundaries
-------
"""
pool = np.copy(boundaries)
for i in range(np.shape(input)[0] - 1):
for j in range(np.shape(input)[1] - 1):
patch = input[i:i + 2, j:j + 2]
min = np.min(patch)
max = np.max(patch)
diff = max-min
mean = np.mean(patch)
if 0 <= min:
#remove boundaries of background and very similar colors
if diff < 0.05:
boundaries[0, i, j:j + 2] -= pool[0, i, j:j + 2]
boundaries[1, i + 1, j:j + 2] -= pool[1, i + 1, j:j + 2]
boundaries[2, i:i + 2, j] -= pool[2, i:i + 2, j]
boundaries[3, i:i + 2, j + 1] -= pool[3, i:i + 2, j + 1]
else:
if mean > 0.5 and diff < tresh:
boundaries[0, i, j:j + 2] -= pool[0, i, j:j + 2]
boundaries[1, i + 1, j:j + 2] -= pool[1, i + 1, j:j + 2]
boundaries[2, i:i + 2, j] -= pool[2, i:i + 2, j]
boundaries[3, i:i + 2, j + 1] -= pool[3, i:i + 2, j + 1]
boundaries[boundaries < 0] = 0
return boundaries
def rem_inner_seg_bound(input, boundaries):
for i in range(np.shape(input)[0] - 1):
for j in range(np.shape(input)[1] - 1):
patch = input[i:i + 2, j:j + 2]
neg = patch[patch < 0]
if np.shape(neg)[0] == 4:
boundaries[0, i, j:j + 2] = 0
boundaries[1, i + 1, j:j + 2] = 0
boundaries[2, i:i + 2, j] = 0
boundaries[3, i:i + 2, j + 1] = 0
def choose_loc(x, y, dir):
"""
    Return the position of the next pixel as a function of the direction one wants to visit
Parameters
----------
x
y
dir
Returns
-------
"""
if dir == 0:
return [x-1,y]
elif dir == 1:
return [x+1,y]
elif dir == 2:
return [x, y-1]
elif dir == 3:
return [x, y+1]
def calculate_pixel(input, seg_img, boundaries, loc, thresh_bound):
direction = []
for dir in range(4):
pos = choose_loc(loc[0], loc[1], dir)
if 0 <= pos[0] < np.shape(input)[0] and 0 <= pos[1] < np.shape(input)[1]:
if boundaries[dir, pos[0], pos[1]] < thresh_bound:
if input[pos[0], pos[1]] > 0:
direction.append(dir)
elif seg_img[pos[0], pos[1]] > 0:
direction.append(dir)
for dir in direction:
pos = choose_loc(loc[0], loc[1], dir)
if input[pos[0],pos[1]] > 0:
seg_img[loc[0], loc[1]] += (1 / np.shape(direction)[0]) * input[pos[0], pos[1]]
else:
seg_img[loc[0], loc[1]] += (1 / np.shape(direction)[0]) * seg_img[pos[0], pos[1]]
def fill_pixel(visited_pixel, input, bound, seg_img, thresh_bound):
#fill pixels with the real pixel values from images
for i in range(np.shape(input)[0]):
for j in range(np.shape(input)[1]):
if visited_pixel[i, j] == 1 and input[i, j] > 0:
seg_img[i, j] = input[i, j]
#fill pixels of segmented images
#todo find a better way than this double loop
#todo perhaps need to do this in the four direction for gradients colors?
#top to down and left to right
for i in range(np.shape(input)[0]):
for j in range(np.shape(input)[1]):
if visited_pixel[i, j] == 1 and input[i, j] < 0:
calculate_pixel(input, seg_img, bound, [i, j], thresh_bound)
#bottom -> top right -> left filling for remaining pixels
for i in range(np.shape(input)[0] - 1, -1, -1):
for j in range(np.shape(input)[1] - 1, -1, -1):
if visited_pixel[i, j] == 1 and input[i, j] < 0 and seg_img[i, j] == 0:
calculate_pixel(input, seg_img, bound, [i, j], thresh_bound)
# # right -> left top -> bottom filling for remaining pixels
# for i in range(np.shape(input)[1]):
# for j in range(np.shape(input)[0] - 1, -1, -1):
# if visited_pixel[j, i] == 1 and input[j, i] < 0 and seg_img[j, i] == 0:
# calculate_pixel(input, seg_img, bound, [j, i], thresh_bound)
def fill_shape(visited_pixel, input, boundaries, seg_img, seg_bound, loc, thresh_bound, num_iter):
if 0 <= loc[0] < np.shape(visited_pixel)[0] and 0 <= loc[1] < np.shape(visited_pixel)[1]:
visited_pixel[int(loc[0]), int(loc[1])] = 1
num_iter += 1
# dir = 0 go top
# dir = 1 go down
# dir = 2 go left
# dir = 3 go right
for dir in range(4):
new_loc = choose_loc(loc[0], loc[1], dir)
#verify if the next pixel is not out of range
if 0 <= new_loc[0] < np.shape(visited_pixel)[0] and 0 <= new_loc[1] < np.shape(visited_pixel)[1]:
if boundaries[int(dir), int(new_loc[0]), int(new_loc[1])] > thresh_bound:
seg_bound[int(dir), int(new_loc[0]), int(new_loc[1])] = boundaries[int(dir), int(new_loc[0]), int(new_loc[1])]
else:
if not visited_pixel[int(new_loc[0]), int(new_loc[1])]:
fill_shape(visited_pixel, input, boundaries, seg_img, seg_bound, new_loc, thresh_bound, num_iter)
def define_contrast_edge_boundaries(boundary, positive):
    # this method makes the assumption that the object is always in the center of the picture
if positive:
        # check that there are boundaries: test cases
if np.shape(np.nonzero(boundary)[0])[0] != 0:
if boundary[np.nonzero(boundary)[0][0],np.nonzero(boundary)[1][0]] > 0:
copy = np.copy(boundary)
copy[copy <= 0] = 0
return copy
else:
copy = np.copy(boundary)
copy *= -1
copy[copy <= 0] = 0
return copy
else:
return boundary
else:
        # check that there are boundaries: test cases
if np.shape(np.nonzero(boundary)[0])[0] != 0:
if boundary[np.nonzero(boundary)[0][0],np.nonzero(boundary)[1][0]] > 0:
copy = np.copy(boundary)
copy[copy <= 0] = 0
return copy
else:
copy = np.copy(boundary)
copy *= -1
copy[copy <= 0] = 0
return copy
else:
return boundary
def find_start_bound(boundaries):
"""
    This function takes the maximum-contrast edge and returns its position
Parameters
----------
boundaries
Returns
-------
"""
max_contrast_bound = np.argmax(boundaries)
shape = np.shape(boundaries)
start_bound = [0, 0, 0]
start_bound[0] = math.floor(max_contrast_bound / (shape[1] * shape[2]))
rest = max_contrast_bound - start_bound[0] * shape[1] * shape[2]
start_bound[1] = math.floor(rest / shape[2])
start_bound[2] = rest - start_bound[1] * shape[2]
return start_bound
def choose_next_bound(x, y, dir, bound):
"""
    Returns the next boundary position as a function of the nature of the primary boundary (top, bottom, left, right),
    taking the direction into account
Parameters
----------
x
y
dir
bound: matrix containing all the boundaries
Returns
-------
"""
#top bound
if bound == 0:
#look for bottom right
if dir == 0:
return [3, x + 1, y + 1]
#look for right top
elif dir == 1:
return [0, x, y + 1]
# look for top right
elif dir == 2:
return [2, x, y]
#look for right left
elif dir == 3:
return [2, x + 1, y - 1]
#look for left top
elif dir == 4:
return [0, x, y - 1]
#look for top left
else:
return [3, x, y]
#down bound
elif bound == 1:
#look for top left
if dir == 0:
return [2, x - 1, y - 1]
#look for left
elif dir == 1:
return [1, x, y - 1]
        #look for bottom right
elif dir == 2:
return [3, x, y]
#look for bottom right
elif dir == 3:
return [3, x - 1, y + 1]
#look for down
elif dir == 4:
return [1, x, y + 1]
#look for top right
else:
return [2, x, y]
#left boundaries
elif bound == 2:
# look for top right
if dir == 0:
return [0, x - 1, y + 1]
# look for left top
elif dir == 1:
return [2, x - 1, y]
        #look for top left
elif dir == 2:
return [1, x, y]
# look for bottom right
elif dir == 3:
return [1, x + 1, y + 1]
# look for left bottom
elif dir == 4:
return [2, x + 1, y]
# look for bottom left
else:
return [0, x, y]
#right boundaries
else:
# look for bottom left
if dir == 0:
return [1, x + 1, y - 1]
# look for right bottom
elif dir == 1:
return [3, x + 1, y]
# look for bottom right
elif dir == 2:
return [0, x, y]
# look for top left
elif dir == 3:
return [0, x - 1, y - 1]
# look for right top
elif dir == 4:
return [3, x - 1, y]
# look for top right
else:
return [1, x, y]
def find_next_boundaries(boundaries, loc, clockwise, thresh_bound, print_lab=False):
out_of_bound = False
# print()
# print("location: ", loc)
if clockwise:
for dir in range(0, 3):
new_loc = choose_next_bound(loc[1], loc[2], dir, loc[0])
if 0 <= new_loc[1] < np.shape(boundaries)[1] and 0 <= new_loc[2] < np.shape(boundaries)[2]:
if print_lab:
print("new loc", dir, new_loc, boundaries[new_loc[0], new_loc[1], new_loc[2]])
if boundaries[int(new_loc[0]), int(new_loc[1]), int(new_loc[2])] > thresh_bound:
return True, new_loc, False
else:
out_of_bound = True
else:
for dir in range(3, 6):
new_loc = choose_next_bound(loc[1], loc[2], dir, loc[0])
if 0 <= new_loc[1] < np.shape(boundaries)[1] and 0 <= new_loc[2] < np.shape(boundaries)[2]:
if print_lab:
print("new loc", dir, new_loc, boundaries[new_loc[0], new_loc[1], new_loc[2]])
if boundaries[int(new_loc[0]), int(new_loc[1]), int(new_loc[2])] > thresh_bound:
return True, new_loc, False
else:
out_of_bound = True
return False, loc, out_of_bound
def get_boundaries(input):
image_height = np.shape(input)[0]
image_width = np.shape(input)[1]
# boundaries are vertical left = 0 vertical right = 1 and horizontal left = 2 horizontal right = 3
boundaries = np.zeros((4, image_height, image_width))
# set up boundaries filter
# v1_hori_left = [[0,0,0],[1,-.2,-.8],[0,0,0]]
# v1_hori_right = [[0,0,0],[-.8,-.2,1],[0,0,0]]
v1_hori_left = [[0, 0, 0, 0, 0], [0.2, 1, -1.2, 0, 0], [0, 0, 0, 0, 0]]
v1_hori_right = [[0, 0, 0, 0, 0], [0, 0, -1.2, 1, 0.2], [0, 0, 0, 0, 0]]
v1_vert_top = np.transpose(v1_hori_left)
v1_vert_down = np.transpose(v1_hori_right)
# pass boundaries filter for each orientations
filters = np.zeros((4, image_height, image_width))
filters[0, :, :] = signal.convolve2d(input, v1_vert_top, boundary='symm', mode='same')
filters[1, :, :] = signal.convolve2d(input, v1_vert_down, boundary='symm', mode='same')
filters[2, :, :] = signal.convolve2d(input, v1_hori_left, boundary='symm', mode='same')
filters[3, :, :] = signal.convolve2d(input, v1_hori_right, boundary='symm', mode='same')
filters[filters < 0.00001] = 0
boundaries[0, :, :] = define_contrast_edge_boundaries(filters[0, :, :], True)
boundaries[1, :, :] = define_contrast_edge_boundaries(filters[1, :, :], False)
boundaries[2, :, :] = define_contrast_edge_boundaries(filters[2, :, :], True)
boundaries[3, :, :] = define_contrast_edge_boundaries(filters[3, :, :], False)
return boundaries
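if __name__ == "__main__":
    # Minimal sanity-check sketch (an addition, not part of the original module):
    # a bright square on a dark background should yield non-zero edge maps.
    img = np.zeros((8, 8))
    img[2:6, 2:6] = 1.0
    bounds = get_boundaries(img)
    print(bounds.shape)  # (4, 8, 8): one map per edge orientation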
| michaelStettler/HISI | HISI/boundaries.py | Python | mit | 16,880 | 0.005746 |
from collections.abc import Sequence
from numbers import Number
from . import Validator, Length, Range, Instance
from .compound import All
class Latitude(All):
"""Validate the given value as a number between -90 and +90 in decimal degrees, representing latitude."""
validators = [
Instance(Number),
Range(-90, 90)
]
latitude = Latitude()
class Longitude(All):
"""Validate the given value as a number between -180 and +180 in decimal degrees, representing longitude."""
validators = [
Instance(Number),
Range(-180, 180)
]
longitude = Longitude()
class Position(All):
"""Validate the given value as any sequence of exactly two elements representing latitude and longitude."""
validators = [
Instance(Sequence),
Length(slice(2, 3)) # exactly two elements long
]
def validate(self, value, context=None):
value = super().validate(value, context)
_lat, _long = value
latitude.validate(_lat)
longitude.validate(_long)
return value
position = Position()
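# Illustrative usage sketch (an addition; shows the intended behaviour only):
# position.validate((40.7, -74.0))   # a valid (latitude, longitude) pair passes
# position.validate((95.0, -74.0))   # latitude outside [-90, 90] fails validation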
| marrow/schema | marrow/schema/validate/geo.py | Python | mit | 1,013 | 0.0385 |
import time
import collections
from overviewbot import OverviewBot, Window
def format_time(timestamp):
return time.strftime("%Y-%m-%d %H:%M:%S UTC", time.gmtime(timestamp))
class CustomWindow(Window):
def __init__(self, input_key, *args, **keys):
Window.__init__(self, *args, **keys)
self.input_key = input_key
self.values = dict()
def added(self, event):
for value in event.values(self.input_key):
self.values[value] = self.values.get(value, 0) + 1
def discarded(self, event):
for value in event.values(self.input_key):
self.values[value] = self.values.get(value, 0) - 1
if self.values[value] <= 0:
del self.values[value]
def value(self):
if not self.values:
return None
if len(self.values) == 1:
return self.values.keys()[0]
return "%d unique values" % len(self.values)
class _Seen(Window):
def __init__(self, *args, **keys):
Window.__init__(self, *args, **keys)
self._times = collections.deque()
def added(self, _):
self._times.append(time.time())
def discarded(self, _):
self._times.popleft()
def _firstseen(self):
if self._times:
return format_time(self._times[0])
return None
def _lastseen(self):
if self._times:
return format_time(self._times[-1])
return None
class FirstSeen(_Seen):
def value(self):
return self._firstseen()
class LastSeen(_Seen):
def value(self):
return self._lastseen()
class CustomOverviewBot(OverviewBot):
def aggregates(self):
result = dict(OverviewBot.aggregates(self))
result["custom"] = CustomWindow
result["firstseen"] = FirstSeen
result["lastseen"] = LastSeen
return result
if __name__ == "__main__":
CustomOverviewBot.from_command_line().execute()
| softcert/vsroom | vsroom/common/customoverview.py | Python | mit | 1,942 | 0.003605 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from processors.hra.extractors import _clean_identifier
from processors.hra.extractors import _url_from_title
# Tests
def test_clean_identifier():
assert _clean_identifier('NCT12345678', prefix='NCT') == 'NCT12345678'
assert _clean_identifier('12345678', prefix='NCT') == 'NCT12345678'
assert _clean_identifier('ISRCTN12345678', prefix='ISRCTN') == 'ISRCTN12345678'
assert _clean_identifier('12345678', prefix='ISRCTN') == 'ISRCTN12345678'
assert _clean_identifier('n/a', prefix='NCT') == None
def test_url_from_title():
title = 'Longterm F/U study of BOTOX® in Idiopathic Overactive Bladder patients'
expected_url = 'http://www.hra.nhs.uk/news/research-summaries/longterm-fu-study-of-botox-in-idiopathic-overactive-bladder-patients'
assert _url_from_title(title) == expected_url
| arthurSena/processors | tests/test_hra.py | Python | mit | 992 | 0.004036 |
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from openstack import exceptions
from heat.common import template_format
from heat.engine.clients.os import senlin
from heat.engine.resources.openstack.senlin import receiver as sr
from heat.engine import scheduler
from heat.tests import common
from heat.tests import utils
receiver_stack_template = """
heat_template_version: 2016-04-08
description: Senlin Receiver Template
resources:
senlin-receiver:
type: OS::Senlin::Receiver
properties:
name: SenlinReceiver
cluster: fake_cluster
action: CLUSTER_SCALE_OUT
type: webhook
params:
foo: bar
"""
class FakeReceiver(object):
def __init__(self, id='some_id'):
self.id = id
self.name = "SenlinReceiver"
self.cluster_id = "fake_cluster"
self.action = "CLUSTER_SCALE_OUT"
self.channel = {'alarm_url': "http://foo.bar/webhooks/fake_url"}
def to_dict(self):
return {
'id': self.id,
'name': self.name,
'cluster_id': self.cluster_id,
'action': self.action,
'channel': self.channel,
'actor': {'trust_id': ['fake_trust_id']}
}
class SenlinReceiverTest(common.HeatTestCase):
def setUp(self):
super(SenlinReceiverTest, self).setUp()
self.senlin_mock = mock.MagicMock()
self.patchobject(sr.Receiver, 'client',
return_value=self.senlin_mock)
self.patchobject(senlin.ClusterConstraint, 'validate',
return_value=True)
self.fake_r = FakeReceiver()
self.t = template_format.parse(receiver_stack_template)
def _init_recv(self, template):
self.stack = utils.parse_stack(template)
recv = self.stack['senlin-receiver']
return recv
def _create_recv(self, template):
recv = self._init_recv(template)
self.senlin_mock.create_receiver.return_value = self.fake_r
self.senlin_mock.get_receiver.return_value = self.fake_r
scheduler.TaskRunner(recv.create)()
self.assertEqual((recv.CREATE, recv.COMPLETE),
recv.state)
self.assertEqual(self.fake_r.id, recv.resource_id)
return recv
def test_recv_create_success(self):
self._create_recv(self.t)
expect_kwargs = {
'name': 'SenlinReceiver',
'cluster_id': 'fake_cluster',
'action': 'CLUSTER_SCALE_OUT',
'type': 'webhook',
'params': {'foo': 'bar'},
}
self.senlin_mock.create_receiver.assert_called_once_with(
**expect_kwargs)
def test_recv_delete_success(self):
self.senlin_mock.delete_receiver.return_value = None
recv = self._create_recv(self.t)
scheduler.TaskRunner(recv.delete)()
self.senlin_mock.delete_receiver.assert_called_once_with(
recv.resource_id)
def test_recv_delete_not_found(self):
self.senlin_mock.delete_receiver.side_effect = [
exceptions.ResourceNotFound(http_status=404)
]
recv = self._create_recv(self.t)
scheduler.TaskRunner(recv.delete)()
self.senlin_mock.delete_receiver.assert_called_once_with(
recv.resource_id)
def test_cluster_resolve_attribute(self):
        expected_show = {
'id': 'some_id',
'name': 'SenlinReceiver',
'cluster_id': 'fake_cluster',
'action': 'CLUSTER_SCALE_OUT',
'channel': {'alarm_url': "http://foo.bar/webhooks/fake_url"},
'actor': {'trust_id': ['fake_trust_id']}
}
recv = self._create_recv(self.t)
self.assertEqual(self.fake_r.channel,
recv._resolve_attribute('channel'))
        self.assertEqual(expected_show,
recv._show_resource())
| noironetworks/heat | heat/tests/openstack/senlin/test_receiver.py | Python | apache-2.0 | 4,398 | 0 |
from django import forms
attr = {'class': 'form-control'}
class GroupSeedForm(forms.Form):
seed = forms.CharField(label='Seed', max_length=1337, initial='none', widget=forms.TextInput(attrs=attr))
class UserSeedForm(forms.Form):
pseudonym = forms.CharField(label='Pseudonym', min_length=3, widget=forms.TextInput(attrs=attr))
password = forms.CharField(label='Password', widget=forms.PasswordInput(attrs=attr))
| ZenifiedFromI2P/antisocial | bootstraps/forms.py | Python | gpl-3.0 | 426 | 0.011737 |
# -*- coding: utf-8 -*-
'''
Created on 2012-7-3
@author: lihao
'''
try: import httplib
except ImportError:
import http.client as httplib
import urllib
import time
import hashlib
import json
import top
import itertools
import mimetypes
'''
Define some system-level constants
'''
SYSTEM_GENERATE_VERSION = "taobao-sdk-python-20151214"
P_APPKEY = "app_key"
P_API = "method"
P_SESSION = "session"
P_ACCESS_TOKEN = "access_token"
P_VERSION = "v"
P_FORMAT = "format"
P_TIMESTAMP = "timestamp"
P_SIGN = "sign"
P_SIGN_METHOD = "sign_method"
P_PARTNER_ID = "partner_id"
P_CODE = 'code'
P_SUB_CODE = 'sub_code'
P_MSG = 'msg'
P_SUB_MSG = 'sub_msg'
N_REST = '/router/rest'
def sign(secret, parameters):
#===========================================================================
    # '''Signing method
    # @param secret: the secret key used for signing
    # @param parameters: supports both dict and string
# '''
#===========================================================================
    # if parameters is a dict-like object
if hasattr(parameters, "items"):
keys = parameters.keys()
keys.sort()
parameters = "%s%s%s" % (secret,
str().join('%s%s' % (key, parameters[key]) for key in keys),
secret)
sign = hashlib.md5(parameters).hexdigest().upper()
return sign
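# Illustrative example (an addition): with secret='secret' and parameters={'a': '1', 'b': '2'},
# the string that gets MD5-hashed is 'secreta1b2secret' and the hex digest is upper-cased.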
def mixStr(pstr):
if(isinstance(pstr, str)):
return pstr
elif(isinstance(pstr, unicode)):
return pstr.encode('utf-8')
else:
return str(pstr)
class FileItem(object):
def __init__(self,filename=None,content=None):
self.filename = filename
self.content = content
class MultiPartForm(object):
"""Accumulate the data to be used when posting a form."""
def __init__(self):
self.form_fields = []
self.files = []
self.boundary = "PYTHON_SDK_BOUNDARY"
return
def get_content_type(self):
return 'multipart/form-data; boundary=%s' % self.boundary
def add_field(self, name, value):
"""Add a simple field to the form data."""
self.form_fields.append((name, str(value)))
return
def add_file(self, fieldname, filename, fileHandle, mimetype=None):
"""Add a file to be uploaded."""
body = fileHandle.read()
if mimetype is None:
mimetype = mimetypes.guess_type(filename)[0] or 'application/octet-stream'
self.files.append((mixStr(fieldname), mixStr(filename), mixStr(mimetype), mixStr(body)))
return
def __str__(self):
"""Return a string representing the form data, including attached files."""
# Build a list of lists, each containing "lines" of the
# request. Each part is separated by a boundary string.
# Once the list is built, return a string where each
# line is separated by '\r\n'.
parts = []
part_boundary = '--' + self.boundary
# Add the form fields
parts.extend(
[ part_boundary,
'Content-Disposition: form-data; name="%s"' % name,
'Content-Type: text/plain; charset=UTF-8',
'',
value,
]
for name, value in self.form_fields
)
# Add the files to upload
parts.extend(
[ part_boundary,
'Content-Disposition: file; name="%s"; filename="%s"' % \
(field_name, filename),
'Content-Type: %s' % content_type,
'Content-Transfer-Encoding: binary',
'',
body,
]
for field_name, filename, content_type, body in self.files
)
# Flatten the list and add closing boundary marker,
# then return CR+LF separated data
flattened = list(itertools.chain(*parts))
flattened.append('--' + self.boundary + '--')
flattened.append('')
return '\r\n'.join(flattened)
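# Illustrative usage sketch (an addition; the file name is hypothetical):
# form = MultiPartForm()
# form.add_field('title', 'hello')
# form.add_file('image', 'pic.jpg', open('pic.jpg', 'rb'))
# body, content_type = str(form), form.get_content_type()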
class TopException(Exception):
#===========================================================================
    # Business exception class
#===========================================================================
def __init__(self):
self.errorcode = None
self.message = None
self.subcode = None
self.submsg = None
self.application_host = None
self.service_host = None
def __str__(self, *args, **kwargs):
sb = "errorcode=" + mixStr(self.errorcode) +\
" message=" + mixStr(self.message) +\
" subcode=" + mixStr(self.subcode) +\
" submsg=" + mixStr(self.submsg) +\
" application_host=" + mixStr(self.application_host) +\
" service_host=" + mixStr(self.service_host)
return sb
class RequestException(Exception):
#===========================================================================
    # Request/connection exception class
#===========================================================================
pass
class RestApi(object):
#===========================================================================
    # Base class for REST APIs
#===========================================================================
def __init__(self, domain='gw.api.taobao.com', port = 80):
#=======================================================================
        # Initialize the base class
        # Args @param domain: the domain name or IP to send requests to
        #      @param port: the port to send requests to
#=======================================================================
self.__domain = domain
self.__port = port
self.__httpmethod = "POST"
if(top.getDefaultAppInfo()):
self.__app_key = top.getDefaultAppInfo().appkey
self.__secret = top.getDefaultAppInfo().secret
def get_request_header(self):
return {
'Content-type': 'application/x-www-form-urlencoded;charset=UTF-8',
"Cache-Control": "no-cache",
"Connection": "Keep-Alive",
}
def set_app_info(self, appinfo):
#=======================================================================
        # Set the app info used for requests
# @param appinfo: import top
# appinfo top.appinfo(appkey,secret)
#=======================================================================
self.__app_key = appinfo.appkey
self.__secret = appinfo.secret
def getapiname(self):
return ""
def getMultipartParas(self):
return [];
def getTranslateParas(self):
return {};
def _check_requst(self):
pass
def getResponse(self, authrize=None, timeout=30):
#=======================================================================
        # Get the response result
#=======================================================================
connection = httplib.HTTPConnection(self.__domain, self.__port, False, timeout)
sys_parameters = {
P_FORMAT: 'json',
P_APPKEY: self.__app_key,
P_SIGN_METHOD: "md5",
P_VERSION: '2.0',
P_TIMESTAMP: str(long(time.time() * 1000)),
P_PARTNER_ID: SYSTEM_GENERATE_VERSION,
P_API: self.getapiname(),
}
if authrize is not None:
sys_parameters[P_SESSION] = authrize
application_parameter = self.getApplicationParameters()
sign_parameter = sys_parameters.copy()
sign_parameter.update(application_parameter)
sys_parameters[P_SIGN] = sign(self.__secret, sign_parameter)
connection.connect()
header = self.get_request_header();
if(self.getMultipartParas()):
form = MultiPartForm()
for key, value in application_parameter.items():
form.add_field(key, value)
for key in self.getMultipartParas():
fileitem = getattr(self,key)
if(fileitem and isinstance(fileitem,FileItem)):
form.add_file(key,fileitem.filename,fileitem.content)
body = str(form)
header['Content-type'] = form.get_content_type()
else:
body = urllib.urlencode(application_parameter)
url = N_REST + "?" + urllib.urlencode(sys_parameters)
connection.request(self.__httpmethod, url, body=body, headers=header)
response = connection.getresponse();
        if response.status != 200:
raise RequestException('invalid http status ' + str(response.status) + ',detail body:' + response.read())
result = response.read()
jsonobj = json.loads(result)
if jsonobj.has_key("error_response"):
error = TopException()
if jsonobj["error_response"].has_key(P_CODE) :
error.errorcode = jsonobj["error_response"][P_CODE]
if jsonobj["error_response"].has_key(P_MSG) :
error.message = jsonobj["error_response"][P_MSG]
if jsonobj["error_response"].has_key(P_SUB_CODE) :
error.subcode = jsonobj["error_response"][P_SUB_CODE]
if jsonobj["error_response"].has_key(P_SUB_MSG) :
error.submsg = jsonobj["error_response"][P_SUB_MSG]
error.application_host = response.getheader("Application-Host", "")
error.service_host = response.getheader("Location-Host", "")
raise error
return jsonobj
def getApplicationParameters(self):
application_parameter = {}
for key, value in self.__dict__.iteritems():
if not key.startswith("__") and not key in self.getMultipartParas() and not key.startswith("_RestApi__") and value is not None :
if(key.startswith("_")):
application_parameter[key[1:]] = value
else:
application_parameter[key] = value
        # look up the translation dict to work around reserved keyword attribute names
translate_parameter = self.getTranslateParas()
for key, value in application_parameter.iteritems():
if key in translate_parameter:
application_parameter[translate_parameter[key]] = application_parameter[key]
del application_parameter[key]
return application_parameter
| BillBillBillBill/WishTalk-server | WishTalk/top/api/base.py | Python | mit | 10,416 | 0.009482 |
__author__ = 'Liam'
import types
def flag(func):
func.is_flag = True
return func
class BadSearchOp(Exception):
def __init__(self, value = "bad search operation"):
self.value = value
def __str__(self):
return "BadSearchOp: %s" % self.value
class ImapSearchQueryParser(object):
"""
Receives a list of commands for the IMAP V4 search
and returns a dictionary of the commands, that can be used in various mail API's
including walla API for mail
based on RFC3501:
https://tools.ietf.org/html/rfc3501#section-6.4.4
example of commands:
C: A282 SEARCH FLAGGED SINCE 1-Feb-1994 NOT FROM "Smith"
S: * SEARCH 2 84 882
S: A282 OK SEARCH completed
C: A283 SEARCH TEXT "string not in mailbox"
S: * SEARCH
S: A283 OK SEARCH completed
C: A284 SEARCH CHARSET UTF-8 TEXT {6}
C: XXXXXX
S: * SEARCH 43
S: A284 OK SEARCH completed
"""
def __init__(self):
"""
:param query:
:return:
"""
#self.log("{} constructor ".format(self.__class__.__name__))
self.opFunctionList = [x for x,y in self.__class__.__dict__.items() if type(y) == types.FunctionType]
self.query = None
self.commands = {}
self.commands_list = []
#self.__validate()
#########################################################################
#
def __repr__(self):
return self.__class__.__name__+", commands: %s" % self.commands
def log(self,msg):
print msg
#self.logger.log(logging.DEBUG,msg)
def __str__(self):
return str(self.commands)
def _update_command_list(self, command, idx1, idx2=None):
"""
Updates both the command list and commands as to prepare for OR parsing
:param command: a single dictionary object with one key:value (command:argument)
:param idx1: first index
:param idx2: second index
:return:
"""
command_wrapper = {
'data': command,
'pos': [idx1]
}
# update second position
if idx2:
command_wrapper['pos'].append(idx2)
# adding to command list with positions of current command and argument
self.commands_list.append(command_wrapper)
# update the command
self.commands.update(command)
@flag
def OP__ALL(self,currentIndex=None):
self._update_command_list({'all': True}, currentIndex)
@flag
def OP__ANSWERED(self,currentIndex=None):
self._update_command_list({'answered': True}, currentIndex)
def OP__BCC(self,currentIndex=None):
"""
BCC <string>
Messages that contain the specified string in the envelope
structure's BCC field.
:param currentIndex:
:return:
"""
if currentIndex+1 < len(self.query):
#todo check bcc validation
self._update_command_list({'bcc': self.query[currentIndex+1]}, currentIndex, currentIndex+1)
else:
raise BadSearchOp('Operator "BCC" provided but with no argument in query list')
def OP__BEFORE(self,currentIndex=None):
argument = self._get_command_argument(currentIndex)
if argument:
self._update_command_list({'before': argument}, currentIndex, currentIndex+1)
else:
raise BadSearchOp('Operator "BEFORE" provided but with no argument in query list')
def OP__BODY(self,currentIndex=None):
argument = self._get_command_argument(currentIndex)
if argument:
self._update_command_list({'body': argument}, currentIndex, currentIndex+1)
else:
raise BadSearchOp('Operator "BODY" provided but with no argument in query list')
def OP__CC(self,currentIndex=None):
argument = self._get_command_argument(currentIndex)
if argument:
self._update_command_list({'cc': argument}, currentIndex, currentIndex+1)
else:
raise BadSearchOp('Operator "CC" provided but with no argument in query list')
@flag
def OP__DELETED(self,currentIndex=None):
self._update_command_list({'deleted': True}, currentIndex)
@flag
def OP__DRAFT(self,currentIndex=None):
self._update_command_list({'draft': True}, currentIndex)
@flag
def OP__FLAGGED(self,currentIndex=None):
self._update_command_list({'flagged': True}, currentIndex)
def OP__FROM(self,currentIndex=None):
"""
FROM <string>
Messages that contain the specified string in the envelope
structure's FROM field.
:return:
"""
# assuming that next item is the value, such as: FROM '[email protected]'
argument = self._get_command_argument(currentIndex)
if argument:
self._update_command_list({'from': argument}, currentIndex, currentIndex+1)
else:
raise BadSearchOp('Operator "FROM" provided but with no argument in query list')
def OP__HEADER(self,currentIndex=None):
# todo work on this one
pass
def OP__KEYWORD(self,currentIndex=None):
argument = self._get_command_argument(currentIndex)
if argument:
self._update_command_list({'keyword': argument}, currentIndex, currentIndex+1)
else:
raise BadSearchOp('Operator "KEYWORD" provided but with no argument in query list')
def OP__LARGER(self,currentIndex=None):
argument = self._get_command_argument(currentIndex)
if argument:
self._update_command_list({'larger': argument}, currentIndex, currentIndex+1)
else:
raise BadSearchOp('Operator "LARGER" provided but with no argument in query list')
@flag
def OP__NEW(self,currentIndex=None):
self._update_command_list({'new': True}, currentIndex)
@flag
def OP__OLD(self,currentIndex=None):
self._update_command_list({'old': True}, currentIndex)
@flag
def OP__RECENT(self,currentIndex=None):
        self._update_command_list({'recent': True}, currentIndex)
@flag
def OP__SEEN(self,currentIndex=None):
self._update_command_list({'seen': True}, currentIndex)
@flag
def OP__UNANSWERED(self,currentIndex=None):
self._update_command_list({'unanswered': True}, currentIndex)
@flag
    def OP__UNDRAFT(self,currentIndex=None):
self._update_command_list({'undraft': True}, currentIndex)
@flag
def OP__UNFLAGGED(self,currentIndex=None):
self._update_command_list({'unflagged': True}, currentIndex)
@flag
def OP__UNKEYWORD(self,currentIndex=None):
"""
UNKEYWORD <flag>
Messages that do not have the specified keyword flag set.
"""
# todo make it proper somehow
#self.commands.update({'seen': True})
@flag
def OP__UNSEEN(self,currentIndex=None):
self._update_command_list({'unseen': True}, currentIndex)
def OP__SENTBEFORE(self,currentIndex=None):
if currentIndex+1 < len(self.query):
self._update_command_list({'sentbefore': self.query[currentIndex+1]}, currentIndex, currentIndex+1)
else:
raise BadSearchOp('Operator "SENTBEFORE" provided but with no argument in query list')
def OP__SENTON(self, currentIndex=None):
if currentIndex+1 < len(self.query):
self._update_command_list({'senton': self.query[currentIndex+1]}, currentIndex)
else:
raise BadSearchOp('Operator "SENTON" provided but with no argument in query list')
def OP__SENTSINCE(self,currentIndex=None):
if currentIndex+1 < len(self.query):
self._update_command_list({'sentsince': self.query[currentIndex+1]},currentIndex)
else:
raise BadSearchOp('Operator "SENTSINCE" provided but with no argument in query list')
def OP__SINCE(self,currentIndex=None):
if currentIndex+1 < len(self.query):
self._update_command_list({'since': self.query[currentIndex+1]}, currentIndex, currentIndex+1)
else:
raise BadSearchOp('Operator "SINCE" provided but with no argument in query list')
def OP__SMALLER(self,currentIndex=None):
if currentIndex+1 < len(self.query):
self._update_command_list({'smaller': self.query[currentIndex+1]}, currentIndex, currentIndex+1)
else:
raise BadSearchOp('Operator "SMALLER" provided but with no argument in query list')
def OP__SUBJECT(self,currentIndex=None):
if currentIndex+1 < len(self.query):
self._update_command_list({'subject': self.query[currentIndex+1]}, currentIndex, currentIndex+1)
else:
raise BadSearchOp('Operator "SUBJECT" provided but with no argument in query list')
def OP__TEXT(self,currentIndex=None):
if currentIndex+1 < len(self.query):
self._update_command_list({'text': self.query[currentIndex+1]}, currentIndex, currentIndex+1)
else:
raise BadSearchOp('Operator "TEXT" provided but with no argument in query list')
def OP__TO(self,currentIndex=None):
if currentIndex+1 < len(self.query):
self._update_command_list({'to': self.query[currentIndex+1]}, currentIndex, currentIndex+1)
else:
raise BadSearchOp('Operator "TO" provided but with no argument in query list')
def OP__UID(self,currentIndex=None):
if currentIndex+1 < len(self.query):
self._update_command_list({'uid': self.query[currentIndex+1]}, currentIndex, currentIndex+1)
else:
raise BadSearchOp('Operator "UID" provided but with no argument in query list')
def _NOT_PARSER(self):
#print "NOT PARSER---"
for i in range(len(self.query)):
operator = self.query[i]
#print "operator:"+operator
if (operator=="NOT"):
#print "found NOT index:{}".format(i)
# find what is next command
if (i+1<len(self.query)):
next_possible_command = self.query[i+1]
#print "next_possible_command:{}".format(next_possible_command)
# is possible command a valid operator function?
possible_command_function = self.__get_op_function(next_possible_command)
# indeed a function
if (callable(possible_command_function)):
is_flag = getattr(possible_command_function,'is_flag',False)
if is_flag:
command = {next_possible_command.lower(): False}
self._update_command_list(command,i)
else:
old_operator_value = self.commands.get(next_possible_command.lower())
for command in self.commands_list:
if command['data'].get(next_possible_command.lower(),None):
del command['data']
command['data'] = {
'not-'+next_possible_command.lower():old_operator_value
}
# add the from position so it will be match when doing OR NOT
command['pos'].append(i)
self.commands['not-'+next_possible_command.lower()] = old_operator_value
del self.commands[next_possible_command.lower()]
def _OR_PARSER(self):
"""
        we start parsing the OR command and progressively correct / update the commands using the commands_list metadata
:return:
"""
def _find_command_by_indexes(index1,index2):
#for i in range(len(self.commands_list)):
foundCommands = []
for command in self.commands_list:
pos = command['pos']
#print "command:{}".format(command)
if (index1 in pos):
foundCommands.append(command['data'])
if (index2 in pos):
foundCommands.append(command['data'])
#print "Found OR commands: {}".format(foundCommands)
return foundCommands
for i in range(len(self.query)):
operator = self.query[i]
rhs,lhs = None,None
if operator== "OR":
if (i+1<len(self.query)):
rhs = i+1
if i-1 > -1:
lhs = i-1
# only if both rhs and lhs exist can we go on
if not rhs and not lhs:
raise BadSearchOp('Operator "OR" provided but missing both left hand and right hand side params')
or_commands = _find_command_by_indexes(lhs,rhs)
if len(or_commands)==2:
orDict = {}
for command in or_commands:
#orDict.update(command)
# if command in commands
for k,v in command.iteritems():
#print "K:{} v:{}".format(k,v)
# key of command found
if k in self.commands:
orDict[k] = v
del self.commands[k]
#print "orDict:{}".format(orDict)
self.commands['or'] = orDict
#if command in self.commands
#print "OR RHS:{} LHS:{}".format(rhs, lhs)
def _get_command_argument(self,currentIndex):
"""
will treat the next command as argument to command in currentIndex.
used for all commands that have parameters (arguments),
such as:
FROM <string>
BEFORE <date>
BODY <string> etc...
:param currentIndex:
:return:
"""
# assuming that next item is the value, such as: FROM '[email protected]'
if currentIndex+1 < len(self.query):
#todo check validation
argument = self.query[currentIndex+1]
return argument
else:
return None
@property
def opList(self):
return self.opFunctionList
def __get_op_function(self,operator):
operatorFuncName = "OP__"+operator.upper()
if operatorFuncName in self.opList:
opFunction = getattr(self,operatorFuncName)
return opFunction
else:
return None
def __validate(self):
"""
tries to validate the command set
:return:
"""
print "IMAP4 Search Query List:{}".format(self.query)
if len(self.query) < 1:
raise BadSearchOp("not enough items in list, has to be more then 1 (sequence set,search)")
for i in range(len(self.query)):
operator = self.query[i]
opFunction = self.__get_op_function(operator)
if (opFunction):
#print "operator found:{}".format(operator)
opFunction(i)
else:
pass
#print "operator not found:{}".format(operator)
self._NOT_PARSER()
self._OR_PARSER()
return self.commands
def parse(self, query):
self.query = query
return self.__validate()
if __name__ == "__main__":
test_commands = [
['NOT','FLAGGED','SINCE','1-Feb-1994','NOT','FROM','Smith','BCC', '[email protected]'],
['NOT','BEFORE','1-Feb-1994','NOT','FROM','Smith'],
['SEEN','BEFORE','1-Feb-1994','OR','NOT','FROM','Smith'],
['NOT','SENTBEFORE','1-Feb-1994','NOT','FROM','Smith'],
['SUBJECT','all about love','NOT','TO','[email protected]','SINCE','1-Feb-1994','NOT','FROM','Smith','UID','1:*','OR','NOT','TEXT','Go To Hello'],
['SEEN','BEFORE','1-Feb-1994','OR','NOT','FROM','Smith']
]
for command_set in test_commands:
c = ImapSearchQueryParser()
res = c.parse(command_set)
print "Result:{}".format(res)
#print "command_list:{}".format(c.commands_list)
| syberkitten/Imap4SearchQueryParser | SearchParser.py | Python | gpl-3.0 | 16,261 | 0.011746 |
import os
import re
source_dir = "src/main/res/"
target_dir = "../fastlane/metadata/android/"
def copy_key_from_strings_xml_to_file(xml, key, filename):
match = re.search("<string name=\"" + key + "\">\"?(.*?)\"?</string>", xml, re.DOTALL)
if match:
with open(filename, "w", encoding='utf8') as file:
file.write(match.group(1))
def get_locale_from(dirname):
if not dirname.startswith("values"):
return None
components = dirname.split("-")
if len(components) == 1:
return "en"
elif re.search('[0-9]',components[1]):
return None
elif len(components) == 2:
return components[1]
elif len(components) == 3:
return components[1] + "-" + components[2][1:]
return None
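# Examples of the mapping above (added for clarity):
# 'values' -> 'en', 'values-de' -> 'de', 'values-zh-rTW' -> 'zh-TW', 'values-v21' -> None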
for dirname in sorted(os.listdir(source_dir)):
locale = get_locale_from(dirname)
if not locale:
continue
stringsfile = source_dir + dirname + "/strings.xml"
if not os.path.exists(stringsfile):
continue;
print(locale)
locale_dir = target_dir + locale
if not os.path.exists(locale_dir):
os.makedirs(locale_dir)
with open(stringsfile, 'r', encoding='utf8') as file:
xml = file.read()
copy_key_from_strings_xml_to_file(xml, "store_listing_short_description", locale_dir + "/short_description.txt")
copy_key_from_strings_xml_to_file(xml, "store_listing_full_description", locale_dir + "/full_description.txt")
| Binnette/StreetComplete | app/copyShopDescriptions.py | Python | gpl-3.0 | 1,324 | 0.036254 |
import binascii
import hashlib
import mmap
import struct
import zlib
from . import delta
from .sixx import byte2int
class Error(Exception):
"""Pack Error"""
OBJ_TYPE_COMMIT = 1
OBJ_TYPE_TREE = 2
OBJ_TYPE_BLOB = 3
OBJ_TYPE_TAG = 4
OBJ_TYPE_OFS_DELTA = 6
OBJ_TYPE_REF_DELTA = 7
object_types = {
1: 'commit',
2: 'tree',
3: 'blob',
4: 'tag',
6: 'ofs_delta',
7: 'ref_delta',
}
DELTA_OBJECT_TYPES = [OBJ_TYPE_OFS_DELTA, OBJ_TYPE_REF_DELTA]
class Packfile(object):
def __init__(self, filename):
self.__file = open(filename, 'rb')
if self.__file.read(4) != b'PACK':
raise Error('Not a packfile: %s' % filename)
self.version = struct.unpack('>L', self.__file.read(4))[0]
if self.version != 2:
raise Error(
'Version %d packfile is not supported: %s' %
(self.version, filename))
self.__objectcount = struct.unpack('>L', self.__file.read(4))[0]
self.header_length = self.__file.tell()
self.data = mmap.mmap(
self.__file.fileno(), length=0, access=mmap.ACCESS_READ)
self.object_offset_map = {}
self.offset_id_map = {}
self.offsets = [self.header_length]
@property
def filename(self):
return self.__file.name
def __iter__(self):
for i in range(len(self)):
yield self[i]
def first_object(self):
return self.object_at(self.header_length)
def object_at(self, offset):
try:
return self.object_offset_map[offset]
except KeyError:
obj = PackfileObject(self, offset)
self.object_offset_map[offset] = obj
return obj
def object_by_id(self, object_id):
try:
return self.object_at(self.offset_id_map[object_id])
except KeyError:
for obj in self:
self.offset_id_map[obj.id] = obj.offset
if obj.id == object_id:
return obj
raise Error(
'Object with id=%s not found' %
binascii.hexlify(object_id).decode('ascii'))
def __len__(self):
return self.__objectcount
def __getitem__(self, i):
if i < 0 or i >= len(self):
raise IndexError(
'Object index %d is not in [0,%d]' % (i, len(self)-1))
if len(self.offsets) <= i:
offset = self.offsets[-1]
n = len(self.offsets) - 1
while n <= i:
offset = self.object_at(offset).end
n += 1
assert n == len(self.offsets)
self.offsets.append(offset)
assert len(self.offsets) > i
return self.object_at(self.offsets[i])
def is_checksum_ok(self):
sha = hashlib.sha1()
sha.update(self.data[:-20])
return self.data[-20:] == sha.digest()
def verify(self):
last_object_end = self[len(self)-1].end
assert last_object_end == len(self.data) - 20
assert self.is_checksum_ok
for obj in self:
assert obj.size == len(obj.decompressed_data)
if obj.type in DELTA_OBJECT_TYPES:
assert obj.delta_base
class PackfileObject(object):
def __init__(self, packfile, offset):
self.packfile = packfile
self.pack = packfile.data
self.offset = offset
self.__init_from_header()
self.__end = None
self.__delta_base = None
self.__delta_depth = None
self.__real_type = None
self.__decompressed_data = None
self.__data = None
self.__id = None
def __init_from_header(self):
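        # Pack object header: the first byte carries the object type in bits 6-4
        # and the low 4 bits of the size; each continuation byte (MSB set) adds 7
        # more size bits. For OFS_DELTA objects a big-endian base-128 negative
        # offset follows; `dplus` compensates for the +1-per-byte bias of that encoding.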
pos = self.offset
self.type = (byte2int(self.pack[pos]) & 0b01110000) >> 4
sz = byte2int(self.pack[pos]) & 0b00001111
shift = 4
while byte2int(self.pack[pos]) & 0b10000000:
pos += 1
sz |= (byte2int(self.pack[pos]) & 0b01111111) << shift
shift += 7
self.size = sz
if self.type == OBJ_TYPE_OFS_DELTA:
pos += 1
dplus = 0
dplusadd = 1
doff = byte2int(self.pack[pos]) & 0b01111111
while byte2int(self.pack[pos]) & 0b10000000:
pos += 1
dplusadd <<= 7
dplus |= dplusadd
doff <<= 7
doff |= (byte2int(self.pack[pos]) & 0b01111111)
self.delta_offset = doff + dplus
self.__delta_base_id = None
elif self.type == OBJ_TYPE_REF_DELTA:
self.delta_offset = None
self.__delta_base_id = self.pack[pos+1:pos+21]
pos += 20
else:
self.delta_offset = None
self.__delta_base_id = None
self.start = pos + 1
@property
def end(self):
if self.__end is None:
self.__decompress()
return self.__end
@property
def delta_base(self):
if self.__delta_base is None:
if self.delta_offset is not None:
self.__delta_base = self.packfile.object_at(
self.offset - self.delta_offset)
elif self.__delta_base_id is not None:
self.__delta_base = self.packfile.object_by_id(
self.__delta_base_id)
return self.__delta_base
@property
def delta_base_id(self):
if self.__delta_base_id is None:
if self.delta_base is not None:
self.__delta_base_id = self.delta_base.id
return self.__delta_base_id
@property
def delta_depth(self):
if self.__delta_depth is None:
if self.delta_base is not None:
self.__delta_depth = self.delta_base.delta_depth + 1
else:
self.__delta_depth = 0
return self.__delta_depth
@property
def real_type(self):
if self.__real_type is None:
if self.delta_base is not None:
self.__real_type = self.delta_base.real_type
else:
self.__real_type = self.type
return self.__real_type
@property
def raw_data(self):
return self.pack[self.start:self.end]
@property
def decompressed_data(self):
if self.__decompressed_data is None:
self.__decompress()
return self.__decompressed_data
@property
def data(self):
if self.__data is None:
if self.type in DELTA_OBJECT_TYPES:
self.__data = delta.decode_delta(
self.decompressed_data, self.delta_base.data)
else:
self.__data = self.decompressed_data
return self.__data
@property
def id(self):
if self.__id is None:
hdr = '%s %d\0' % (object_types[self.real_type], len(self.data))
sha = hashlib.sha1()
sha.update(hdr.encode('ascii') + self.data)
self.__id = sha.digest()
return self.__id
def __decompress(self):
block_len = 4096
decompressor = zlib.decompressobj()
pos = self.start
data = b''
while True:
in_block_len = min(block_len, len(self.pack) - pos)
in_block = self.pack[pos:pos+in_block_len]
assert len(in_block) == in_block_len, '%d != %d' % (len(in_block), in_block_len)
decompressed = decompressor.decompress(in_block)
pos += in_block_len
data += decompressed
if decompressor.unused_data:
break
if pos >= len(self.pack):
assert pos == len(self.pack)
assert not decompressor.unconsumed_tail
break
self.__decompressed_data = data
self.__end = pos - len(decompressor.unused_data)
def __repr__(self):
typestr = (
object_types[self.type] if self.type in object_types
else 'type=%d' % self.type)
return '<%s %s offset=%d>' % (
self.__class__.__name__, typestr, self.offset)
def main(sys):
Packfile(sys.argv[1]).verify()
if __name__ == '__main__':
import sys
main(sys)
| suutari/gitexpy | gitexpy/pack.py | Python | gpl-2.0 | 8,148 | 0.003436 |
# -*- coding: utf-8 -*-
import numpy as np
def signal_to_class(data, n=2, normalize=True):
"""
    Converts a list of signals to an n-dimensional list of classes [buy, .., sell].
Arguments
n (int): Number of classes.
        normalize (bool): If True, encode hard one-hot classes; if False, keep the signal magnitude (soft classes).
Returns
Array of classes.
"""
result = np.array([])
data = np.array(data)
if len(data.shape) > 1:
raise ValueError("The array must be one-dimensional.")
if n == 2:
if normalize:
for item in data:
if item > 0: # buy
result = np.append(result, [1.0, 0.0])
if item <= 0: # sell
result = np.append(result, [0.0, 1.0])
else:
for item in data:
result = np.append(result, [0.5+item/2.0, 0.5-item/2.0])
elif n == 3:
if normalize:
for item in data:
if item > 0: # buy
result = np.append(result, [1.0, 0.0, 0.0])
if item < 0: # sell
result = np.append(result, [0.0, 0.0, 1.0])
if item == 0: # pass
result = np.append(result, [0.0, 1.0, 0.0])
else:
for item in data:
if item > 0: # buy
result = np.append(result, [abs(item), (1.0-abs(item)), 0.0])
if item < 0: # sell
result = np.append(result, [0.0, (1.0-abs(item)), abs(item)])
if item == 0: # pass
result = np.append(result, [0.0, 1.0, 0.0])
elif n == 6:
for item in data:
if item >= 0.8 and item <= 1.0:
result = np.append(result, [1.0, 0.0, 0.0, 0.0, 0.0, 0.0])
elif item >= 0.4 and item < 0.8:
result = np.append(result, [0.0, 1.0, 0.0, 0.0, 0.0, 0.0])
elif item >= 0.0 and item < 0.4:
result = np.append(result, [0.0, 0.0, 1.0, 0.0, 0.0, 0.0])
elif item > -0.4 and item < 0.0:
result = np.append(result, [0.0, 0.0, 0.0, 1.0, 0.0, 0.0])
            elif item > -0.8 and item <= -0.4:
result = np.append(result, [0.0, 0.0, 0.0, 0.0, 1.0, 0.0])
            elif item >= -1.0 and item <= -0.8:
result = np.append(result, [0.0, 0.0, 0.0, 0.0, 0.0, 1.0])
return result.reshape((data.shape[0], n))
def class_to_signal(data, n=2, normalized=True):
"""
Converts a n-dimensional list of classes to a list of signals.
"""
result = np.array([])
if n == 2:
if normalized:
for item in data:
result = np.append(result, 1 if item[0] > item[1] else -1)
else:
for item in data:
result = np.append(result, item[0] * 2 - 1.0)
elif n == 3:
if normalized:
for item in data:
_class = np.argmax(item)
if _class == 0:
result = np.append(result, 1.0)
elif _class == 1:
result = np.append(result, 0.0)
elif _class == 2:
result = np.append(result, -1.0)
else:
for item in data:
_class = np.argmax(item)
if _class == 0:
result = np.append(result, item[0])
elif _class == 1:
result = np.append(result, 0.0)
elif _class == 2:
result = np.append(result, -item[2])
elif n == 6:
for item in data:
_class = np.argmax(item)
if _class == 0:
result = np.append(result, 1.0)
elif _class == 1:
result = np.append(result, 0.66)
elif _class == 2:
result = np.append(result, 0.33)
elif _class == 3:
result = np.append(result, -0.33)
elif _class == 4:
result = np.append(result, -0.66)
elif _class == 5:
result = np.append(result, -1.0)
return result
def prepare_target(data, close_index=3, classes=6):
"""
    Builds a classification target from close-to-close price ratios,
    choosing histogram-based boundaries so the classes are roughly uniform.
"""
# TODO
# while const
classes = 6
data = np.array(data)
new_target = data[1:, close_index] / data[:-1, close_index]
new_target = np.insert(new_target, obj=0, values=[1.0])
n, bins = np.histogram(new_target, bins=200, range=(0.99, 1.01))
sixth = sum(n) / classes
points = [0., 0., 1., 0., 0.]
_sum = n[100]/2
p_idx = 1
    for idx in range(99, -1, -1):
_sum += n[idx]
if _sum >= sixth:
points[p_idx] = (idx - 100) / 10**4 + 1
p_idx -= 1
if p_idx < 0:
break
_sum = n[100]/2
p_idx = 3
for idx in range(101, 201):
_sum += n[idx]
if _sum >= sixth:
points[p_idx] = (idx - 100) / 10**4 + 1
p_idx += 1
if p_idx > 4:
break
# TODO
def select(a):
a > points[2]
return 1
new_target = [select(x) for x in new_target]
return new_target
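if __name__ == "__main__":
    # Minimal round-trip sketch (an addition, not part of the original module):
    # a hard 3-class encoding of [buy, pass, sell] and its decoded signal.
    demo_signals = [1, 0, -1]
    demo_classes = signal_to_class(demo_signals, n=3)   # [[1,0,0],[0,1,0],[0,0,1]]
    print(class_to_signal(demo_classes, n=3))            # [ 1.  0. -1.]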
| terentjew-alexey/market-analysis-system | mas_tools/classes.py | Python | mit | 5,267 | 0.001899 |
import matplotlib
# Force matplotlib to not use any Xwindows backend.
matplotlib.use('Agg')
# from basemap/examples/daynight.py
import numpy as np
from mpl_toolkits.basemap import Basemap
import matplotlib.pyplot as plt
from datetime import datetime
# example showing how to compute the day/night terminator and shade nighttime
# areas on a map.
# miller projection
map = Basemap(projection='mill',lon_0=180)
# plot coastlines, draw label meridians and parallels.
map.drawcoastlines()
map.drawparallels(np.arange(-90,90,30),labels=[1,0,0,0])
map.drawmeridians(np.arange(map.lonmin,map.lonmax+30,60),labels=[0,0,0,1])
# fill continents 'coral' (with zorder=0), color wet areas 'aqua'
map.drawmapboundary(fill_color='aqua')
map.fillcontinents(color='coral',lake_color='aqua')
# shade the night areas, with alpha transparency so the
# map shows through. Use current time in UTC.
date = datetime.utcnow()
CS=map.nightshade(date)
plt.title('Day/Night Map for %s (UTC)' % date.strftime("%d %b %Y %H:%M:%S"))
print('test passed!') | TheClimateCorporation/conda-recipes | basemap/run_test.py | Python | apache-2.0 | 1,026 | 0.019493 |
#!/usr/bin/env python2.5
#
# Unit tester for neural_net.py
#
import sys
from neural_net import train, test,\
make_neural_net_basic,\
make_neural_net_two_layer,\
make_neural_net_challenging,\
make_neural_net_with_weights
from neural_net_data import simple_data_sets,\
harder_data_sets,\
challenging_data_sets,\
manual_weight_data_sets,\
all_data_sets
def main(neural_net_func, data_sets, max_iterations=10000):
verbose = True
for name, training_data, test_data in data_sets:
print "-"*40
print "Training on %s data" %(name)
nn = neural_net_func()
train(nn, training_data, max_iterations=max_iterations,
verbose=verbose)
print "Trained weights:"
for w in nn.weights:
print "Weight '%s': %f"%(w.get_name(),w.get_value())
print "Testing on %s test-data" %(name)
result = test(nn, test_data, verbose=verbose)
print "Accuracy: %f"%(result)
if __name__=="__main__":
test_names = ["simple"]
if len(sys.argv) > 1:
test_names = sys.argv[1:]
for test_name in test_names:
if test_name == "simple":
# these test simple logical configurations
main(make_neural_net_basic,
simple_data_sets)
elif test_name == "two_layer":
# these test cases are slightly harder
main(make_neural_net_two_layer,
simple_data_sets + harder_data_sets)
elif test_name == "challenging":
# these tests require a more complex architecture.
main(make_neural_net_challenging, challenging_data_sets)
elif test_name == "patchy":
# patchy problem is slightly tricky
# unless your network gets the right weights.
# it can quickly get stuck in local maxima.
main(make_neural_net_challenging, manual_weight_data_sets)
elif test_name == "weights":
# if you set the 'right' weights for
# the patchy problem it can converge very quickly.
main(make_neural_net_with_weights, manual_weight_data_sets,100)
else:
print "unrecognized test name %s" %(test_name)
| joshishungry/artificial_intel | assignments/lab5/neural_net_tester.py | Python | apache-2.0 | 2,227 | 0.00449 |