from regraph.library.data_structures import (TypedGraph,
TypedDiGraph,
Homomorphism)
from regraph.library.primitives import (merge_attributes)
from regraph.library.utils import keys_by_value
def pullback(h1, h2):
""" Given h1 : B -> D; h2 : C -> D returns A, rh1, rh2
with rh1 : A -> B; rh2 : A -> C """
if h1.target_ != h2.target_:
raise ValueError(
"Homomorphisms don't have the same codomain, can't do pullback"
)
if type(h1.target_) == TypedGraph:
res_graph = TypedGraph()
else:
res_graph = TypedDiGraph()
hom1 = {}
hom2 = {}
for n1 in h1.source_.nodes():
for n2 in h2.source_.nodes():
if not h1.mapping_[n1] in res_graph.nodes():
if h1.mapping_[n1] == h2.mapping_[n2]:
res_graph.add_node(
h1.mapping_[n1],
h1.target_.node[h1.mapping_[n1]].type_,
merge_attributes(h1.source_.node[n1].attrs_,
h2.source_.node[n2].attrs_,
'intersection'))
hom1[h1.mapping_[n1]] = n1
hom2[h2.mapping_[n2]] = n2
for n1 in res_graph.nodes():
for n2 in res_graph.nodes():
if res_graph.is_directed():
if (hom1[n1], hom1[n2]) in h1.source_.edges():
if (hom2[n1], hom2[n2]) in h2.source_.edges():
res_graph.add_edge(n1, n2)
res_graph.set_edge(
n1,
n2,
merge_attributes(
h1.source_.get_edge(hom1[n1], hom1[n2]),
h2.source_.get_edge(hom2[n1], hom2[n2]),
'intersection'))
else:
if (hom1[n1], hom1[n2]) in h1.source_.edges() or (hom1[n2], hom1[n1]) in h1.source_.edges():
if (hom2[n1], hom2[n2]) in h2.source_.edges() or (hom2[n2], hom2[n1]) in h2.source_.edges():
res_graph.add_edge(n1, n2)
res_graph.set_edge(
n1,
n2,
merge_attributes(
h1.source_.get_edge(hom1[n1], hom1[n2]),
h2.source_.get_edge(hom2[n1], hom2[n2]),
'intersection'))
res_h1 = Homomorphism(res_graph, h1.source_, hom1)
res_h2 = Homomorphism(res_graph, h2.source_, hom2)
return res_graph, res_h1, res_h2
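# A minimal usage sketch for ``pullback`` (hypothetical node names and types;
# it assumes the legacy ReGraph API used above, i.e. add_node(node, type,
# attrs) and Homomorphism(source, target, mapping)):
#
#     b = TypedDiGraph()
#     b.add_node(1, 'agent', {'name': {'x'}})
#     c = TypedDiGraph()
#     c.add_node(1, 'agent', {'name': {'x', 'y'}})
#     d = TypedDiGraph()
#     d.add_node(1, 'agent', {'name': {'x', 'y'}})
#     h1 = Homomorphism(b, d, {1: 1})
#     h2 = Homomorphism(c, d, {1: 1})
#     a, rh1, rh2 = pullback(h1, h2)
#     # a contains node 1 with the intersection of attrs: {'name': {'x'}}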
def final_PBC(h1, h2):
    """Given h1: A -> B and h2: B -> D (monic), return the final pullback
    complement (C, rh1, rh2) with rh1: A -> C and rh2: C -> D."""
if h1.target_ != h2.source_:
raise ValueError(
"Codomain of homomorphism 1 and domain of homomorphism 2 " +
"don't match, can't do pullback complement"
)
if not h2.is_monic():
raise ValueError(
"Second homomorphism is not monic, cannot find final pullback complement"
)
if type(h1.target_) == TypedGraph:
res_graph = TypedGraph()
else:
res_graph = TypedDiGraph()
hom1 = {}
hom2 = {}
for node in h2.target_.nodes():
B_node = keys_by_value(h2.mapping_, node)
if len(B_node) > 0:
mapped_A_nodes = keys_by_value(h1.mapping_, B_node[0])
for A_node in mapped_A_nodes:
res_graph.add_node(
str(A_node) + "_" + str(node),
h2.target_.node[h2.mapping_[h1.mapping_[A_node]]].type_,
merge_attributes(
h1.source_.node[A_node].attrs_,
h2.target_.node[h2.mapping_[h1.mapping_[A_node]]].attrs_,
"intersection"
)
)
hom1[A_node] = str(A_node) + "_" + str(node)
hom2[str(A_node) + "_" + str(node)] = h2.mapping_[h1.mapping_[A_node]]
else:
res_graph.add_node(
str(node) + "_",
h2.target_.node[node].type_,
h2.target_.node[node].attrs_
)
hom2[str(node) + "_"] = node
for s, t in h2.target_.edges():
B_s = keys_by_value(h2.mapping_, s)
B_t = keys_by_value(h2.mapping_, t)
if len(B_s) > 0 and len(B_t) > 0:
mapped_A_ss = keys_by_value(h1.mapping_, B_s[0])
mapped_A_ts = keys_by_value(h1.mapping_, B_t[0])
for A_s in mapped_A_ss:
for A_t in mapped_A_ts:
if res_graph.is_directed():
if hom1[A_s] == hom1[A_t] and (A_s, A_t) not in h1.source_.edges():
res_graph.add_edge(
hom1[A_s],
hom1[A_t],
h2.target_.get_edge(
h2.mapping_[h1.mapping_[A_s]],
h2.mapping_[h1.mapping_[A_t]])
)
else:
res_graph.add_edge(
hom1[A_s],
hom1[A_t],
merge_attributes(
h1.source_.get_edge(A_s, A_t),
h2.target_.get_edge(
h2.mapping_[h1.mapping_[A_s]],
h2.mapping_[h1.mapping_[A_t]]),
"intersection"
)
)
else:
if hom1[A_s] == hom1[A_t] and (A_s, A_t) not in h1.source_.edges() and (A_t, A_s) not in h1.source_.edges():
res_graph.add_edge(
hom1[A_s],
hom1[A_t],
h2.target_.get_edge(
h2.mapping_[h1.mapping_[A_s]],
h2.mapping_[h1.mapping_[A_t]])
)
else:
res_graph.add_edge(
hom1[A_s],
hom1[A_t],
merge_attributes(
h1.source_.get_edge(A_s, A_t),
h2.target_.get_edge(
h2.mapping_[h1.mapping_[A_s]],
h2.mapping_[h1.mapping_[A_t]]),
"intersection"
)
)
else:
if len(B_s) == 0:
sources_to_add = [str(s) + "_"]
else:
mapped_A_ss = keys_by_value(h1.mapping_, B_s[0])
sources_to_add = [hom1[A_s] for A_s in mapped_A_ss]
if len(B_t) == 0:
targets_to_add = [str(t) + "_"]
else:
mapped_A_ts = keys_by_value(h1.mapping_, B_t[0])
targets_to_add = [hom1[A_t] for A_t in mapped_A_ts]
for new_s in sources_to_add:
for new_t in targets_to_add:
res_graph.add_edge(
new_s,
new_t,
h2.target_.edge[s][t])
res_h1 = Homomorphism(h1.source_, res_graph, hom1)
res_h2 = Homomorphism(res_graph, h2.target_, hom2)
return (res_graph, res_h1, res_h2)
def pushout(h1, h2):
    """Given h1: A -> B and h2: A -> C, return the pushout (D, rh1, rh2)
    with rh1: B -> D and rh2: C -> D."""
if h1.source_ != h2.source_:
raise ValueError(
"Domain of homomorphism 1 and domain of homomorphism 2 " +
"don't match, can't do pushout"
)
hom1 = {}
hom2 = {}
if type(h1.target_) == TypedGraph:
res_graph = TypedGraph()
else:
res_graph = TypedDiGraph()
for node in h1.source_.nodes():
res_graph.add_node(
str(h1.mapping_[node]) + "_" + str(h2.mapping_[node]),
h1.source_.node[node].type_,
merge_attributes(
h1.target_.node[h1.mapping_[node]].attrs_,
h2.target_.node[h2.mapping_[node]].attrs_,
"union"
)
)
hom1[h1.mapping_[node]] =\
str(h1.mapping_[node]) + "_" + str(h2.mapping_[node])
hom2[h2.mapping_[node]] =\
str(h1.mapping_[node]) + "_" + str(h2.mapping_[node])
for s, t in h1.source_.edges():
res_graph.add_edge(
str(h1.mapping_[s]) + "_" + str(h2.mapping_[s]),
str(h1.mapping_[t]) + "_" + str(h2.mapping_[t]),
merge_attributes(
h1.target_.get_edge(h1.mapping_[s], h1.mapping_[t]),
h2.target_.get_edge(h2.mapping_[s], h2.mapping_[t]),
"union"
)
)
for node in h1.target_.nodes():
if node not in h1.mapping_.values():
res_graph.add_node(
str(node) + "_",
h1.target_.node[node].type_,
h1.target_.node[node].attrs_
)
hom1[node] = str(node) + "_"
for node in h2.target_.nodes():
if node not in h2.mapping_.values():
res_graph.add_node(
str(node) + "_",
h2.target_.node[node].type_,
h2.target_.node[node].attrs_
)
hom2[node] = str(node) + "_"
    # add edges of h1.target_ that are not yet present in the pushout
    for s, t in h1.target_.edges():
        if res_graph.is_directed():
            if (hom1[s], hom1[t]) not in res_graph.edges():
                res_graph.add_edge(
                    hom1[s],
                    hom1[t],
                    h1.target_.get_edge(s, t)
                )
        else:
            if ((hom1[s], hom1[t]) not in res_graph.edges() and
                    (hom1[t], hom1[s]) not in res_graph.edges()):
                res_graph.add_edge(
                    hom1[s],
                    hom1[t],
                    h1.target_.get_edge(s, t)
                )
    # add edges of h2.target_ that are not yet present in the pushout
    for s, t in h2.target_.edges():
        if res_graph.is_directed():
            if (hom2[s], hom2[t]) not in res_graph.edges():
                res_graph.add_edge(
                    hom2[s],
                    hom2[t],
                    h2.target_.get_edge(s, t)
                )
        else:
            if ((hom2[s], hom2[t]) not in res_graph.edges() and
                    (hom2[t], hom2[s]) not in res_graph.edges()):
                res_graph.add_edge(
                    hom2[s],
                    hom2[t],
                    h2.target_.get_edge(s, t)
                )
res_h1 = Homomorphism(h1.target_, res_graph, hom1)
res_h2 = Homomorphism(h2.target_, res_graph, hom2)
return (res_graph, res_h1, res_h2)
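# A minimal usage sketch for ``pushout`` (hypothetical node names and types,
# same API assumptions as the ``pullback`` sketch above):
#
#     a = TypedDiGraph()
#     a.add_node(1, 'agent', {'name': {'x'}})
#     b = TypedDiGraph()
#     b.add_node(1, 'agent', {'name': {'x'}})
#     b.add_node(2, 'agent', {'name': {'y'}})
#     c = TypedDiGraph()
#     c.add_node(1, 'agent', {'name': {'x', 'z'}})
#     h1 = Homomorphism(a, b, {1: 1})
#     h2 = Homomorphism(a, c, {1: 1})
#     d, rh1, rh2 = pushout(h1, h2)
#     # d glues b and c along the image of a: the shared node becomes '1_1'
#     # with the union of its attributes, and b's extra node is copied as '2_'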

# ==== eugeniashurko/ReGraph | regraph/library/category_op.py | mit ====

from .base import BaseResult
class PublishBodyResult(BaseResult):
def set_result(self, result):
super().set_result(result)
@property
def publication_id(self):
return self._result['publicationID']
@property
def publicationID(self):
"""
synonym
"""
return self.publication_id
@property
def url(self):
return self._result['url']
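# Hypothetical usage sketch (assumes the enclosing kii client populates the
# result dict with this shape; the construction call is illustrative):
#
#     r = PublishBodyResult()
#     r.set_result({'publicationID': 'pub-1', 'url': 'https://example.com/x'})
#     r.publication_id == r.publicationID == 'pub-1'  # -> True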

# ==== ta2xeo/python3-kii | kii/results/publishbody.py | mit ====

from flask import Blueprint, render_template
import logging
logr = logging.getLogger('gimphub.blueprint_static')
static_B = Blueprint('static', __name__)
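# A hedged registration sketch (the app object below is hypothetical; in the
# real project the blueprint is presumably registered by an app factory):
#
#     from flask import Flask
#     app = Flask(__name__)
#     app.register_blueprint(static_B)  # mounts /terms, /privacy, /licenses, ...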
@static_B.route('/terms', methods=['GET'])
def terms():
    return render_template('terms.html')
@static_B.route('/privacy', methods=['GET'])
def privacy():
    return render_template('privacy.html')
@static_B.route('/licenses', methods=['GET'])
def licenses():
    return render_template('licenses.html')
@static_B.route('/registercomplete', methods=['GET', 'POST'])
def registercomplete():
    return render_template('registercomplete.html')

# ==== Jolopy/GimpHub | app/blueprint_static.py | gpl-2.0 ====

import logging
import ckan.plugins as p
from ckanext.archiver.model import Archival
from ckanext.qa.model import QA, aggregate_qa_for_a_dataset
log = logging.getLogger(__name__)
_ = p.toolkit._
@p.toolkit.side_effect_free
def qa_resource_show(context, data_dict):
'''
Returns the QA and Archival information for a package or resource.
'''
model = context['model']
session = context['session']
# user = context.get('user')
# p.toolkit.check_access('qa_resource_show', context, data_dict)
res_id = p.toolkit.get_or_bust(data_dict, 'id')
res = session.query(model.Resource).get(res_id)
if not res:
raise p.toolkit.ObjectNotFound
archival = Archival.get_for_resource(res_id)
qa = QA.get_for_resource(res_id)
pkg = res.resource_group.package
return_dict = {
'name': pkg.name,
'title': pkg.title,
'id': res.id
}
return_dict['archival'] = archival.as_dict()
return_dict.update(qa.as_dict())
return return_dict
@p.toolkit.side_effect_free
def qa_package_openness_show(context, data_dict):
'''
Returns the QA score for a package, aggregating the
scores of its resources.
'''
model = context['model']
session = context['session']
p.toolkit.check_access('qa_package_openness_show', context, data_dict)
dataset_id = p.toolkit.get_or_bust(data_dict, 'id')
dataset = session.query(model.Package).get(dataset_id)
if not dataset:
raise p.toolkit.ObjectNotFound
qa_objs = QA.get_for_package(dataset.id)
qa_dict = aggregate_qa_for_a_dataset(qa_objs)
return qa_dict
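# A hedged call sketch: both actions are reachable through CKAN's action
# layer (the context and dataset id below are illustrative):
#
#     result = p.toolkit.get_action('qa_package_openness_show')(
#         context, {'id': 'some-dataset-id'})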

# ==== ckan/ckanext-qa | ckanext/qa/logic/action.py | mit ====

# -*- coding: utf-8 -*-
__version__ = '1.0.2'

# ==== 596acres/django-livinglots-forms | livinglots_forms/__init__.py | agpl-3.0 ====

#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2009-2011, Nicolas Clairon
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the University of California, Berkeley nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# SchemaDocument Exceptions
class StructureError(Exception):pass
class BadKeyError(Exception):pass
class AuthorizedTypeError(Exception):pass
class SchemaTypeError(Exception):pass
class DefaultFieldTypeError(Exception):pass
# Mongo Document Exceptions
from bson import InvalidDocument
try:
from pymongo.connection import OperationFailure
except ImportError:
from pymongo.errors import OperationFailure
class ConnectionError(Exception):pass
class BadIndexError(Exception):pass
#class MongoAuthException(Exception):pass
#class MultipleResultsFound(Exception):pass
#class BadIndexError(Exception):pass
#class AutoReferenceError(Exception):pass
#class MaxDocumentSizeError(Exception):pass
#class OptionConflictError(Exception):pass
#class UpdateQueryError(Exception):pass

# ==== MobicoTIC/MongoLite | mongolite/mongo_exceptions.py | bsd-3-clause ====

# coding=utf-8
from flask.ext.restplus import Resource
from flask import request, Response
from app import api
from utils.wordeater_api import ApiResponse
from cerberus import Validator
from config import API_PATH, ENVELOPE_DATA
from services.service_locator import ServiceLocator
from decorators.authenticate import expose
from models import picture_schema, picture_input_fields, picture_fields
from errors import ServerErrors, PicturesErrors
from logger import error
__author__ = 'Glebov Boris'
pictures_ns = api.namespace(name='Pictures', description="Requests related with pictures", path=API_PATH)
class PictureResource(Resource):
def __init__(self, api, *args, **kwargs):
Resource.__init__(self, api, *args, **kwargs)
self.ss = ServiceLocator.resolve(ServiceLocator.SESSIONS)
self.ps = ServiceLocator.resolve(ServiceLocator.PICTURES)
self.gs = ServiceLocator.resolve(ServiceLocator.GIPHY)
@pictures_ns.route('/pictures/random/', endpoint='random')
class PicturesRandomAPI(PictureResource):
@expose
@api.doc(body=picture_input_fields)
@api.marshal_with(picture_fields, envelope=ENVELOPE_DATA, code=200)
def post(self):
"""
Translate text
:return:
"""
v = Validator(picture_schema)
args = v.validated(request.get_json())
if args is None:
return ApiResponse(status=4001, errors=v.errors)
text = args.get(u'text')
try:
picture = self.ps.get(text)
if picture:
return picture
return self.ps.add(text, self.gs.random(text))
except Exception as ex:
error(u'PicturesRandomAPI.post(text={0})'.format(text), ex)
return ApiResponse(status=500, errors=ServerErrors.internal_server_error([]))
@pictures_ns.route('/picture/<string:text>/', endpoint='picture')
class PictureAPI(PictureResource):
@expose
def get(self, text):
"""
Returns picture entity
:param picture_id:
:return:
"""
picture = self.ps.get(text)
if picture is None:
return ApiResponse(status=404, errors=PicturesErrors.picture_doesnt_exists(['text']))
return Response(picture.fs.content, mimetype='image/gif')
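# Hedged request sketch (assumes the API is mounted under API_PATH and the
# caller is authenticated as required by @expose):
#
#     POST {API_PATH}/pictures/random/  with JSON body {"text": "cat"}
#         -> picture entity (fetched from giphy and cached on first use)
#     GET  {API_PATH}/picture/cat/
#         -> the cached GIF bytes (mimetype image/gif)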

# ==== h-qub/wordeater-web | we-web/api/resources/pictures/pictures.py | mit ====

"""
This config file runs the simplest dev environment"""
# We intentionally define lots of variables that aren't used, and
# want to import all variables from base settings files
# pylint: disable=W0401, W0614
from .common import *
from logsettings import get_logger_config
DEBUG = True
USE_I18N = True
# For displaying the dummy text, we need to provide a language mapping.
LANGUAGES = (
('eo', 'Esperanto'),
)
TEMPLATE_DEBUG = DEBUG
LOGGING = get_logger_config(ENV_ROOT / "log",
logging_env="dev",
tracking_filename="tracking.log",
dev_env=True,
debug=True)
modulestore_options = {
'default_class': 'xmodule.raw_module.RawDescriptor',
'fs_root': GITHUB_REPO_ROOT,
'render_template': 'edxmako.shortcuts.render_to_string',
}
MODULESTORE = {
'default': {
'ENGINE': 'xmodule.modulestore.draft.DraftModuleStore',
'DOC_STORE_CONFIG': DOC_STORE_CONFIG,
'OPTIONS': modulestore_options
},
'direct': {
'ENGINE': 'xmodule.modulestore.mongo.MongoModuleStore',
'DOC_STORE_CONFIG': DOC_STORE_CONFIG,
'OPTIONS': modulestore_options
},
'split': {
'ENGINE': 'xmodule.modulestore.split_mongo.SplitMongoModuleStore',
'DOC_STORE_CONFIG': DOC_STORE_CONFIG,
'OPTIONS': modulestore_options
}
}
# cdodge: This is the specifier for the MongoDB (using GridFS) backed static content store
# This is for static content for courseware, not system static content (e.g. javascript, css, edX branding, etc)
CONTENTSTORE = {
'ENGINE': 'xmodule.contentstore.mongo.MongoContentStore',
'DOC_STORE_CONFIG': {
'host': 'localhost',
'db': 'xcontent',
},
# allow for additional options that can be keyed on a name, e.g. 'trashcan'
'ADDITIONAL_OPTIONS': {
'trashcan': {
'bucket': 'trash_fs'
}
}
}
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ENV_ROOT / "db" / "edx.db",
}
}
LMS_BASE = "localhost:8000"
FEATURES['PREVIEW_LMS_BASE'] = "localhost:8020"
# 8000
REPOS = {
'edx4edx': {
'branch': 'master',
'origin': '[email protected]:MITx/edx4edx.git',
},
'content-mit-6002x': {
'branch': 'master',
# 'origin': '[email protected]:MITx/6002x-fall-2012.git',
'origin': '[email protected]:MITx/content-mit-6002x.git',
},
'6.00x': {
'branch': 'master',
'origin': '[email protected]:MITx/6.00x.git',
},
'7.00x': {
'branch': 'master',
'origin': '[email protected]:MITx/7.00x.git',
},
'3.091x': {
'branch': 'master',
'origin': '[email protected]:MITx/3.091x.git',
},
}
CACHES = {
# This is the cache used for most things. Askbot will not work without a
# functioning cache -- it relies on caching to load its settings in places.
# In staging/prod envs, the sessions also live here.
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'edx_loc_mem_cache',
'KEY_FUNCTION': 'util.memcache.safe_key',
},
# The general cache is what you get if you use our util.cache. It's used for
# things like caching the course.xml file for different A/B test groups.
# We set it to be a DummyCache to force reloading of course.xml in dev.
# In staging environments, we would grab VERSION from data uploaded by the
# push process.
'general': {
'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
'KEY_PREFIX': 'general',
'VERSION': 4,
'KEY_FUNCTION': 'util.memcache.safe_key',
},
'mongo_metadata_inheritance': {
'BACKEND': 'django.core.cache.backends.filebased.FileBasedCache',
'LOCATION': '/var/tmp/mongo_metadata_inheritance',
'TIMEOUT': 300,
'KEY_FUNCTION': 'util.memcache.safe_key',
},
'loc_cache': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'edx_location_mem_cache',
},
}
# Make the keyedcache startup warnings go away
CACHE_TIMEOUT = 0
# Dummy secret key for dev
SECRET_KEY = '85920908f28904ed733fe576320db18cabd7b6cd'
################################ PIPELINE #################################
PIPELINE_SASS_ARGUMENTS = '--debug-info --require {proj_dir}/static/sass/bourbon/lib/bourbon.rb'.format(proj_dir=PROJECT_ROOT)
################################# CELERY ######################################
# By default don't use a worker, execute tasks as if they were local functions
CELERY_ALWAYS_EAGER = True
################################ DEBUG TOOLBAR #################################
INSTALLED_APPS += ('debug_toolbar', 'debug_toolbar_mongo')
MIDDLEWARE_CLASSES += ('debug_toolbar.middleware.DebugToolbarMiddleware',)
INTERNAL_IPS = ('127.0.0.1',)
DEBUG_TOOLBAR_PANELS = (
'debug_toolbar.panels.version.VersionDebugPanel',
'debug_toolbar.panels.timer.TimerDebugPanel',
'debug_toolbar.panels.settings_vars.SettingsVarsDebugPanel',
'debug_toolbar.panels.headers.HeaderDebugPanel',
'debug_toolbar.panels.request_vars.RequestVarsDebugPanel',
'debug_toolbar.panels.sql.SQLDebugPanel',
'debug_toolbar.panels.signals.SignalDebugPanel',
'debug_toolbar.panels.logger.LoggingPanel',
# Enabling the profiler has a weird bug as of django-debug-toolbar==0.9.4 and
# Django=1.3.1/1.4 where requests to views get duplicated (your method gets
# hit twice). So you can uncomment when you need to diagnose performance
# problems, but you shouldn't leave it on.
# 'debug_toolbar.panels.profiling.ProfilingDebugPanel',
)
DEBUG_TOOLBAR_CONFIG = {
'INTERCEPT_REDIRECTS': False
}
# To see stacktraces for MongoDB queries, set this to True.
# Stacktraces slow down page loads drastically (for pages with lots of queries).
DEBUG_TOOLBAR_MONGO_STACKTRACES = False
# disable NPS survey in dev mode
FEATURES['STUDIO_NPS_SURVEY'] = False
# Enable URL that shows information about the status of various services
FEATURES['ENABLE_SERVICE_STATUS'] = True
############################# SEGMENT-IO ##################################
# If there's an environment variable set, grab it and turn on Segment.io
# Note that this is the Studio key. There is a separate key for the LMS.
import os
SEGMENT_IO_KEY = os.environ.get('SEGMENT_IO_KEY')
if SEGMENT_IO_KEY:
FEATURES['SEGMENT_IO'] = True
#####################################################################
# Lastly, see if the developer has any local overrides.
try:
from .private import * # pylint: disable=F0401
except ImportError:
pass
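# A hedged sketch of such a local override file (cms/envs/private.py is
# optional; the settings shown are just examples of values defined above):
#
#     # private.py
#     DEBUG_TOOLBAR_MONGO_STACKTRACES = True
#     FEATURES['STUDIO_NPS_SURVEY'] = True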

# ==== apigee/edx-platform | cms/envs/dev.py | agpl-3.0 ====

"""
Utility functions for reading MITgcm mds files (.meta / .data)
"""
# python 3 compatiblity
from __future__ import print_function, division
import re
import os
import numpy as np
import warnings
from functools import reduce
from dask import delayed
import dask.array as dsa
from dask.base import tokenize
import xarray as xr
import sys
def parse_meta_file(fname):
"""Get the metadata as a dict out of the MITgcm mds .meta file.
PARAMETERS
----------
fname : str
Path to the .meta file
RETURNS
-------
flds : dict
Metadata in dictionary form.
"""
flds = {}
basename = re.match('(^.+?)\..+', os.path.basename(fname)).groups()[0]
flds['basename'] = basename
with open(fname) as f:
text = f.read()
# split into items
for item in re.split(';', text):
# remove whitespace at beginning
item = re.sub('^\s+', '', item)
match = re.match('(\w+) = (\[|\{)(.*)(\]|\})', item, re.DOTALL)
if match:
key, _, value, _ = match.groups()
# remove more whitespace
value = re.sub('^\s+', '', value)
value = re.sub('\s+$', '', value)
# print key,':', value
flds[key] = value
# now check the needed things are there
needed_keys = ['dimList', 'nDims', 'nrecords', 'dataprec']
for k in needed_keys:
assert k in flds
# transform datatypes
flds['nDims'] = int(flds['nDims'])
flds['nrecords'] = int(flds['nrecords'])
# endianness is set by _read_mds
flds['dataprec'] = np.dtype(re.sub("'", '', flds['dataprec']))
flds['dimList'] = [[int(h) for h in
re.split(',', g)] for g in
re.split(',\n', flds['dimList'])]
if 'fldList' in flds:
flds['fldList'] = [re.match("'*(\w+)", g).groups()[0] for g in
re.split("'\s+'", flds['fldList'])]
assert flds['nrecords'] == len(flds['fldList'])
return flds
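# A hedged sketch of the .meta syntax this parser accepts (the values are
# illustrative, not from a real run):
#
#     nDims = [   2 ];
#     dimList = [
#      90,    1,   90,
#      90,    1,   90
#     ];
#     dataprec = [ 'float32' ];
#     nrecords = [     1 ];
#
# for which parse_meta_file yields dimList [[90, 1, 90], [90, 1, 90]] and
# dtype('float32').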
def _get_useful_info_from_meta_file(metafile):
# why does the .meta file contain so much repeated info?
# Here we just get the part we need
# and reverse order (numpy uses C order, mds is fortran)
meta = parse_meta_file(metafile)
shape = [g[0] for g in meta['dimList']][::-1]
assert len(shape) == meta['nDims']
# now add an extra for number of recs
nrecs = meta['nrecords']
shape.insert(0, nrecs)
dtype = meta['dataprec']
if 'fldList' in meta:
fldlist = meta['fldList']
name = fldlist[0]
else:
name = meta['basename']
fldlist = None
return nrecs, shape, name, dtype, fldlist
def read_mds(fname, iternum=None, use_mmap=None, endian='>', shape=None,
dtype=None, use_dask=True, extra_metadata=None, chunks="3D",
llc=False, llc_method="smallchunks", legacy=True):
"""Read an MITgcm .meta / .data file pair
PARAMETERS
----------
fname : str
The base name of the data file pair (without a .data or .meta suffix)
iternum : int, optional
The iteration number suffix
use_mmap : bool, optional
Whether to read the data using a numpy.memmap.
Mutually exclusive with `use_dask`.
endian : {'>', '<', '|'}, optional
        Endianness of the data
dtype : numpy.dtype, optional
Data type of the data (will be inferred from the .meta file by default)
shape : tuple, optional
Shape of the data (will be inferred from the .meta file by default)
use_dask : bool, optional
Whether wrap the reading of the raw data in a ``dask.delayed`` object.
Mutually exclusive with `use_mmap`.
extra_metadata : dict, optional
Dictionary containing some extra metadata that will be appended to
content of MITgcm meta file to create the file_metadata. This is needed
for llc type configurations (global or regional). In this case the
extra metadata used is of the form :
aste = {'has_faces': True, 'ny': 1350, 'nx': 270,
'ny_facets': [450,0,270,180,450],
'pad_before_y': [90,0,0,0,0],
'pad_after_y': [0,0,0,90,90],
'face_facets': [0, 0, 2, 3, 4, 4],
'facet_orders' : ['C', 'C', 'C', 'F', 'F'],
'face_offsets' : [0, 1, 0, 0, 0, 1],
'transpose_face' : [False, False, False,
True, True, True]}
llc90 = {'has_faces': True, 'ny': 13*90, 'nx': 90,
'ny_facets': [3*90, 3*90, 90, 3*90, 3*90],
'face_facets': [0, 0, 0, 1, 1, 1, 2, 3, 3, 3, 4, 4, 4],
'facet_orders': ['C', 'C', 'C', 'F', 'F'],
'face_offsets': [0, 1, 2, 0, 1, 2, 0, 0, 1, 2, 0, 1, 2],
'transpose_face' : [False, False, False,
False, False, False, False,
True, True, True, True, True, True]}
llc grids have typically 5 rectangular facets and will be mapped onto
N (=13 for llc, =6 for aste) square faces.
Keys for the extra_metadata dictionary can be of different types and
length:
* bool:
#. has_faces : True if domain is combination of connected grids
* list of len=nfacets:
#. ny_facets : number of points in y direction of each facet
(usually n * nx)
#. pad_before_y (Regional configuration) : pad data with N zeros
before array
#. pad_after_y (Regional configuration) : pad data with N zeros
after array
#. facet_order : row/column major order of this facet
* list of len=nfaces:
#. face_facets : facet of origin for this face
#. face_offsets : position of the face in the facet (0 = start)
#. transpose_face : transpose the data for this face
chunks : {'3D', '2D', 'CS'}
Which routine to use for chunking data. '2D' splits the file
into a individual dask chunk of size (nx x nx) for each face (if llc)
of each record of each level.
'3D' loads the whole raw data file (either into memory or as a
numpy.memmap) and is not suitable for llc configurations.
The different methods will have different memory and i/o performance
depending on the details of the system configuration.
'CS' loads 2d (nx, ny) chunks for each face of the Cube Sphere model.
    obsolete : llc and llc_method, kept for testing
RETURNS
-------
data : dict
The keys correspond to the variable names of the different variables in
the data file. The values are the data itself, either as an
``numpy.ndarray``, ``numpy.memmap``, or ``dask.array.Array`` depending
on the options selected.
"""
if use_mmap and use_dask:
raise TypeError('`use_mmap` and `use_dask` are mutually exclusive:'
' Both memory-mapped and dask arrays'
' use lazy evaluation.')
elif use_mmap is None:
use_mmap = False if use_dask else True
if iternum is None:
istr = ''
else:
assert isinstance(iternum, int)
istr = '.%010d' % iternum
datafile = fname + istr + '.data'
metafile = fname + istr + '.meta'
# get metadata
try:
metadata = parse_meta_file(metafile)
nrecs, shape, name, dtype, fldlist = \
_get_useful_info_from_meta_file(metafile)
dtype = dtype.newbyteorder(endian)
except IOError:
# we can recover from not having a .meta file if dtype and shape have
# been specified already
if shape is None:
raise IOError("Cannot find the shape associated to %s in the \
metadata." % fname)
elif dtype is None:
raise IOError("Cannot find the dtype associated to %s in the \
metadata, please specify the default dtype to \
avoid this error." % fname)
else:
# add time dimensions
shape = (1,) + shape
shape = list(shape)
name = os.path.basename(fname)
metadata = {'basename': name, 'shape': shape}
# figure out dimensions
ndims = len(shape)-1
if ndims == 3:
_, nz, ny, nx = shape
dims_vars = ('nz', 'ny', 'nx')
elif ndims == 2:
_, ny, nx = shape
nz = 1
dims_vars = ('ny', 'nx')
# and variables
if 'fldList' not in metadata:
metadata['fldList'] = [metadata['basename']]
# if not provided in extra_metadata, we assume that the variables in file
# have the same shape
if extra_metadata is None or 'dims_vars' not in extra_metadata:
dims_vars_list = []
for var in metadata['fldList']:
dims_vars_list.append(dims_vars)
# add extra dim information and set aside
metadata.update({'dims_vars': dims_vars_list,
'dtype': dtype, 'endian': endian,
'nx': nx, 'ny': ny,
                     'nz': nz, 'nt': 1})  # parse_meta hardcoded for nt = 1
file_metadata = metadata.copy()
# by default, we set to non-llc grid
file_metadata.update({'filename': datafile, 'vars': metadata['fldList'],
'has_faces': False})
# extra_metadata contains informations about llc/regional llc grid
if extra_metadata is not None and llc:
nhpts_ex = extra_metadata['nx'] * extra_metadata['ny']
nhpts = metadata['nx'] * metadata['ny']
# check that nx * ny is consistent between extra_metadata and meta file
# unless it's a vertical profile nx = ny = 1
if nhpts > 1:
assert nhpts_ex == nhpts
if extra_metadata is not None:
file_metadata.update(extra_metadata)
# --------------- LEGACY --------------------------
# from legacy code (needs to be phased out)
# transition code to keep unit tests working
if llc:
chunks = "2D"
# --------------- /LEGACY --------------------------
# it is possible to override the values of nx, ny, nz from extra_metadata
# (needed for bug meta file ASTE) except if those are = 1 (vertical coord)
# where we override by values found in meta file
for dim in ['nx', 'ny', 'nz']:
if metadata[dim] == 1:
file_metadata.update({dim: 1})
# read all variables from file into the list d
d = read_all_variables(file_metadata['fldList'], file_metadata,
use_mmap=use_mmap, use_dask=use_dask,
chunks=chunks)
# convert list into dictionary
out = {}
for n, name in enumerate(file_metadata['fldList']):
if ndims == 3:
out[name] = d[n]
elif ndims == 2:
out[name] = d[n][:, 0, :]
# --------------- LEGACY --------------------------
# from legacy code (needs to be phased out)
# transition code to keep unit tests working
if legacy:
for n, name in enumerate(file_metadata['fldList']):
out[name] = out[name][0, :]
# --------------- /LEGACY --------------------------
return out
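# A minimal usage sketch (assumes a file pair T.0000000010.data /
# T.0000000010.meta in the working directory):
#
#     data = read_mds('T', iternum=10, use_dask=False)
#     theta = data['T']   # numpy memmap/array shaped per the .meta file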
def read_raw_data(datafile, dtype, shape, use_mmap=False, offset=0,
order='C', partial_read=False):
"""Read a raw binary file and shape it.
PARAMETERS
----------
datafile : str
Path to a .data file
dtype : numpy.dtype
Data type of the data
shape : tuple
Shape of the data
use_memmap : bool, optional
Whether to read the data using a numpy.memmap
offset : int, optional
Offset (in bytes) to apply on read
order : str, optional
Row/Column Major = 'C' or 'F'
partial_read : bool, optional
If reading part of the file
RETURNS
-------
data : numpy.ndarray
The data (or a memmap to it)
"""
number_of_values = reduce(lambda x, y: x * y, shape)
expected_number_of_bytes = number_of_values * dtype.itemsize
actual_number_of_bytes = os.path.getsize(datafile)
if not partial_read:
# first check that partial_read and offset are used together
if offset != 0:
raise ValueError(
'When partial_read==False, offset will not be read')
# second check to be sure there is the right number of bytes in file
if expected_number_of_bytes != actual_number_of_bytes:
raise IOError('File `%s` does not have the correct size '
'(expected %g, found %g)' %
(datafile,
expected_number_of_bytes,
actual_number_of_bytes))
    else:
        # partial read: only check that the offset lies inside the file
        if offset >= actual_number_of_bytes:
            raise ValueError('bytes offset %g is greater than file size %g' %
                             (offset, actual_number_of_bytes))
with open(datafile, 'rb') as f:
if use_mmap:
data = np.memmap(f, dtype=dtype, mode='r', offset=offset,
shape=tuple(shape), order=order)
else:
f.seek(offset)
data = np.fromfile(f, dtype=dtype, count=number_of_values)
data = data.reshape(shape, order=order)
data.shape = shape
return data
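# Usage sketch (hypothetical file holding exactly 50*90*90 big-endian
# float32 values):
#
#     data = read_raw_data('T.0000000010.data', np.dtype('>f4'), (50, 90, 90))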
def parse_namelist(file, silence_errors=True):
"""Read a FOTRAN namelist file into a dictionary.
PARAMETERS
----------
file : str
Path to the namelist file to read.
RETURNS
-------
data : dict
Dictionary of each namelist as dictionaries
"""
def parse_val(val):
"""Parse a string and cast it in the appropriate python type."""
if ',' in val: # It's a list, parse recursively
return [parse_val(subval.strip()) for subval in val.split(',')]
elif val.startswith("'"): # It's a string, remove quotes.
return val[1:-1].strip()
elif '*' in val: # It's shorthand for a repeated value
repeat, number = val.split('*')
return [parse_val(number)] * int(repeat)
elif val in ['.TRUE.', '.FALSE.']:
return val == '.TRUE.'
elif '.' in val or 'E' in val: # It is a Real (float)
return float(val)
# Finally try for an int
return int(val)
data = {}
current_namelist = ''
raw_lines = []
with open(file) as f:
for line in f:
# Remove comments
line = line.split('#')[0].strip()
if '=' in line or '&' in line:
raw_lines.append(line)
elif line:
raw_lines[-1] += line
for line in raw_lines:
if line.startswith('&'):
current_namelist = line.split('&')[1]
if current_namelist: # else : it's the end of a namelist.
data[current_namelist] = {}
else:
field, value = map(str.strip, line[:-1].split('='))
try:
value = parse_val(value)
except ValueError:
mess = ('Unable to read value for field {field} in file {file}: {value}'
).format(field=field, file=file, value=value)
if silence_errors:
warnings.warn(mess)
value = None
else:
raise ValueError(mess)
if '(' in field: # Field is an array
field, idxs = field[:-1].split('(')
if field not in data[current_namelist]:
data[current_namelist][field] = []
# For generality, we will assign a slice, so we cast in list
value = value if isinstance(value, list) else [value]
idxs = [slice(int(idx.split(':')[0]) - 1,
int(idx.split(':')[1]))
if ':' in idx else slice(int(idx) - 1, int(idx))
for idx in idxs.split(',')]
datafield = data[current_namelist][field]
# Array are 1D or 2D, if 2D we extend it to the good shape,
# filling it with [] and pass the appropriate sublist.
# Only works with slice assign (a:b) in first position.
missing_spots = idxs[-1].stop - len(datafield)
if missing_spots > 0:
datafield.extend([] for i in range(missing_spots))
if len(idxs) == 2:
datafield = datafield[idxs[1].start]
datafield[idxs[0]] = value
else:
data[current_namelist][field] = value
return data
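# A hedged sketch of the namelist syntax handled above (illustrative content
# for a file named 'data'):
#
#     &PARM01
#      viscAh=1.E4,
#      tRef=15*20.,
#      momAdvection=.TRUE.,
#     &
#
#     nml = parse_namelist('data')
#     nml['PARM01']['tRef']   # -> [20.0] * 15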
def parse_available_diagnostics(fname, layers={}):
"""Examine the available_diagnostics.log file and translate it into
useful variable metadata.
PARAMETERS
----------
fname : str or buffer
the path to the diagnostics file or a file buffer
layers : dict (optional)
dictionary mapping layers names to dimension sizes
RETURNS
-------
all_diags : a dictionary keyed by variable names with values
(coords, description, units)
"""
all_diags = {}
diag_id_lookup = {}
mate_lookup = {}
# mapping between the available_diagnostics.log codes and the actual
# coordinate names
# http://mitgcm.org/public/r2_manual/latest/online_documents/node268.html
xcoords = {'U': 'i_g', 'V': 'i', 'M': 'i', 'Z': 'i_g'}
ycoords = {'U': 'j', 'V': 'j_g', 'M': 'j', 'Z': 'j_g'}
rcoords = {'M': 'k', 'U': 'k_u', 'L': 'k_l'}
# need to be able to accept string filename or buffer
def process_buffer(f):
for l in f:
# will automatically skip first four header lines
c = re.split('\|', l)
if len(c) == 7 and c[0].strip() != 'Num':
# parse the line to extract the relevant variables
key = c[1].strip()
diag_id = int(c[0].strip())
diag_id_lookup[diag_id] = key
levs = int(c[2].strip())
mate = c[3].strip()
if mate:
mate = int(mate)
mate_lookup[key] = mate
code = c[4]
units = c[5].strip()
desc = c[6].strip()
# decode what those variables mean
hpoint = code[1]
rpoint = code[8]
xycoords = [ycoords[hpoint], xcoords[hpoint]]
rlev = code[9]
if rlev == '1' and levs == 1:
zcoord = []
elif rlev == 'R':
zcoord = [rcoords[rpoint]]
                elif rlev == 'L':  # pragma: no cover
# max(Nr, Nrphys) according to doc...
# this seems to be only used in atmos
# with different levels for dynamics and physics
# setting to Nr meanwhile
zcoord = [rcoords[rpoint]]
elif rlev == 'X':
if layers:
layer_name = key.ljust(8)[-4:].strip()
n_layers = layers[layer_name]
if levs == n_layers:
suffix = 'bounds'
elif levs == (n_layers-1):
suffix = 'center'
elif levs == (n_layers-2):
suffix = 'interface'
else: # pragma: no cover
suffix = None
warnings.warn("Could not match rlev = %g to a "
"layers coordiante" % rlev)
# dimname = ('layer_' + layer_name + '_' +
# suffix if suffix
dimname = (('l' + layer_name[0] + '_' +
suffix[0]) if suffix else '_UNKNOWN_')
zcoord = [dimname]
else:
zcoord = ['_UNKNOWN_']
else: # pragma: no cover
warnings.warn("Not sure what to do with rlev = " + rlev)
warnings.warn("corresponding diag_id = " + str(diag_id))
zcoord = ['_UNKNOWN_']
coords = zcoord + xycoords
all_diags[key] = dict(dims=coords,
# we need a standard name
attrs={'standard_name': key,
'long_name': desc,
'units': units})
try:
with open(fname) as f:
process_buffer(f)
except TypeError:
process_buffer(fname)
# add mate information
for key, mate_id in mate_lookup.items():
all_diags[key]['attrs']['mate'] = diag_id_lookup[mate_id]
return all_diags
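# A hedged sketch of one available_diagnostics.log row this parser expects
# (seven pipe-separated columns; the values are illustrative):
#
#     1 |ETAN    |  1 |       |SM      M1|m               |Surface Height Anomaly
#
# which would yield dims ['j', 'i'] and attrs with standard_name 'ETAN'.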
# stuff related to llc mds file structure
LLC_NUM_FACES = 13
facet_strides = ((0,3), (3,6), (6,7), (7,10), (10,13))
facet_orders = ('C', 'C', 'C', 'F', 'F')
face_facets = [0, 0, 0, 1, 1, 1, 2, 3, 3, 3, 4, 4, 4]
face_offsets = [0, 1, 2, 0, 1, 2, 0, 0, 1, 2, 0, 1, 2]
transpose_face = [False, False, False, False, False, False, False,
True, True, True, True, True, True]
def _read_2d_facet(fname, nfacet, nlev, nx, dtype='>f8', memmap=True):
# make sure we have a valid dtype
dtype = np.dtype(dtype)
nbytes = dtype.itemsize
# where the facet starts in the file
facet_offset = facet_strides[nfacet][0] * nx * nx * nbytes
level_offset = LLC_NUM_FACES * nx * nx * nbytes * nlev
offset = facet_offset + level_offset
# the array order of the facet
facet_order = facet_orders[nfacet]
# the size shape of the facet
facet_ny = (facet_strides[nfacet][1] - facet_strides[nfacet][0])*nx
facet_shape = (facet_ny, nx)
facet_nitems = facet_ny * nx
with open(fname, 'rb') as f:
#print("Reading %s facet %g nlev %g" % (fname, nfacet, nlev))
if memmap:
data = np.memmap(f, dtype=dtype, mode='r', offset=offset,
shape=facet_shape, order=facet_order)
else:
f.seek(offset)
data = np.fromfile(f, dtype=dtype, count=facet_nitems)
data = data.reshape(facet_shape, order=facet_order)
return data
def _read_2d_face(fname, nface, nlev, nx, dtype='>f8', memmap=True):
# make sure we have a valid dtype
nfacet = face_facets[nface]
face_slice = slice(nx*face_offsets[nface], nx*(face_offsets[nface]+1))
facet_offset = nx * face_offsets[nface]
data_facet = _read_2d_facet(fname, nfacet, nlev, nx,
dtype=dtype, memmap=memmap)
data = data_facet[face_slice]
if transpose_face[nface]:
data = data.T
return data
# manually construct dask graph
def read_3d_llc_data(fname, nz, nx, dtype='>f8', memmap=True, nrecs=1,
method="smallchunks"):
"""Read a three-dimensional LLC file using a custom dask graph.
PARAMETERS
----------
fname : string
Path to the file on disk
nz : int
Number of vertical levels
nx : int
Size of each face side dimension
dtype : np.dtype, optional
Datatype of the data
memmap : bool, optional
Whether to read the data using np.memmap. Forced to be ``False`` for
``method="smallchunks"``.
nrecs : int, optional
The number of records in a multi-record file
method : {"smallchunks", "bigchunks"}, optional
Which routine to use for reading raw LLC. "smallchunks" splits the file
into a individual dask chunk of size (nx x nx) for each face of each
level (i.e. the total number of chunks is 13 * nz). "bigchunks" loads
the whole raw data file (either into memory or as a numpy.memmap),
splits it into faces, and concatenates those faces together using
``dask.array.concatenate``. The different methods will have different
memory and i/o performance depending on the details of the system
configuration.
RETURNS
-------
data : dask.array.Array
The data
"""
dtype = np.dtype(dtype)
if method == "smallchunks":
def load_chunk(nface, nlev):
return _read_2d_face(fname, nface, nlev, nx,
dtype=dtype, memmap=memmap)[None, None, None]
chunks = (1, 1, 1, nx, nx)
shape = (nrecs, nz, LLC_NUM_FACES, nx, nx)
name = 'llc-' + tokenize(fname, shape, dtype,
method) # unique identifier
# we hack the record number as extra vertical levels
dsk = {(name, nrec, nlev, nface, 0, 0): (load_chunk, nface,
nlev + nz*nrec)
for nface in range(LLC_NUM_FACES)
for nlev in range(nz)
for nrec in range(nrecs)}
data = dsa.Array(dsk, name, chunks, dtype=dtype, shape=shape)
elif method == "bigchunks":
shape = (nrecs, nz, LLC_NUM_FACES*nx, nx)
# the dimension that needs to be reshaped
jdim = 2
data = read_raw_data(fname, dtype, shape, use_mmap=memmap)
data = _reshape_llc_data(data, jdim)
# automatically squeeze off z dimension; this matches mds file behavior
if nz == 1:
data = data[:, 0]
return data
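# Usage sketch (hypothetical llc90 velocity file with 50 vertical levels):
#
#     u = read_3d_llc_data('U.0000000010.data', nz=50, nx=90)
#     u.shape   # -> (1, 50, 13, 90, 90) with method="smallchunks"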
# a deprecated function that I can't bear to delete because it was painful to
# write
def _reshape_llc_data(data, jdim): # pragma: no cover
"""Fix the weird problem with llc data array order."""
# Can we do this without copying any data?
# If not, we need to go upstream and implement this at the MDS level
# Or can we fudge it with dask?
# this is all very specific to the llc file output
# would be nice to generalize more, but how?
nside = data.shape[jdim] // LLC_NUM_FACES
# how the LLC data is laid out along the j dimension
strides = ((0,3), (3,6), (6,7), (7,10), (10,13))
# whether to reshape each face
reshape = (False, False, False, True, True)
# this will slice the data into 5 facets
slices = [jdim * (slice(None),) + (slice(nside*st[0], nside*st[1]),)
for st in strides]
facet_arrays = [data[sl] for sl in slices]
face_arrays = []
for ar, rs, st in zip(facet_arrays, reshape, strides):
nfaces_in_facet = st[1] - st[0]
shape = list(ar.shape)
if rs:
# we assume the other horizontal dimension is immediately after jdim
shape[jdim] = ar.shape[jdim+1]
shape[jdim+1] = ar.shape[jdim]
# insert a length-1 dimension along which to concatenate
shape.insert(jdim, 1)
# this modify the array shape in place, with no copies allowed
# but it doesn't work with dask arrays
# ar.shape = shape
ar = ar.reshape(shape)
# now ar is propery shaped, but we still need to slice it into faces
face_slice_dim = jdim + 1 + rs
for n in range(nfaces_in_facet):
face_slice = (face_slice_dim * (slice(None),) +
(slice(nside*n, nside*(n+1)),))
data_face = ar[face_slice]
face_arrays.append(data_face)
# We can't concatenate using numpy (hcat etc.) because it makes a copy,
# presumably loading the memmaps into memory.
# Using dask gets around this.
# But what if we want different chunks, or already chunked the data
# upstream? Doesn't seem like this is ideal
# TODO: Refactor handling of dask arrays and chunking
#return np.concatenate(face_arrays, axis=jdim)
# the dask version doesn't work because of this:
# https://github.com/dask/dask/issues/1645
face_arrays_dask = [dsa.from_array(fa, chunks=fa.shape)
for fa in face_arrays]
concat = dsa.concatenate(face_arrays_dask, axis=jdim)
return concat
def _llc_face_shape(llc_id):
"""Given an integer identifier for the llc grid, return the face shape."""
# known valid LLC configurations
if llc_id in (90, 270, 1080, 2160, 4320):
return (llc_id, llc_id)
else:
raise ValueError("%g is not a valid llc identifier" % llc_id)
def _llc_data_shape(llc_id, nz=None):
"""Given an integer identifier for the llc grid, and possibly a number of
vertical grid points, return the expected shape of the full data field."""
# this is a constant for all LLC setups
NUM_FACES = 13
face_shape = _llc_face_shape(llc_id)
data_shape = (NUM_FACES,) + face_shape
if nz is not None:
data_shape = (nz,) + data_shape
# should we accomodate multiple records?
# no, not in this function
return data_shape
def read_all_variables(variable_list, file_metadata, use_mmap=False,
use_dask=False, chunks="3D"):
"""
Return a dictionary of dask arrays for variables in a MDS file
PARAMETERS
----------
variable_list : list
list of MITgcm variables, from fldList in .meta
file_metadata : dict
internal metadata for binary file
use_mmap : bool, optional
Whether to read the data using a numpy.memmap
chunks : str, optional
Whether to read 2D (default) or 3D chunks
2D chunks are reading (x,y) levels and 3D chunks
are reading the a (x,y,z) field
RETURNS
-------
out : list
list of data arrays (dask.array, numpy.ndarray or memmap)
corresponding to variables from given list in the file
described by file_metadata
"""
out = []
for variable in variable_list:
if chunks == "2D":
out.append(read_2D_chunks(variable, file_metadata,
use_mmap=use_mmap, use_dask=use_dask))
elif chunks == "3D":
out.append(read_3D_chunks(variable, file_metadata,
use_mmap=use_mmap, use_dask=use_dask))
elif chunks == "CS":
out.append(read_CS_chunks(variable, file_metadata,
use_mmap=use_mmap, use_dask=use_dask))
return out
def read_CS_chunks(variable, file_metadata, use_mmap=False, use_dask=False):
"""
Return dask array for variable, from the file described by file_metadata,
using the "cube sphere chunks" method.
Parameters
----------
variable : string
name of the variable to read
file_metadata : dict
internal file_metadata for binary file
use_mmap : bool, optional
Whether to read the data using a numpy.memmap
use_dask : bool, optional (not working yet)
collect the data lazily or eagerly
Returns
-------
dask array for variable, with 2d (ny, nx) chunks
or numpy.ndarray or memmap, depending on input args
"""
if (file_metadata['nx'] == 1) and (file_metadata['ny'] == 1) and \
(len(file_metadata['vars']) == 1):
# vertical coordinate
data_raw = read_raw_data(file_metadata['filename'],
file_metadata['dtype'],
(file_metadata['nz'],), use_mmap=use_mmap,
offset=0, order='C', partial_read=False)
shape = (file_metadata['nt'], file_metadata['nz'], 1,
file_metadata['ny'], file_metadata['nx'])
data_raw = np.reshape(data_raw, shape) # memmap -> ndarray
chunks = (file_metadata['nt'], 1, 1,
file_metadata['ny'], file_metadata['nx'])
data = dsa.from_array(data_raw, chunks=chunks)
else:
nfaces = len(file_metadata['ny_facets'])
shape = (file_metadata['nt'], file_metadata['nz'],
file_metadata['ny'], nfaces, file_metadata['nx'])
data_raw = read_raw_data(file_metadata['filename'],
file_metadata['dtype'],
shape, use_mmap=use_mmap,
offset=0, order='C', partial_read=False)
# data_raw = np.reshape(data_raw, shape) # memmap -> ndarray
chunks = (file_metadata['nt'], 1,
file_metadata['ny'], 1, file_metadata['nx'])
data = dsa.from_array(data_raw, chunks=chunks)
if not use_dask:
data = data.compute()
return data
def read_2D_chunks(variable, file_metadata, use_mmap=False, use_dask=False):
"""
Return dask array for variable, from the file described by file_metadata,
reading 2D chunks.
Parameters
----------
variable : string
name of the variable to read
file_metadata : dict
internal file_metadata for binary file
use_mmap : bool, optional
Whether to read the data using a numpy.memmap
use_dask : bool, optional
collect the data lazily or eagerly
Returns
-------
dask array for variable, with 2d (ny, nx) chunks
or numpy.ndarray or memmap, depending on input args
"""
if (file_metadata['nx'] == 1) and (file_metadata['ny'] == 1) and \
(len(file_metadata['vars']) == 1):
# vertical coordinate
data_raw = read_raw_data(file_metadata['filename'],
file_metadata['dtype'],
(file_metadata['nz'],), use_mmap=use_mmap,
offset=0, order='C', partial_read=False)
shape = (file_metadata['nt'], file_metadata['nz'], 1,
file_metadata['ny'], file_metadata['nx'])
data_raw = np.reshape(data_raw, shape) # memmap -> ndarray
chunks = (file_metadata['nt'], 1, 1,
file_metadata['ny'], file_metadata['nx'])
data = dsa.from_array(data_raw, chunks=chunks)
else:
if file_metadata['has_faces']:
def load_chunk(face, lev, rec):
return _read_xy_chunk(variable, file_metadata, rec=rec,
lev=lev, face=face,
use_mmap=use_mmap)[None, None, None]
chunks = (1, 1, 1, file_metadata['nx'], file_metadata['nx'])
shape = (file_metadata['nt'], file_metadata['nz'],
len(file_metadata['face_facets']),
file_metadata['nx'], file_metadata['nx'])
name = 'llcmds-' + tokenize(file_metadata, variable)
dsk = {(name, rec, lev, face, 0, 0): (load_chunk, face,
lev, rec)
for face in range(len(file_metadata['face_facets']))
for lev in range(file_metadata['nz'])
for rec in range(file_metadata['nt'])}
else:
def load_chunk(lev, rec):
return _read_xy_chunk(variable, file_metadata,
rec=rec, lev=lev,
face=0, use_mmap=use_mmap)[None, None]
chunks = (1, 1, file_metadata['ny'], file_metadata['nx'])
shape = (file_metadata['nt'], file_metadata['nz'],
file_metadata['ny'], file_metadata['nx'])
name = 'mds-' + tokenize(file_metadata, variable)
dsk = {(name, rec, lev, 0, 0): (load_chunk, lev, rec)
for lev in range(file_metadata['nz'])
for rec in range(file_metadata['nt'])}
data = dsa.Array(dsk, name, chunks,
dtype=file_metadata['dtype'], shape=shape)
if not use_dask:
data = data.compute()
return data
def read_3D_chunks(variable, file_metadata, use_mmap=False, use_dask=False):
"""
Return dask array for variable, from the file described by file_metadata,
reading 3D chunks. Not suitable for llc data.
Parameters
----------
variable : string
name of the variable to read
file_metadata : dict
internal file_metadata for binary file
use_mmap : bool, optional
Whether to read the data using a numpy.memmap
use_dask : bool, optional
collect the data lazily or eagerly
Returns
-------
dask array for variable, with 3d (nz, ny, nx) chunks
or numpy.ndarray or memmap, depending on input args
"""
def load_chunk(rec):
return _read_xyz_chunk(variable, file_metadata,
rec=rec,
use_mmap=use_mmap)[None]
chunks = (1, file_metadata['nz'], file_metadata['ny'], file_metadata['nx'])
shape = (file_metadata['nt'], file_metadata['nz'],
file_metadata['ny'], file_metadata['nx'])
name = 'mds-' + tokenize(file_metadata, variable)
dsk = {(name, rec, 0, 0, 0): (load_chunk, rec)
for rec in range(file_metadata['nt'])}
data = dsa.Array(dsk, name, chunks,
dtype=file_metadata['dtype'], shape=shape)
if not use_dask:
data = data.compute()
return data
def _read_xyz_chunk(variable, file_metadata, rec=0, use_mmap=False):
"""
Read a 3d chunk (x,y,z) of variable from file described in
file_metadata.
Parameters
----------
variable : string
name of the variable to read
file_metadata : dict
file_metadata for binary file
rec : integer, optional
time record to read (default=0)
use_mmap : bool, optional
Whether to read the data using a numpy.memmap
Returns
-------
numpy array or memmap
"""
if file_metadata['has_faces'] and ((file_metadata['nx'] > 1) or
(file_metadata['ny'] > 1)):
raise ValueError(
"_read_xyz_chunk cannot be called with llc or cs type grid")
# size of the data element
nbytes = file_metadata['dtype'].itemsize
# byte order
file_metadata['datatype'] = file_metadata['dtype'].newbyteorder(
file_metadata['endian'])
# find index of variable
idx_var = file_metadata['vars'].index(variable)
# 1. compute offset_variable, init to zero
offset_vars = 0
# loop on variables before the one to read
for jvar in np.arange(idx_var):
# inspect its dimensions
dims = file_metadata['dims_vars'][jvar]
# compute the byte size of this variable
nbytes_thisvar = 1*nbytes
for dim in dims:
nbytes_thisvar = nbytes_thisvar*file_metadata[dim]
# update offset from previous variables
offset_vars = offset_vars+nbytes_thisvar
# 2. get dimensions of desired variable
dims = file_metadata['dims_vars'][idx_var]
# inquire for values of dimensions, else return 1
nt, nz, ny, nx = [file_metadata.get(dimname, 1)
for dimname in ('nt', 'nz', 'ny', 'nx')]
# 3. compute offset from previous records of current variable
if (rec > nt-1):
raise ValueError("time record %g greater than number of records %g" %
(rec, nt))
else:
offset_timerecords = rec * nz * ny * nx * nbytes
# 4. compute the offset of the previous variables, records and levels
offset = offset_vars + offset_timerecords
shape = (nz, ny, nx,)
# check if we do a partial read of the file
if (nt > 1) or (len(file_metadata['vars']) > 1):
partial_read = True
else:
partial_read = False
# define the order (row/column major)
# in conventional grids, it's in C
order = 'C'
# 5. Do the actual read
data = read_raw_data(file_metadata['filename'],
file_metadata['datatype'],
shape, use_mmap=use_mmap, offset=offset,
order=order, partial_read=partial_read)
return data
def _read_xy_chunk(variable, file_metadata, rec=0, lev=0, face=0,
use_mmap=False):
"""
Read a 2d chunk along (x,y) of variable from file described in
file_metadata.
Parameters
----------
variable : string
name of the variable to read
file_metadata : dict
file_metadata for binary file
rec : integer, optional
time record to read (default=0)
lev : integer, optional
vertical level to read (default=0)
face : integer, optional
face to read for llc configurations (default=0)
use_mmap : bool, optional
Whether to read the data using a numpy.memmap
Returns
-------
numpy array or memmap
"""
# size of the data element
nbytes = file_metadata['dtype'].itemsize
# byte order
file_metadata['datatype'] = file_metadata['dtype'].newbyteorder(
file_metadata['endian'])
# find index of variable
idx_var = file_metadata['vars'].index(variable)
# 1. compute offset_variable, init to zero
offset_vars = 0
# loop on variables before the one to read
for jvar in np.arange(idx_var):
# inspect its dimensions
dims = file_metadata['dims_vars'][jvar]
# compute the byte size of this variable
nbytes_thisvar = 1*nbytes
for dim in dims:
nbytes_thisvar = nbytes_thisvar*file_metadata[dim]
# update offset from previous variables
offset_vars = offset_vars+nbytes_thisvar
# 2. get dimensions of desired variable
dims = file_metadata['dims_vars'][idx_var]
# inquire for values of dimensions, else return 1
nt, nz, ny, nx = [file_metadata.get(dimname, 1)
for dimname in ('nt', 'nz', 'ny', 'nx')]
# 3. compute offset from previous records of current variable
if (rec > nt-1):
raise ValueError("time record %g greater than number of records %g" %
(rec, nt))
else:
offset_timerecords = rec * nz * ny * nx * nbytes
# 4. compute offset from previous vertical levels of current variable
if (lev > nz-1):
raise ValueError("level %g is greater than number of levels %g" %
(lev, nz))
else:
offset_verticallevels = lev * ny * nx * nbytes
# 5. compute the offset of the previous variables, records and levels
offset = offset_vars + offset_timerecords + offset_verticallevels
# 6. compute offset due to faces
if file_metadata['has_faces']:
        # determine which facet the face belongs to
facet_origin = file_metadata['face_facets'][face]
# compute the offset from previous facets
ny_facets = np.array(file_metadata['ny_facets'])
nyglo_facets = np.concatenate(([0], ny_facets.cumsum()[:-1]), axis=0)
offset_facets = nyglo_facets[facet_origin] * \
file_metadata['nx'] * nbytes
# update offset
offset = offset + offset_facets
        # shape is the shape of the facet
shape = (file_metadata['ny_facets'][facet_origin], nx,)
else:
# no need to update offset and shape is simply:
shape = (ny, nx,)
# check if we do a partial read of the file
if (nt > 1) or (nz > 1) or (len(file_metadata['vars']) > 1) or \
file_metadata['has_faces']:
partial_read = True
else:
partial_read = False
# define the order (row/column major)
if file_metadata['has_faces']:
# in llc, we can have either C or F
order = file_metadata['facet_orders'][facet_origin]
else:
# in conventional grids, it's in C
order = 'C'
# 7. Do the actual read
data_raw = read_raw_data(file_metadata['filename'],
file_metadata['datatype'],
shape, use_mmap=use_mmap, offset=offset,
order=order, partial_read=partial_read)
# 8. Pad data, if needed
data_padded_after = _pad_array(data_raw, file_metadata, face=face)
# 9. extract the face from the facet
if file_metadata['has_faces'] and ('face_offsets' in file_metadata):
face_slice = slice(nx*file_metadata['face_offsets'][face],
nx*(file_metadata['face_offsets'][face]+1))
data = data_padded_after[face_slice]
else:
data = data_padded_after
# 10. Transpose face, if needed
if file_metadata['has_faces'] and ('transpose_face' in file_metadata):
if file_metadata['transpose_face'][face]:
data = data.transpose()
return data
def _pad_array(data, file_metadata, face=0):
"""
Return a padded array. If input data is a numpy.memmap and no padding
is necessary, the function preserves its type. Otherwise, the concatenate
forces it to load into memory.
Parameters
----------
data : numpy array or memmap
input data
file_metadata : dict
metadata for file
face : int, optional
llc face if applicable
Returns
-------
numpy.array or numpy.memmap
"""
# Pad data before in y direction
if 'pad_before_y' in file_metadata:
if file_metadata['has_faces']:
facet_origin = file_metadata['face_facets'][face]
nypad_before = file_metadata['pad_before_y'][facet_origin]
else:
nypad_before = file_metadata['pad_before_y']
pad_before = np.zeros((nypad_before, file_metadata['nx']))
data_padded_before = np.concatenate(
(pad_before, data), axis=0)
else:
data_padded_before = data
# Pad data after in y direction
if 'pad_after_y' in file_metadata:
if file_metadata['has_faces']:
facet_origin = file_metadata['face_facets'][face]
nypad_after = file_metadata['pad_after_y'][facet_origin]
else:
nypad_after = file_metadata['pad_after_y']
pad_after = np.zeros((nypad_after, file_metadata['nx']))
data_padded_after = np.concatenate(
(data_padded_before, pad_after), axis=0)
else:
data_padded_after = data_padded_before
return data_padded_after
def get_extra_metadata(domain='llc', nx=90):
"""
    Return the extra_metadata dictionary for the selected domain
PARAMETERS
----------
domain : str
domain can be llc, aste, cs
nx : int
size of the face in the x direction
RETURNS
-------
extra_metadata : dict
all extra_metadata to handle multi-faceted grids
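    EXAMPLE
    -------
    A minimal sketch; the values follow from the llc dictionary below:
    >>> md = get_extra_metadata(domain='llc', nx=90)
    >>> md['ny'], len(md['face_facets'])
    (1170, 13)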
"""
available_domains = ['llc', 'aste', 'cs']
if domain not in available_domains:
raise ValueError('not an available domain')
# domains
llc = {'has_faces': True, 'ny': 13*nx, 'nx': nx,
'ny_facets': [3*nx, 3*nx, nx, 3*nx, 3*nx],
'face_facets': [0, 0, 0, 1, 1, 1, 2, 3, 3, 3, 4, 4, 4],
'facet_orders': ['C', 'C', 'C', 'F', 'F'],
'face_offsets': [0, 1, 2, 0, 1, 2, 0, 0, 1, 2, 0, 1, 2],
'transpose_face': [False, False, False,
False, False, False, False,
True, True, True, True, True, True]}
aste = {'has_faces': True, 'ny': 5*nx, 'nx': nx,
'ny_facets': [int(5*nx/3.), 0, nx,
int(2*nx/3.), int(5*nx/3.)],
'pad_before_y': [int(1*nx/3.), 0, 0, 0, 0],
'pad_after_y': [0, 0, 0, int(1*nx/3.), int(1*nx/3.)],
'face_facets': [0, 0, 2, 3, 4, 4],
'facet_orders': ['C', 'C', 'C', 'F', 'F'],
'face_offsets': [0, 1, 0, 0, 0, 1],
'transpose_face': [False, False, False,
True, True, True]}
cs = {'has_faces': True, 'ny': nx, 'nx': nx,
'ny_facets': [nx, nx, nx, nx, nx, nx],
'face_facets': [0, 1, 2, 3, 4, 5],
'facet_orders': ['F', 'F', 'F', 'F', 'F', 'F'],
'face_offsets': [0, 0, 0, 0, 0, 0],
'transpose_face': [False, False, False,
False, False, False]}
if domain == 'llc':
extra_metadata = llc
elif domain == 'aste':
extra_metadata = aste
elif domain == 'cs':
extra_metadata = cs
return extra_metadata
def get_grid_from_input(gridfile, nx=None, ny=None, geometry='llc',
dtype=np.dtype('d'), endian='>', use_dask=False, outer=False,
extra_metadata=None):
"""
    Read grid variables from grid input files. This is especially useful
    for llc and cube-sphere configurations used with land-tile
    elimination. Reading the input grid files (e.g. tile00[1-5].mitgrid)
    makes it possible to fill in the blanks left by eliminated land tiles.
PARAMETERS
----------
gridfile : str
gridfile must contain <NFACET> as wildcard (e.g. tile<NFACET>.mitgrid)
nx : int
size of the face in the x direction
ny : int
size of the face in the y direction
geometry : str
        domain geometry; can be llc or cs (cartesian is not supported yet)
dtype : np.dtype
numeric precision (single/double) of input data
endian : string
endianness of input data
use_dask : bool
use dask or not
outer : bool
include outer boundary or not
extra_metadata : dict
dictionary of extra metadata, needed for llc configurations
RETURNS
-------
grid : xarray.Dataset
all grid variables
"""
file_metadata = {}
# grid variables are stored in this order
file_metadata['fldList'] = ['XC', 'YC', 'DXF', 'DYF', 'RAC',
'XG', 'YG', 'DXV', 'DYU', 'RAZ',
'DXC', 'DYC', 'RAW', 'RAS', 'DXG', 'DYG']
outerx_vars = ['DXC', 'RAW', 'DYG'] if outer else []
outery_vars = ['DYC', 'RAS', 'DXG'] if outer else []
outerxy_vars = ['XG', 'YG', 'RAZ'] if outer else []
file_metadata['vars'] = file_metadata['fldList']
dims_vars_list = []
for var in file_metadata['fldList']:
dims_vars_list.append(('ny', 'nx'))
file_metadata['dims_vars'] = dims_vars_list
# no vertical levels or time records
file_metadata['nz'] = 1
file_metadata['nt'] = 1
# for curvilinear non-facet grids (TO DO)
# if nx is not None:
# file_metadata['nx'] = nx
# if ny is not None:
# file_metadata['ny'] = ny
if extra_metadata is not None:
file_metadata.update(extra_metadata)
# numeric representation
file_metadata['endian'] = endian
file_metadata['dtype'] = dtype
if geometry in ['llc', 'cs']:
try:
nfaces = len(file_metadata['face_facets'])
        except KeyError:
raise ValueError('metadata must contain face_facets')
if geometry == 'llc':
nfacets = 5
elif geometry == 'cs':
nfacets = 6
# create placeholders for data
gridfields = {}
for field in file_metadata['fldList']:
gridfields.update({field: None})
if geometry in ['llc', 'cs']:
for kfacet in range(nfacets):
# we need to adapt the metadata to the grid file
grid_metadata = file_metadata.copy()
fname = gridfile.replace('<NFACET>', str(kfacet+1).zfill(3))
grid_metadata['filename'] = fname
if file_metadata['facet_orders'][kfacet] == 'C':
nxgrid = file_metadata['nx'] + 1
nygrid = file_metadata['ny_facets'][kfacet] + 1
elif file_metadata['facet_orders'][kfacet] == 'F':
nxgrid = file_metadata['ny_facets'][kfacet] + 1
nygrid = file_metadata['nx'] + 1
grid_metadata.update({'nx': nxgrid, 'ny': nygrid,
'has_faces': False})
raw = read_all_variables(grid_metadata['vars'], grid_metadata,
use_dask=use_dask)
rawfields = {}
for kfield in np.arange(len(file_metadata['fldList'])):
rawfields.update(
{file_metadata['fldList'][kfield]: raw[kfield]})
for field in file_metadata['fldList']:
# get the full array
tmp = rawfields[field].squeeze()
# transpose
if grid_metadata['facet_orders'][kfacet] == 'F':
tmp = tmp.transpose()
for face in np.arange(nfaces):
# identify faces that need to be filled
if grid_metadata['face_facets'][face] == kfacet:
# get offset of face from facet
offset = file_metadata['face_offsets'][face]
nx = file_metadata['nx'] + 1
nxm1 = file_metadata['nx']
pad_metadata = file_metadata.copy()
pad_metadata['nx'] = file_metadata['nx'] + 1
# pad data, if needed (would trigger eager data eval)
# needs a new array not to pad multiple times
padded = _pad_array(tmp, pad_metadata, face=face)
# extract the data
dataface = padded[offset*nxm1:offset*nxm1 + nx, :]
# transpose, if needed
if file_metadata['transpose_face'][face]:
dataface = dataface.transpose()
# remove irrelevant data
if field in outerx_vars:
dataface = dataface[..., :-1, :].squeeze()
elif field in outery_vars:
dataface = dataface[..., :-1].squeeze()
elif field in outerxy_vars:
dataface = dataface.squeeze()
else:
dataface = dataface[..., :-1, :-1].squeeze()
# assign values
dataface = dsa.stack([dataface], axis=0)
if face == 0:
gridfields[field] = dataface
else:
gridfields[field] = dsa.concatenate(
[gridfields[field], dataface], axis=0)
# create the dataset
nxouter = file_metadata['nx'] + 1 if outer else file_metadata['nx']
if geometry == 'llc':
grid = xr.Dataset({'XC': (['face', 'j', 'i'], gridfields['XC']),
'YC': (['face', 'j', 'i'], gridfields['YC']),
'DXF': (['face', 'j', 'i'], gridfields['DXF']),
'DYF': (['face', 'j', 'i'], gridfields['DYF']),
'RAC': (['face', 'j', 'i'], gridfields['RAC']),
'XG': (['face', 'j_g', 'i_g'], gridfields['XG']),
'YG': (['face', 'j_g', 'i_g'], gridfields['YG']),
'DXV': (['face', 'j', 'i'], gridfields['DXV']),
'DYU': (['face', 'j', 'i'], gridfields['DYU']),
'RAZ': (['face', 'j_g', 'i_g'], gridfields['RAZ']),
'DXC': (['face', 'j', 'i_g'], gridfields['DXC']),
'DYC': (['face', 'j_g', 'i'], gridfields['DYC']),
'RAW': (['face', 'j', 'i_g'], gridfields['RAW']),
'RAS': (['face', 'j_g', 'i'], gridfields['RAS']),
'DXG': (['face', 'j_g', 'i'], gridfields['DXG']),
'DYG': (['face', 'j', 'i_g'], gridfields['DYG'])
},
coords={'i': (['i'], np.arange(file_metadata['nx'])),
'j': (['j'], np.arange(file_metadata['nx'])),
'i_g': (['i_g'],
np.arange(nxouter)),
'j_g': (['j_g'],
np.arange(nxouter)),
'face': (['face'], np.arange(nfaces))
}
)
elif geometry == 'cs':
grid = xr.Dataset({'XC': (['face', 'i', 'j'], gridfields['XC']),
'YC': (['face', 'i', 'j'], gridfields['YC']),
'DXF': (['face', 'i', 'j'], gridfields['DXF']),
'DYF': (['face', 'i', 'j'], gridfields['DYF']),
'RAC': (['face', 'i', 'j'], gridfields['RAC']),
'XG': (['face', 'i_g', 'j_g'], gridfields['XG']),
'YG': (['face', 'i_g', 'j_g'], gridfields['YG']),
'DXV': (['face', 'i', 'j'], gridfields['DXV']),
'DYU': (['face', 'i', 'j'], gridfields['DYU']),
'RAZ': (['face', 'i_g', 'j_g'], gridfields['RAZ']),
'DXC': (['face', 'i', 'j_g'], gridfields['DXC']),
'DYC': (['face', 'i_g', 'j'], gridfields['DYC']),
'RAW': (['face', 'i', 'j_g'], gridfields['RAW']),
'RAS': (['face', 'i_g', 'j'], gridfields['RAS']),
'DXG': (['face', 'i_g', 'j'], gridfields['DXG']),
'DYG': (['face', 'i', 'j_g'], gridfields['DYG'])
},
coords={'i': (['i'], np.arange(file_metadata['nx'])),
'j': (['j'], np.arange(file_metadata['nx'])),
'i_g': (['i_g'],
np.arange(nxouter)),
'j_g': (['j_g'],
np.arange(nxouter)),
'face': (['face'], np.arange(nfaces))
}
)
else: # pragma: no cover
nyouter = file_metadata['ny'] + 1 if outer else file_metadata['ny']
grid = xr.Dataset({'XC': (['j', 'i'], gridfields['XC']),
'YC': (['j', 'i'], gridfields['YC']),
'DXF': (['j', 'i'], gridfields['DXF']),
'DYF': (['j', 'i'], gridfields['DYF']),
'RAC': (['j', 'i'], gridfields['RAC']),
'XG': (['j_g', 'i_g'], gridfields['XG']),
'YG': (['j_g', 'i_g'], gridfields['YG']),
'DXV': (['j', 'i'], gridfields['DXV']),
'DYU': (['j', 'i'], gridfields['DYU']),
'RAZ': (['j_g', 'i_g'], gridfields['RAZ']),
'DXC': (['j', 'i_g'], gridfields['DXC']),
'DYC': (['j_g', 'i'], gridfields['DYC']),
'RAW': (['j', 'i_g'], gridfields['RAW']),
'RAS': (['j_g', 'i'], gridfields['RAS']),
'DXG': (['j_g', 'i'], gridfields['DXG']),
'DYG': (['j', 'i_g'], gridfields['DYG'])
},
coords={'i': (['i'], np.arange(file_metadata['nx'])),
'j': (['j'], np.arange(file_metadata['ny'])),
'i_g': (['i_g'],
np.arange(nxouter)),
'j_g': (['j_g'],
np.arange(nyouter))
}
)
return grid
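# Hypothetical usage sketch (file paths assumed, not from the original): for a
# standard llc90 setup one could do
#   md = get_extra_metadata(domain='llc', nx=90)
#   grid = get_grid_from_input('./tile<NFACET>.mitgrid', geometry='llc',
#                              extra_metadata=md)
# which returns an xarray.Dataset with the 16 grid fields on 13 faces.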
########## WRITING BINARIES #############################
def find_concat_dim_facet(da, facet, extra_metadata):
""" In llc grids, find along which horizontal dimension to concatenate
facet between i, i_g and j, j_g. If the order of the facet is F, concat
along i or i_g. If order is C, concat along j or j_g. Also return
horizontal dim not to concatenate
PARAMETERS
----------
da : xarray.DataArray
xmitgcm llc data array
facet : int
facet number
extra_metadata : dict
dict of extra_metadata from get_extra_metadata
RETURNS
-------
concat_dim, nonconcat_dim : str, str
names of the dimensions for concatenation or not
"""
order = extra_metadata['facet_orders'][facet]
if order == 'C':
possible_concat_dims = ['j', 'j_g']
elif order == 'F':
possible_concat_dims = ['i', 'i_g']
concat_dim = find_concat_dim(da, possible_concat_dims)
    # we also need the other horizontal dimension for vector indexing
all_dims = list(da.dims)
# discard face
all_dims.remove('face')
# remove the concat_dim to find horizontal non_concat dimension
all_dims.remove(concat_dim)
non_concat_dim = all_dims[0]
return concat_dim, non_concat_dim
def find_concat_dim(da, possible_concat_dims):
""" look for available dimensions in dataaray and pick the one
from a list of candidates
PARAMETERS
----------
da : xarray.DataArray
xmitgcm llc data array
possible_concat_dims : list
list of potential dims
RETURNS
-------
out : str
dimension on which to concatenate
"""
out = None
for d in possible_concat_dims:
if d in da.dims:
out = d
return out
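# A minimal sketch (DataArray dims assumed): for a tracer field with dims
# ('face', 'j', 'i'), find_concat_dim(da, ['j', 'j_g']) returns 'j'; for a
# v-point field with dims ('face', 'j_g', 'i') it returns 'j_g'.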
def rebuild_llc_facets(da, extra_metadata):
""" For LLC grids, rebuilds facets from a xmitgcm dataarray and
store into a dictionary
PARAMETERS
----------
da : xarray.DataArray
xmitgcm llc data array
extra_metadata : dict
dict of extra_metadata from get_extra_metadata
RETURNS
-------
facets : dict
all facets data in xarray.DataArray form packed into a dictionary
"""
nfacets = len(extra_metadata['facet_orders'])
nfaces = len(extra_metadata['face_facets'])
facets = {}
# rebuild the facets (with padding if present)
for kfacet in range(nfacets):
facets.update({'facet' + str(kfacet): None})
concat_dim, non_concat_dim = find_concat_dim_facet(
da, kfacet, extra_metadata)
for kface in range(nfaces):
# concatenate faces back into facets
if extra_metadata['face_facets'][kface] == kfacet:
if extra_metadata['face_offsets'][kface] == 0:
# first face of facet
tmp = da.sel(face=kface)
else:
# any other face needs to be concatenated
newface = da.sel(face=kface)
tmp = xr.concat([facets['facet' + str(kfacet)],
newface], dim=concat_dim)
facets['facet' + str(kfacet)] = tmp
# if present, remove padding from facets
for kfacet in range(nfacets):
concat_dim, non_concat_dim = find_concat_dim_facet(
da, kfacet, extra_metadata)
# remove pad before
if 'pad_before_y' in extra_metadata:
pad = extra_metadata['pad_before_y'][kfacet]
# padded array
padded = facets['facet' + str(kfacet)]
if pad != 0:
# we need to relabel the grid cells
ng = len(padded[concat_dim].values)
padded[concat_dim] = np.arange(ng)
# select index from non-padded array
unpadded_bef = padded.isel({concat_dim: range(pad, ng)})
else:
unpadded_bef = padded
facets['facet' + str(kfacet)] = unpadded_bef
# remove pad after
if 'pad_after_y' in extra_metadata:
pad = extra_metadata['pad_after_y'][kfacet]
# padded array
padded = facets['facet' + str(kfacet)]
if pad != 0:
# we need to relabel the grid cells
ng = len(padded[concat_dim].values)
padded[concat_dim] = np.arange(ng)
# select index from non-padded array
last = ng-pad
unpadded_aft = padded.isel({concat_dim: range(last)})
else:
unpadded_aft = padded
facets['facet' + str(kfacet)] = unpadded_aft
return facets
def llc_facets_3d_spatial_to_compact(facets, dimname, extra_metadata):
""" Write in compact form a list of 3d facets
    PARAMETERS
    ----------
    facets : dict
        dict of xarray.dataarrays for the facets
    dimname : str
        name of the vertical dimension (e.g. 'k')
    extra_metadata : dict
        extra_metadata from get_extra_metadata
RETURNS
-------
flatdata : numpy.array
all the data in vector form
"""
nz = len(facets['facet0'][dimname])
nfacets = len(facets)
flatdata = np.array([])
for kz in range(nz):
# rebuild the dict
tmpdict = {}
for kfacet in range(nfacets):
this_facet = facets['facet' + str(kfacet)]
if this_facet is not None:
                tmpdict['facet' + str(kfacet)] = this_facet.isel({dimname: kz})
else:
tmpdict['facet' + str(kfacet)] = None
# concatenate all 2d arrays
compact2d = llc_facets_2d_to_compact(tmpdict, extra_metadata)
flatdata = np.concatenate([flatdata, compact2d])
return flatdata
def llc_facets_2d_to_compact(facets, extra_metadata):
""" Write in compact form a list of 2d facets
PARAMETERS
----------
facets: dict
dict of xarray.dataarrays for the facets
extra_metadata: dict
extra_metadata from get_extra_metadata
RETURNS
-------
flatdata : numpy.array
all the data in vector form
"""
flatdata = np.array([])
# loop over facets
for kfacet in range(len(facets)):
if facets['facet' + str(kfacet)] is not None:
tmp = np.reshape(facets['facet' + str(kfacet)].values, (-1))
flatdata = np.concatenate([flatdata, tmp])
return flatdata
def write_to_binary(flatdata, fileout, dtype=np.dtype('f')):
""" write data in binary file
PARAMETERS
----------
flatdata: numpy.array
vector of data to write
fileout: str
output file name
dtype: np.dtype
single/double precision
RETURNS
-------
None
"""
    # write data to the binary file, byteswapping to big-endian if needed
    tmp = flatdata.astype(dtype)
    if sys.byteorder == 'little':
        tmp = tmp.byteswap(True)
    with open(fileout, "wb") as fid:
        fid.write(tmp.tobytes())
    return None
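# Hypothetical round-trip sketch (file name assumed): data written with
#   write_to_binary(compact, 'pickup.bin', dtype=np.dtype('f'))
# comes out big-endian single precision, which matches the '>' endianness
# expected by the readers above.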
| xgcm/xmitgcm | xmitgcm/utils.py | Python | mit | 66,760 |
# Copyright (C) 2014 Andrey Antukh <[email protected]>
# Copyright (C) 2014 Jesús Espino <[email protected]>
# Copyright (C) 2014 David Barragán <[email protected]>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.db import transaction
from django.db import connection
from taiga.projects import models
def update_projects_order_in_bulk(bulk_data:list, field:str, user):
"""
Update the order of user projects in the user membership.
    `bulk_data` should be a list of dicts with the following format:
    [{"project_id": <project id>, "order": <new order value>}, ...]
"""
membership_ids = []
new_order_values = []
for membership_data in bulk_data:
project_id = membership_data["project_id"]
membership = user.memberships.get(project_id=project_id)
membership_ids.append(membership.id)
new_order_values.append({field: membership_data["order"]})
from taiga.base.utils import db
db.update_in_bulk_with_ids(membership_ids, new_order_values, model=models.Membership)
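# Hypothetical call sketch (ids and field name assumed, not from the
# original):
#   update_projects_order_in_bulk(
#       [{"project_id": 1, "order": 2}, {"project_id": 2, "order": 1}],
#       field="user_order", user=some_user)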
@transaction.atomic
def bulk_update_userstory_status_order(project, user, data):
cursor = connection.cursor()
sql = """
prepare bulk_update_order as update projects_userstorystatus set "order" = $1
where projects_userstorystatus.id = $2 and
projects_userstorystatus.project_id = $3;
"""
cursor.execute(sql)
for id, order in data:
cursor.execute("EXECUTE bulk_update_order (%s, %s, %s);",
(order, id, project.id))
cursor.execute("DEALLOCATE bulk_update_order")
cursor.close()
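# This function and the bulk_update_*_order variants below all follow the same
# pattern: PREPARE a parameterized UPDATE once, EXECUTE it for each
# (id, order) pair, then DEALLOCATE the statement. A hypothetical call (ids
# assumed): bulk_update_points_order(project, user, data=[(10, 1), (11, 2)])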
@transaction.atomic
def bulk_update_points_order(project, user, data):
cursor = connection.cursor()
sql = """
prepare bulk_update_order as update projects_points set "order" = $1
where projects_points.id = $2 and
projects_points.project_id = $3;
"""
cursor.execute(sql)
for id, order in data:
cursor.execute("EXECUTE bulk_update_order (%s, %s, %s);",
(order, id, project.id))
cursor.execute("DEALLOCATE bulk_update_order")
cursor.close()
@transaction.atomic
def bulk_update_task_status_order(project, user, data):
cursor = connection.cursor()
sql = """
prepare bulk_update_order as update projects_taskstatus set "order" = $1
where projects_taskstatus.id = $2 and
projects_taskstatus.project_id = $3;
"""
cursor.execute(sql)
for id, order in data:
cursor.execute("EXECUTE bulk_update_order (%s, %s, %s);",
(order, id, project.id))
cursor.execute("DEALLOCATE bulk_update_order")
cursor.close()
@transaction.atomic
def bulk_update_issue_status_order(project, user, data):
cursor = connection.cursor()
sql = """
prepare bulk_update_order as update projects_issuestatus set "order" = $1
where projects_issuestatus.id = $2 and
projects_issuestatus.project_id = $3;
"""
cursor.execute(sql)
for id, order in data:
cursor.execute("EXECUTE bulk_update_order (%s, %s, %s);",
(order, id, project.id))
cursor.execute("DEALLOCATE bulk_update_order")
cursor.close()
@transaction.atomic
def bulk_update_issue_type_order(project, user, data):
cursor = connection.cursor()
sql = """
prepare bulk_update_order as update projects_issuetype set "order" = $1
where projects_issuetype.id = $2 and
projects_issuetype.project_id = $3;
"""
cursor.execute(sql)
for id, order in data:
cursor.execute("EXECUTE bulk_update_order (%s, %s, %s);",
(order, id, project.id))
cursor.execute("DEALLOCATE bulk_update_order")
cursor.close()
@transaction.atomic
def bulk_update_priority_order(project, user, data):
cursor = connection.cursor()
sql = """
prepare bulk_update_order as update projects_priority set "order" = $1
where projects_priority.id = $2 and
projects_priority.project_id = $3;
"""
cursor.execute(sql)
for id, order in data:
cursor.execute("EXECUTE bulk_update_order (%s, %s, %s);",
(order, id, project.id))
cursor.execute("DEALLOCATE bulk_update_order")
cursor.close()
@transaction.atomic
def bulk_update_severity_order(project, user, data):
cursor = connection.cursor()
sql = """
prepare bulk_update_order as update projects_severity set "order" = $1
where projects_severity.id = $2 and
projects_severity.project_id = $3;
"""
cursor.execute(sql)
for id, order in data:
cursor.execute("EXECUTE bulk_update_order (%s, %s, %s);",
(order, id, project.id))
cursor.execute("DEALLOCATE bulk_update_order")
cursor.close()
| WALR/taiga-back | taiga/projects/services/bulk_update_order.py | Python | agpl-3.0 | 5,428 |
from sys import argv
script, filename = argv
txt = open(filename)
print('Here\'s your file %r:' % filename)
print(txt.read())
print('Type the filename again:')
file_again = input('>')
txt_again = open(file_again)
print(txt_again.read()) | johnwang117/learn-python-the-hard-way | ex15.py | Python | gpl-3.0 | 242 |
def test_azure_template(check_cli_success, azure_config_path):
info, desc = check_cli_success(azure_config_path)
def test_azure_template_with_helper(check_cli_success, azure_with_helper_config_path):
info, desc = check_cli_success(azure_with_helper_config_path)
| dcos/dcos-launch | test/test_arm.py | Python | apache-2.0 | 272 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
import ctypes
import os.path as osp
from sys import platform
def get_ext():
return ".dylib" if platform == "darwin" else ".so"
def load_dll(dll):
try:
return [ctypes.CDLL(dll, ctypes.RTLD_GLOBAL)]
except OSError:
return []
def load_sw():
cur_path = osp.dirname(osp.abspath(osp.expanduser(__file__)))
sw_libname = "libsw" + get_ext()
sw_lib = osp.join(cur_path, "..", "build", sw_libname)
load_dll(sw_lib)
def init(hw_backend):
"""Init hardware and software shared library for accelerator
Parameters
------------
hw_backend : str
Hardware backend can be verilog or chisel
"""
cur_path = osp.dirname(osp.abspath(osp.expanduser(__file__)))
hw_libname = "libhw" + get_ext()
if hw_backend in ("verilog", "chisel"):
hw_lib = osp.join(cur_path, "..", "hardware", hw_backend, "build", hw_libname)
m = tvm.module.load(hw_lib, "vta-tsim")
load_sw()
f = tvm.get_global_func("tvm.vta.tsim.init")
f(m)
def load_module():
load_sw()
return tvm.get_global_func("tvm.vta.driver")
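# Hypothetical usage sketch (backend assumed): a test driver would typically
# run
#   init("verilog")      # load the hardware and software libraries
#   f = load_module()    # fetch the packed accelerator driver function
# before issuing simulation requests through f.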
| mlperf/training_results_v0.7 | Fujitsu/benchmarks/resnet/implementations/implementation_open/mxnet/3rdparty/tvm/vta/apps/tsim_example/python/tsim.py | Python | apache-2.0 | 1,886 |
# -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
import os
from superdesk.errors import IngestEmailError
from superdesk.io.feed_parsers.rfc822 import EMailRFC822FeedParser
from superdesk.tests import TestCase
class RFC822JsonFormatter(TestCase):
vocab = [{'_id': 'categories', 'items': [{'is_active': True, 'name': 'Domestic Sport', 'qcode': 's'}]}]
desk = [{'_id': 1, 'name': 'Brisbane'}]
user = [{'_id': 1, 'email': '[email protected]', 'byline': 'A Mock Up', 'sign_off': 'TA'}]
def setUp(self):
self.app.data.insert('vocabularies', self.vocab)
self.app.data.insert('desks', self.desk)
self.app.data.insert('users', self.user)
with self.app.app_context():
self.provider = {'name': 'Test', 'config': {'formatted': True}}
def test_formatted_email_parser(self):
filename = 'json-email.txt'
dirname = os.path.dirname(os.path.realpath(__file__))
fixture = os.path.join(dirname, 'fixtures', filename)
with open(fixture, mode='rb') as f:
bytes = f.read()
parser = EMailRFC822FeedParser()
self.items = parser.parse([(1, bytes)], self.provider)
self.assertEqual(self.items[0]['priority'], 5)
self.assertEqual(self.items[0]['sign_off'], 'TA')
self.assertEqual(self.items[0]['anpa_category'], [{'qcode': 's'}])
self.assertEqual(self.items[0]['body_html'], '<p>Lorem ipsum</p>')
self.assertEqual(self.items[0]['abstract'], 'Abstract-2')
self.assertEqual(self.items[0]['headline'], 'Headline-2')
self.assertEqual(self.items[0]['original_creator'], 1)
self.assertEqual(self.items[0]['task']['desk'], 1)
self.assertEqual(self.items[0]['urgency'], 4)
self.assertEqual(self.items[0]['type'], 'text')
self.assertEqual(self.items[0]['guid'], '<[email protected]>')
self.assertEqual(self.items[0]['original_source'], '[email protected]')
self.assertEqual(self.items[0]['slugline'], 'Slugline-2')
self.assertEqual(self.items[0]['byline'], 'A Mock Up')
def test_bad_user(self):
filename = 'json-email-bad-user.txt'
dirname = os.path.dirname(os.path.realpath(__file__))
fixture = os.path.join(dirname, 'fixtures', filename)
with open(fixture, mode='rb') as f:
bytes = f.read()
parser = EMailRFC822FeedParser()
try:
with self.assertRaises(IngestEmailError) as exc_context:
self.items = parser.parse([(1, bytes)], self.provider)
        except Exception:
self.fail('Expected exception type was not raised.')
ex = exc_context.exception
self.assertEqual(ex.code, 6001)
| nistormihai/superdesk-core | apps/io/formatted_email_test.py | Python | agpl-3.0 | 2,980 |
#!/usr/bin/python3
import sys
import json
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
TIMEOUT = 5
DEBUG = False
if DEBUG:
CHROME_DRIVER_PATH=""
CFG_FNAME = ""
else:
CHROME_DRIVER_PATH="/bin/chromedriver"
CFG_FNAME = "/app/watch.json"
class Browser:
def __enter__(self):
options = webdriver.ChromeOptions()
options.add_argument("--headless")
options.add_argument("--disable-gpu")
options.add_argument("--no-sandbox")
options.add_argument('--disable-dev-shm-usage')
options.add_argument('--disable-gpu-sandbox')
options.add_argument("--single-process")
options.add_argument('window-size=1920x1080')
options.add_argument("user-agent=Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.100 Safari/537.36")
if not DEBUG:
options.binary_location = "/usr/bin/google-chrome"
self.browser = webdriver.Chrome(
executable_path=CHROME_DRIVER_PATH, options=options)
return self.browser
def __exit__(self, type, value, traceback):
self.browser.quit()
def get_config(fname):
with open(fname) as data:
return json.load(data)
def get_element(by, txt):
element_present = EC.presence_of_element_located((by, txt))
WebDriverWait(browser, TIMEOUT).until(element_present)
return browser.find_element(by, txt)
def accept_cookies():
browser.get('https://finance.yahoo.com/')
elem = get_element(By.NAME, 'agree') # Find the search box
elem.click()
def get_stock(stock) -> float:
browser.get('https://finance.yahoo.com/')
elem = get_element(By.NAME, 'yfin-usr-qry') # Find the search box
elem.send_keys(stock + Keys.RETURN)
css_locator = 'span[data-reactid="32"]'
return float(get_element(By.CSS_SELECTOR, css_locator).text)
def get_currency(curr):
css_locator = 'span[data-reactid="32"]'
browser.get(f'https://finance.yahoo.com/quote/{curr}=x/')
return float(get_element(By.CSS_SELECTOR, css_locator).text)
with Browser() as browser:
accept_cookies()
total = 0
print("="*80)
for stock in get_config(CFG_FNAME).get('stocks'):
stockprice = get_stock(stock.get('symbol'))
currency = get_currency(stock.get('currency'))
count = stock.get('count')
name = stock.get('name')
total_per_stock = stockprice * currency * count
total += total_per_stock
print(f"Stock {name} costs {stockprice} per share, with {count} shares totalling {total_per_stock}")
print("="*80)
print(f"Total price of stocks is {total}")
print("="*80)
| koss822/misc | Python/yahoo-scraper/app/scraper.py | Python | gpl-3.0 | 2,857 |
from init import *
| treverhines/ModEst | modest/pymls/__init__.py | Python | mit | 19 |
from datetime import date
from django.conf import settings
from django.contrib import messages
from django.contrib.auth.decorators import permission_required
from django.db.models import Q
from django.http import JsonResponse
from django.shortcuts import get_object_or_404, redirect, reverse
from django.template.response import TemplateResponse
from django.utils.translation import npgettext_lazy, pgettext_lazy
from django.views.decorators.http import require_POST
from ...core.utils import get_paginator_items
from ...discount.models import Sale
from ...product.models import (
Attribute, AttributeValue, Product, ProductImage, ProductType,
ProductVariant)
from ...product.utils.availability import get_product_availability
from ...product.utils.costs import (
get_margin_for_variant, get_product_costs_data)
from ..views import staff_member_required
from . import forms
from .filters import AttributeFilter, ProductFilter, ProductTypeFilter
@staff_member_required
@permission_required('product.manage_products')
def product_list(request):
products = Product.objects.prefetch_related('images')
products = products.order_by('name')
product_types = ProductType.objects.all()
product_filter = ProductFilter(request.GET, queryset=products)
products = get_paginator_items(
product_filter.qs, settings.DASHBOARD_PAGINATE_BY,
request.GET.get('page'))
ctx = {
'bulk_action_form': forms.ProductBulkUpdate(),
'products': products, 'product_types': product_types,
'filter_set': product_filter,
'is_empty': not product_filter.queryset.exists()}
return TemplateResponse(request, 'dashboard/product/list.html', ctx)
@staff_member_required
@permission_required('product.manage_products')
def product_details(request, pk):
products = Product.objects.prefetch_related('variants', 'images').all()
product = get_object_or_404(products, pk=pk)
variants = product.variants.all()
images = product.images.all()
availability = get_product_availability(
product, discounts=request.discounts, taxes=request.taxes)
sale_price = availability.price_range_undiscounted
discounted_price = availability.price_range
purchase_cost, margin = get_product_costs_data(product)
    # no_variants is True for product types that don't require variants.
# In this case we're using the first variant under the hood to allow stock
# management.
no_variants = not product.product_type.has_variants
only_variant = variants.first() if no_variants else None
ctx = {
'product': product, 'sale_price': sale_price,
'discounted_price': discounted_price, 'variants': variants,
'images': images, 'no_variants': no_variants,
'only_variant': only_variant, 'purchase_cost': purchase_cost,
'margin': margin, 'is_empty': not variants.exists()}
return TemplateResponse(request, 'dashboard/product/detail.html', ctx)
@require_POST
@staff_member_required
@permission_required('product.manage_products')
def product_toggle_is_published(request, pk):
product = get_object_or_404(Product, pk=pk)
product.is_published = not product.is_published
product.save(update_fields=['is_published'])
return JsonResponse(
{'success': True, 'is_published': product.is_published})
@staff_member_required
@permission_required('product.manage_products')
def product_select_type(request):
"""View for add product modal embedded in the product list view."""
form = forms.ProductTypeSelectorForm(request.POST or None)
status = 200
if form.is_valid():
redirect_url = reverse(
'dashboard:product-add',
kwargs={'type_pk': form.cleaned_data.get('product_type').pk})
return (
JsonResponse({'redirectUrl': redirect_url})
if request.is_ajax() else redirect(redirect_url))
elif form.errors:
status = 400
ctx = {'form': form}
template = 'dashboard/product/modal/select_type.html'
return TemplateResponse(request, template, ctx, status=status)
@staff_member_required
@permission_required('product.manage_products')
def product_create(request, type_pk):
track_inventory = request.site.settings.track_inventory_by_default
product_type = get_object_or_404(ProductType, pk=type_pk)
create_variant = not product_type.has_variants
product = Product()
product.product_type = product_type
product_form = forms.ProductForm(request.POST or None, instance=product)
if create_variant:
variant = ProductVariant(
product=product, track_inventory=track_inventory)
variant_form = forms.ProductVariantForm(
request.POST or None,
instance=variant, prefix='variant')
variant_errors = not variant_form.is_valid()
else:
variant_form = None
variant_errors = False
if product_form.is_valid() and not variant_errors:
product = product_form.save()
if create_variant:
variant.product = product
variant_form.save()
msg = pgettext_lazy(
'Dashboard message', 'Added product %s') % (product,)
messages.success(request, msg)
return redirect('dashboard:product-details', pk=product.pk)
ctx = {
'product_form': product_form, 'variant_form': variant_form,
'product': product}
return TemplateResponse(request, 'dashboard/product/form.html', ctx)
@staff_member_required
@permission_required('product.manage_products')
def product_edit(request, pk):
product = get_object_or_404(
Product.objects.prefetch_related('variants'), pk=pk)
form = forms.ProductForm(request.POST or None, instance=product)
edit_variant = not product.product_type.has_variants
if edit_variant:
variant = product.variants.first()
variant_form = forms.ProductVariantForm(
request.POST or None, instance=variant, prefix='variant')
variant_errors = not variant_form.is_valid()
else:
variant_form = None
variant_errors = False
if form.is_valid() and not variant_errors:
product = form.save()
if edit_variant:
variant_form.save()
msg = pgettext_lazy(
'Dashboard message', 'Updated product %s') % (product,)
messages.success(request, msg)
return redirect('dashboard:product-details', pk=product.pk)
ctx = {
'product': product, 'product_form': form, 'variant_form': variant_form}
return TemplateResponse(request, 'dashboard/product/form.html', ctx)
@staff_member_required
@permission_required('product.manage_products')
def product_delete(request, pk):
product = get_object_or_404(Product, pk=pk)
if request.method == 'POST':
product.delete()
msg = pgettext_lazy(
'Dashboard message', 'Removed product %s') % (product,)
messages.success(request, msg)
return redirect('dashboard:product-list')
return TemplateResponse(
request,
'dashboard/product/modal/confirm_delete.html',
{'product': product})
@require_POST
@staff_member_required
@permission_required('product.manage_products')
def product_bulk_update(request):
form = forms.ProductBulkUpdate(request.POST)
if form.is_valid():
form.save()
count = len(form.cleaned_data['products'])
msg = npgettext_lazy(
'Dashboard message',
'%(count)d product has been updated',
'%(count)d products have been updated',
number='count') % {'count': count}
messages.success(request, msg)
return redirect('dashboard:product-list')
@staff_member_required
def ajax_products_list(request):
"""Return products filtered by request GET parameters.
Response format is that of a Select2 JS widget.
"""
queryset = (
Product.objects.all()
if request.user.has_perm('product.manage_products')
else Product.objects.published())
search_query = request.GET.get('q', '')
if search_query:
queryset = queryset.filter(Q(name__icontains=search_query))
products = [
{'id': product.id, 'text': str(product)} for product in queryset]
return JsonResponse({'results': products})
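# A sketch of the Select2 payload returned above (values assumed):
#   {"results": [{"id": 1, "text": "Blue T-Shirt"}, ...]}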
@staff_member_required
@permission_required('product.manage_products')
def product_type_list(request):
types = ProductType.objects.all().prefetch_related(
'product_attributes', 'variant_attributes').order_by('name')
type_filter = ProductTypeFilter(request.GET, queryset=types)
types = get_paginator_items(
type_filter.qs, settings.DASHBOARD_PAGINATE_BY,
request.GET.get('page'))
types.object_list = [
(pt.pk, pt.name, pt.product_attributes.all(),
pt.variant_attributes.all()) for pt in types.object_list]
ctx = {
'product_types': types, 'filter_set': type_filter,
'is_empty': not type_filter.queryset.exists()}
return TemplateResponse(
request,
'dashboard/product/product_type/list.html',
ctx)
@staff_member_required
@permission_required('product.manage_products')
def product_type_create(request):
product_type = ProductType()
form = forms.ProductTypeForm(request.POST or None, instance=product_type)
if form.is_valid():
product_type = form.save()
msg = pgettext_lazy(
'Dashboard message', 'Added product type %s') % (product_type,)
messages.success(request, msg)
return redirect('dashboard:product-type-list')
ctx = {'form': form, 'product_type': product_type}
return TemplateResponse(
request,
'dashboard/product/product_type/form.html',
ctx)
@staff_member_required
@permission_required('product.manage_products')
def product_type_edit(request, pk):
product_type = get_object_or_404(ProductType, pk=pk)
form = forms.ProductTypeForm(request.POST or None, instance=product_type)
if form.is_valid():
product_type = form.save()
msg = pgettext_lazy(
'Dashboard message', 'Updated product type %s') % (product_type,)
messages.success(request, msg)
return redirect('dashboard:product-type-update', pk=pk)
ctx = {'form': form, 'product_type': product_type}
return TemplateResponse(
request,
'dashboard/product/product_type/form.html',
ctx)
@staff_member_required
@permission_required('product.manage_products')
def product_type_delete(request, pk):
product_type = get_object_or_404(ProductType, pk=pk)
if request.method == 'POST':
product_type.delete()
msg = pgettext_lazy(
'Dashboard message', 'Removed product type %s') % (product_type,)
messages.success(request, msg)
return redirect('dashboard:product-type-list')
ctx = {
'product_type': product_type,
'products': product_type.products.all()}
return TemplateResponse(
request,
'dashboard/product/product_type/modal/confirm_delete.html',
ctx)
@staff_member_required
@permission_required('product.manage_products')
def variant_details(request, product_pk, variant_pk):
product = get_object_or_404(Product, pk=product_pk)
variant = get_object_or_404(product.variants.all(), pk=variant_pk)
# If the product type of this product assumes no variants, redirect to
# product details page that has special UI for products without variants.
if not product.product_type.has_variants:
return redirect('dashboard:product-details', pk=product.pk)
images = variant.images.all()
margin = get_margin_for_variant(variant)
discounted_price = variant.get_price(
discounts=Sale.objects.active(date.today())).gross
ctx = {
'images': images, 'product': product, 'variant': variant,
'margin': margin, 'discounted_price': discounted_price}
return TemplateResponse(
request,
'dashboard/product/product_variant/detail.html',
ctx)
@staff_member_required
@permission_required('product.manage_products')
def variant_create(request, product_pk):
track_inventory = request.site.settings.track_inventory_by_default
product = get_object_or_404(Product.objects.all(), pk=product_pk)
variant = ProductVariant(product=product, track_inventory=track_inventory)
form = forms.ProductVariantForm(
request.POST or None,
instance=variant)
if form.is_valid():
form.save()
msg = pgettext_lazy(
'Dashboard message', 'Saved variant %s') % (variant.name,)
messages.success(request, msg)
return redirect(
'dashboard:variant-details', product_pk=product.pk,
variant_pk=variant.pk)
ctx = {'form': form, 'product': product, 'variant': variant}
return TemplateResponse(
request,
'dashboard/product/product_variant/form.html',
ctx)
@staff_member_required
@permission_required('product.manage_products')
def variant_edit(request, product_pk, variant_pk):
product = get_object_or_404(Product.objects.all(), pk=product_pk)
variant = get_object_or_404(product.variants.all(), pk=variant_pk)
form = forms.ProductVariantForm(request.POST or None, instance=variant)
if form.is_valid():
form.save()
msg = pgettext_lazy(
'Dashboard message', 'Saved variant %s') % (variant.name,)
messages.success(request, msg)
return redirect(
'dashboard:variant-details', product_pk=product.pk,
variant_pk=variant.pk)
ctx = {'form': form, 'product': product, 'variant': variant}
return TemplateResponse(
request,
'dashboard/product/product_variant/form.html',
ctx)
@staff_member_required
@permission_required('product.manage_products')
def variant_delete(request, product_pk, variant_pk):
product = get_object_or_404(Product, pk=product_pk)
variant = get_object_or_404(product.variants, pk=variant_pk)
if request.method == 'POST':
variant.delete()
msg = pgettext_lazy(
'Dashboard message', 'Removed variant %s') % (variant.name,)
messages.success(request, msg)
return redirect('dashboard:product-details', pk=product.pk)
ctx = {
'is_only_variant': product.variants.count() == 1, 'product': product,
'variant': variant}
return TemplateResponse(
request,
'dashboard/product/product_variant/modal/confirm_delete.html',
ctx)
@staff_member_required
@permission_required('product.manage_products')
def variant_images(request, product_pk, variant_pk):
product = get_object_or_404(Product, pk=product_pk)
qs = product.variants.prefetch_related('images')
variant = get_object_or_404(qs, pk=variant_pk)
form = forms.VariantImagesSelectForm(request.POST or None, variant=variant)
if form.is_valid():
form.save()
return redirect(
'dashboard:variant-details', product_pk=product.pk,
variant_pk=variant.pk)
ctx = {'form': form, 'product': product, 'variant': variant}
return TemplateResponse(
request,
'dashboard/product/product_variant/modal/select_images.html',
ctx)
@staff_member_required
def ajax_available_variants_list(request):
"""Return variants filtered by request GET parameters.
Response format is that of a Select2 JS widget.
"""
available_products = Product.objects.published().prefetch_related(
'category',
'product_type__product_attributes')
queryset = ProductVariant.objects.filter(
product__in=available_products).prefetch_related(
'product__category',
'product__product_type__product_attributes')
search_query = request.GET.get('q', '')
if search_query:
queryset = queryset.filter(
Q(sku__icontains=search_query) |
Q(name__icontains=search_query) |
Q(product__name__icontains=search_query))
variants = [
{'id': variant.id, 'text': variant.get_ajax_label(request.discounts)}
for variant in queryset]
return JsonResponse({'results': variants})
@staff_member_required
@permission_required('product.manage_products')
def product_images(request, product_pk):
products = Product.objects.prefetch_related('images')
product = get_object_or_404(products, pk=product_pk)
images = product.images.all()
ctx = {
'product': product, 'images': images, 'is_empty': not images.exists()}
return TemplateResponse(
request, 'dashboard/product/product_image/list.html', ctx)
@staff_member_required
@permission_required('product.manage_products')
def product_image_create(request, product_pk):
product = get_object_or_404(Product, pk=product_pk)
product_image = ProductImage(product=product)
form = forms.ProductImageForm(
request.POST or None, request.FILES or None, instance=product_image)
if form.is_valid():
product_image = form.save()
msg = pgettext_lazy(
'Dashboard message',
'Added image %s') % (product_image.image.name,)
messages.success(request, msg)
return redirect('dashboard:product-image-list', product_pk=product.pk)
ctx = {'form': form, 'product': product, 'product_image': product_image}
return TemplateResponse(
request,
'dashboard/product/product_image/form.html',
ctx)
@staff_member_required
@permission_required('product.manage_products')
def product_image_edit(request, product_pk, img_pk):
product = get_object_or_404(Product, pk=product_pk)
product_image = get_object_or_404(product.images, pk=img_pk)
form = forms.ProductImageForm(
request.POST or None, request.FILES or None, instance=product_image)
if form.is_valid():
product_image = form.save()
msg = pgettext_lazy(
'Dashboard message',
'Updated image %s') % (product_image.image.name,)
messages.success(request, msg)
return redirect('dashboard:product-image-list', product_pk=product.pk)
ctx = {'form': form, 'product': product, 'product_image': product_image}
return TemplateResponse(
request,
'dashboard/product/product_image/form.html',
ctx)
@staff_member_required
@permission_required('product.manage_products')
def product_image_delete(request, product_pk, img_pk):
product = get_object_or_404(Product, pk=product_pk)
image = get_object_or_404(product.images, pk=img_pk)
if request.method == 'POST':
image.delete()
msg = pgettext_lazy(
'Dashboard message', 'Removed image %s') % (image.image.name,)
messages.success(request, msg)
return redirect('dashboard:product-image-list', product_pk=product.pk)
return TemplateResponse(
request,
'dashboard/product/product_image/modal/confirm_delete.html',
{'product': product, 'image': image})
@require_POST
@staff_member_required
def ajax_reorder_product_images(request, product_pk):
product = get_object_or_404(Product, pk=product_pk)
form = forms.ReorderProductImagesForm(request.POST, instance=product)
status = 200
ctx = {}
if form.is_valid():
form.save()
elif form.errors:
status = 400
ctx = {'error': form.errors}
return JsonResponse(ctx, status=status)
@require_POST
@staff_member_required
def ajax_upload_image(request, product_pk):
product = get_object_or_404(Product, pk=product_pk)
form = forms.UploadImageForm(
request.POST or None, request.FILES or None, product=product)
ctx = {}
status = 200
if form.is_valid():
image = form.save()
ctx = {'id': image.pk, 'image': None, 'order': image.sort_order}
elif form.errors:
status = 400
ctx = {'error': form.errors}
return JsonResponse(ctx, status=status)
@staff_member_required
@permission_required('product.manage_products')
def attribute_list(request):
attributes = (
Attribute.objects.prefetch_related(
'values', 'product_type', 'product_variant_type').order_by('name'))
attribute_filter = AttributeFilter(request.GET, queryset=attributes)
attributes = [(
attribute.pk, attribute.name,
attribute.product_type or attribute.product_variant_type,
attribute.values.all()) for attribute in attribute_filter.qs]
attributes = get_paginator_items(
attributes, settings.DASHBOARD_PAGINATE_BY, request.GET.get('page'))
ctx = {
'attributes': attributes,
'filter_set': attribute_filter,
'is_empty': not attribute_filter.queryset.exists()}
return TemplateResponse(
request, 'dashboard/product/attribute/list.html', ctx)
@staff_member_required
@permission_required('product.manage_products')
def attribute_details(request, pk):
attributes = Attribute.objects.prefetch_related(
'values', 'product_type', 'product_variant_type').all()
attribute = get_object_or_404(attributes, pk=pk)
product_type = attribute.product_type or attribute.product_variant_type
values = attribute.values.all()
ctx = {
'attribute': attribute, 'product_type': product_type, 'values': values}
return TemplateResponse(
request, 'dashboard/product/attribute/detail.html', ctx)
@staff_member_required
@permission_required('product.manage_products')
def attribute_create(request):
attribute = Attribute()
form = forms.AttributeForm(request.POST or None, instance=attribute)
if form.is_valid():
attribute = form.save()
msg = pgettext_lazy('Dashboard message', 'Added attribute')
messages.success(request, msg)
return redirect('dashboard:attribute-details', pk=attribute.pk)
ctx = {'attribute': attribute, 'form': form}
return TemplateResponse(
request,
'dashboard/product/attribute/form.html',
ctx)
@staff_member_required
@permission_required('product.manage_products')
def attribute_edit(request, pk):
attribute = get_object_or_404(Attribute, pk=pk)
form = forms.AttributeForm(request.POST or None, instance=attribute)
if form.is_valid():
attribute = form.save()
msg = pgettext_lazy('Dashboard message', 'Updated attribute')
messages.success(request, msg)
return redirect('dashboard:attribute-details', pk=attribute.pk)
ctx = {'attribute': attribute, 'form': form}
return TemplateResponse(
request,
'dashboard/product/attribute/form.html',
ctx)
@staff_member_required
@permission_required('product.manage_products')
def attribute_delete(request, pk):
attribute = get_object_or_404(Attribute, pk=pk)
if request.method == 'POST':
attribute.delete()
msg = pgettext_lazy(
'Dashboard message', 'Removed attribute %s') % (attribute.name,)
messages.success(request, msg)
return redirect('dashboard:attributes')
return TemplateResponse(
request,
'dashboard/product/attribute/modal/'
'attribute_confirm_delete.html',
{'attribute': attribute})
@staff_member_required
@permission_required('product.manage_products')
def attribute_value_create(request, attribute_pk):
attribute = get_object_or_404(Attribute, pk=attribute_pk)
value = AttributeValue(attribute_id=attribute_pk)
form = forms.AttributeValueForm(request.POST or None, instance=value)
if form.is_valid():
form.save()
msg = pgettext_lazy(
'Dashboard message', 'Added attribute\'s value')
messages.success(request, msg)
return redirect('dashboard:attribute-details', pk=attribute_pk)
ctx = {'attribute': attribute, 'value': value, 'form': form}
return TemplateResponse(
request,
'dashboard/product/attribute/values/form.html',
ctx)
@staff_member_required
@permission_required('product.manage_products')
def attribute_value_edit(request, attribute_pk, value_pk):
attribute = get_object_or_404(Attribute, pk=attribute_pk)
value = get_object_or_404(AttributeValue, pk=value_pk)
form = forms.AttributeValueForm(request.POST or None, instance=value)
if form.is_valid():
form.save()
msg = pgettext_lazy(
'Dashboard message', 'Updated attribute\'s value')
messages.success(request, msg)
return redirect('dashboard:attribute-details', pk=attribute_pk)
ctx = {'attribute': attribute, 'value': value, 'form': form}
return TemplateResponse(
request,
'dashboard/product/attribute/values/form.html',
ctx)
@staff_member_required
@permission_required('product.manage_products')
def attribute_value_delete(request, attribute_pk, value_pk):
value = get_object_or_404(AttributeValue, pk=value_pk)
if request.method == 'POST':
value.delete()
msg = pgettext_lazy(
'Dashboard message',
'Removed attribute\'s value %s') % (value.name,)
messages.success(request, msg)
return redirect('dashboard:attribute-details', pk=attribute_pk)
return TemplateResponse(
request,
'dashboard/product/attribute/values/modal/confirm_delete.html',
{'value': value, 'attribute_pk': attribute_pk})
@staff_member_required
@permission_required('product.manage_products')
def ajax_reorder_attribute_values(request, attribute_pk):
attribute = get_object_or_404(Attribute, pk=attribute_pk)
form = forms.ReorderAttributeValuesForm(
request.POST, instance=attribute)
status = 200
ctx = {}
if form.is_valid():
form.save()
elif form.errors:
status = 400
ctx = {'error': form.errors}
return JsonResponse(ctx, status=status)
| UITools/saleor | saleor/dashboard/product/views.py | Python | bsd-3-clause | 25,822 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Simple examples of the REINFORCE algorithm."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
distributions = tf.contrib.distributions
sg = tf.contrib.bayesflow.stochastic_graph
st = tf.contrib.bayesflow.stochastic_tensor
def split_apply_merge(inp, partitions, fns):
"""Split input according to partitions. Pass results through fns and merge.
Args:
inp: the input vector
partitions: tensor of same length as input vector, having values 0, 1
fns: the two functions.
Returns:
the vector routed, where routed[i] = fns[partitions[i]](inp[i])
"""
new_inputs = tf.dynamic_partition(inp, partitions, len(fns))
new_outputs = [fns[i](x) for i, x in enumerate(new_inputs)]
new_indices = tf.dynamic_partition(
tf.range(0, inp.get_shape()[0]), partitions, len(fns))
return tf.dynamic_stitch(new_indices, new_outputs)
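# A minimal sketch of the routing (inputs as in the model below): with
# inp = [-1, -1, 1, 1], partitions = [0, 0, 1, 1] and fns = [plus_1, minus_1],
# the merged output is [0, 0, 0, 0].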
def plus_1(inputs):
return inputs + 1.0
def minus_1(inputs):
return inputs - 1.0
def build_split_apply_merge_model():
"""Build the Split-Apply-Merge Model.
Route each value of input [-1, -1, 1, 1] through one of the
functions, plus_1, minus_1. The decision for routing is made by
4 Bernoulli R.V.s whose parameters are determined by a neural network
applied to the input. REINFORCE is used to update the NN parameters.
Returns:
The 3-tuple (route_selection, routing_loss, final_loss), where:
- route_selection is an int 4-vector
- routing_loss is a float 4-vector
- final_loss is a float scalar.
"""
inputs = tf.constant([[-1.0], [-1.0], [1.0], [1.0]])
targets = tf.constant([[0.0], [0.0], [0.0], [0.0]])
paths = [plus_1, minus_1]
weights = tf.get_variable("w", [1, 2])
bias = tf.get_variable("b", [1, 1])
logits = tf.matmul(inputs, weights) + bias
# REINFORCE forward step
route_selection = st.StochasticTensor(
distributions.Categorical(logits=logits))
# Accessing route_selection as a Tensor below forces a sample of
# the Categorical distribution based on its logits.
# This is equivalent to calling route_selection.value().
#
# route_selection.value() returns an int32 4-vector with random
# values in {0, 1}
# COPY+ROUTE+PASTE
outputs = split_apply_merge(inputs, route_selection, paths)
# flatten routing_loss to a row vector (from a column vector)
routing_loss = tf.reshape(tf.square(outputs - targets), shape=[-1])
# Total loss: score function loss + routing loss.
# The score function loss (through `route_selection.loss(routing_loss)`)
# returns:
# [stop_gradient(routing_loss) *
# route_selection.log_pmf(stop_gradient(route_selection.value()))],
# where log_pmf has gradients going all the way back to weights and bias.
# In this case, the routing_loss depends on the variables only through
# "route_selection", which has a stop_gradient on it. So the
  # gradient of the loss really comes through the score function.
surrogate_loss = sg.surrogate_loss([routing_loss])
final_loss = tf.reduce_sum(surrogate_loss)
return (route_selection, routing_loss, final_loss)
class REINFORCESimpleExample(tf.test.TestCase):
def testSplitApplyMerge(self):
# Repeatability. SGD has a tendency to jump around, even here.
tf.set_random_seed(1)
with self.test_session() as sess:
# Use sampling to train REINFORCE
with st.value_type(st.SampleAndReshapeValue(n=1)):
(route_selection,
routing_loss,
final_loss) = build_split_apply_merge_model()
sgd = tf.train.GradientDescentOptimizer(1.0).minimize(final_loss)
tf.initialize_all_variables().run()
for i in range(10):
# Run loss and inference step. This toy problem converges VERY quickly.
(routing_loss_v, final_loss_v, route_selection_v, _) = sess.run(
[routing_loss, final_loss, tf.identity(route_selection), sgd])
print(
"Iteration %d, routing loss: %s, final_loss: %s, "
"route selection: %s"
% (i, routing_loss_v, final_loss_v, route_selection_v))
self.assertAllEqual([0, 0, 1, 1], route_selection_v)
self.assertAllClose([0.0, 0.0, 0.0, 0.0], routing_loss_v)
self.assertAllClose(0.0, final_loss_v)
if __name__ == "__main__":
tf.test.main()
| tongwang01/tensorflow | tensorflow/contrib/bayesflow/examples/reinforce_simple/reinforce_simple_example.py | Python | apache-2.0 | 5,012 |
from setuptools import find_packages, setup
version = '0.1.2'
setup(
name='Trac2mite',
version=version,
description="Trac2mite connects your Trac account with your mite.account. Track your time easily on issues within Trac (requires 'TracHoursPlugin') and get them automatically send to mite.",
packages=find_packages(exclude=['*.tests*']),
author="Yolk - Sebastian Munz & Julia Soergel GbR / Thomas Klein",
author_email='[email protected]',
url="http://github.com/thomasklein/Trac2mite",
keywords="trac plugin mite yolk",
license="MIT License",
install_requires=['TracHoursPlugin'],
dependency_links=['http://trac-hacks.org/svn/trachoursplugin/0.11'],
include_package_data=True,
package_data={'trac2mite': ['templates/*.html',
'htdocs/css/*.css',
'htdocs/js/*.js',
'htdocs/images/*']},
entry_points = """
[trac.plugins]
trac2mite.trac2mite = trac2mite.trac2mite
trac2mite.setup = trac2mite.setup
trac2mite.userprefs = trac2mite.userprefs
trac2mite.web_ui = trac2mite.web_ui
"""
) | thomasklein/Trac2mite | 0.11/setup.py | Python | mit | 1,034 |
# Copyright (C) 2013 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: [email protected]
# Maintained By: [email protected]
| prasannav7/ggrc-core | test/integration/ggrc/behave/__init__.py | Python | apache-2.0 | 238 |
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import socket
import logging
import logging.handlers
import datetime
from oslo.config import cfg
__all__ = [
'FormatNamedFileHandler',
'ConfigurableSyslogHandler',
]
class FormatNamedFileHandler(logging.handlers.RotatingFileHandler):
def __init__(self, filename, mode='a', maxBytes=0, backupCount=0, encoding=None, delay=False):
# Include timestamp in the name.
filename = filename.format(ts=str(datetime.datetime.utcnow()).replace(' ', '_'),
pid=os.getpid())
super(FormatNamedFileHandler, self).__init__(filename, mode=mode, maxBytes=maxBytes,
backupCount=backupCount, encoding=encoding,
delay=delay)
class ConfigurableSyslogHandler(logging.handlers.SysLogHandler):
def __init__(self, address=None, facility=None, socktype=None):
if not address:
address = (cfg.CONF.syslog.host, cfg.CONF.syslog.port)
if not facility:
facility = cfg.CONF.syslog.facility
if not socktype:
protocol = cfg.CONF.syslog.protocol.lower()
if protocol == 'udp':
socktype = socket.SOCK_DGRAM
elif protocol == 'tcp':
socktype = socket.SOCK_STREAM
else:
raise ValueError('Unsupported protocol: %s' % (protocol))
if socktype:
super(ConfigurableSyslogHandler, self).__init__(address, facility, socktype)
else:
super(ConfigurableSyslogHandler, self).__init__(address, facility)
| pinterb/st2 | st2common/st2common/logging/handlers.py | Python | apache-2.0 | 2,387 |
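A hypothetical logging.config sketch of how a handler like FormatNamedFileHandler above is typically wired up; the dotted class path follows this file's location (st2common/st2common/logging/handlers.py), while the filename pattern and log level are made up.

import logging
import logging.config

logging.config.dictConfig({
    'version': 1,
    'formatters': {'plain': {'format': '%(asctime)s %(levelname)s %(message)s'}},
    'handlers': {
        'rotating': {
            # {ts} and {pid} are substituted by FormatNamedFileHandler.__init__.
            'class': 'st2common.logging.handlers.FormatNamedFileHandler',
            'filename': '/tmp/st2.{ts}.{pid}.log',
            'formatter': 'plain',
        },
    },
    'root': {'handlers': ['rotating'], 'level': 'DEBUG'},
})
logging.getLogger(__name__).info('handler demo')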
import numpy as np
# This function allows the code to compute numerical values for mathematical functions given as "String".
def f(x, s):
return eval(s)
# Function should return Energy Eigenvalue via "Shooting Method"
def findE(E0, Ef, tol, N, dx, f_string):
# Since we know an eigenvalue exists between E0 and Ef, we need to find out
# which values of E satisfy the necessary boundary conditions for the input potential
    Energy = np.linspace(E0, Ef, 10)  # 10 candidate energies, matching psiEnd below
psiEnd = np.zeros(10) # Initialize an array of 10 zeros as placeholders.
x = np.linspace(-20, 20, N)
for j in range(10): # Set up an array of psiEnd (boundary values) for given E values.
psi = np.zeros(N)
psi[0] = 0.0
psi[1] = dx
# This loop will calculate the value of psiEnd for each possible Energy value.
for i in range(N-2):
V = f(x[i], f_string)
psi[i+2] = ( -psi[i] + 2.*psi[i+1] - 2.*(Energy[j] - V) * (dx**2) * psi[i+1] )
psiEnd[j] = psi[-1] # Append the final value to the psiEnd array
if psiEnd[j-1] * psiEnd[j] < 0:
break
# Check to see if we've found a value that matches boundary conditions (we define the acceptable tolerance)
if abs(psiEnd[j]) < tol:
return Energy[j]
    elif abs(psiEnd[j-1]) < tol:
return Energy[j-1]
# If no value matches our defined tolerance, recursively call the findE function with the 2 energies we know it must occur between.
else:
return findE(Energy[j-1], Energy[j], tol, N, dx, f_string) | KalterTod/S-EqSolver | EqSolver.py | Python | mit | 1,444 |
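A hypothetical call of findE above, assuming the import and linspace fixes shown and that the bracket [E0, Ef] contains an eigenvalue; the potential is a harmonic well passed as a string of x, and the recursion tightens the bracket until the boundary value is within tol.

N = 2000
E = findE(0.0, 1.0, 1e-3, N, 40.0 / (N - 1), "0.5 * x**2")
print("approximate eigenvalue:", E)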
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""RPC compatible subprocess-type module.
This module defined both a task-side process class as well as a controller-side
process wrapper for easier access and usage of the task-side process.
"""
import logging
import os
import subprocess
import sys
import threading
import time
#pylint: disable=relative-import
import common_lib
# Map swarming_client to use subprocess42
sys.path.append(common_lib.SWARMING_DIR)
from utils import subprocess42
class TimeoutError(Exception):
pass
class ControllerProcessWrapper(object):
"""Controller-side process wrapper class.
This class provides a more intuitive interface to task-side processes
than calling the methods directly using the RPC object.
"""
def __init__(self, rpc, cmd, verbose=False, detached=False, cwd=None,
key=None, shell=None):
logging.debug('Creating a process with cmd=%s', cmd)
self._rpc = rpc
self._key = rpc.subprocess.Process(cmd, key)
logging.debug('Process created with key=%s', self._key)
if verbose:
self._rpc.subprocess.SetVerbose(self._key)
if detached:
self._rpc.subprocess.SetDetached(self._key)
if cwd:
self._rpc.subprocess.SetCwd(self._key, cwd)
if shell:
self._rpc.subprocess.SetShell(self._key)
self._rpc.subprocess.Start(self._key)
@property
def key(self):
return self._key
def Terminate(self):
logging.debug('Terminating process %s', self._key)
return self._rpc.subprocess.Terminate(self._key)
def Kill(self):
logging.debug('Killing process %s', self._key)
self._rpc.subprocess.Kill(self._key)
def Delete(self):
return self._rpc.subprocess.Delete(self._key)
def GetReturncode(self):
return self._rpc.subprocess.GetReturncode(self._key)
def ReadStdout(self):
"""Returns all stdout since the last call to ReadStdout.
This call allows the user to read stdout while the process is running.
However each call will flush the local stdout buffer. In order to make
multiple calls to ReadStdout and to retain the entire output the results
of this call will need to be buffered in the calling code.
"""
return self._rpc.subprocess.ReadStdout(self._key)
def ReadStderr(self):
"""Returns all stderr read since the last call to ReadStderr.
See ReadStdout for additional details.
"""
return self._rpc.subprocess.ReadStderr(self._key)
def ReadOutput(self):
"""Returns the (stdout, stderr) since the last Read* call.
See ReadStdout for additional details.
"""
return self._rpc.subprocess.ReadOutput(self._key)
def Wait(self, timeout=None):
return self._rpc.subprocess.Wait(self._key, timeout)
def Poll(self):
return self._rpc.subprocess.Poll(self._key)
def GetPid(self):
return self._rpc.subprocess.GetPid(self._key)
class Process(object):
"""Implements a task-side non-blocking subprocess.
This non-blocking subprocess allows the caller to continue operating while
also able to interact with this subprocess based on a key returned to
the caller at the time of creation.
Creation args are set via Set* methods called after calling Process but
before calling Start. This is due to a limitation of the XML-RPC
implementation not supporting keyword arguments.
"""
_processes = {}
_process_next_id = 0
_creation_lock = threading.Lock()
def __init__(self, cmd, key):
self.stdout = ''
self.stderr = ''
self.key = key
self.cmd = cmd
self.proc = None
self.cwd = None
self.shell = False
self.verbose = False
self.detached = False
self.complete = False
self.data_lock = threading.Lock()
self.stdout_file = open(self._CreateOutputFilename('stdout'), 'wb+')
self.stderr_file = open(self._CreateOutputFilename('stderr'), 'wb+')
def _CreateOutputFilename(self, fname):
return os.path.join(common_lib.GetOutputDir(), '%s.%s' % (self.key, fname))
def __str__(self):
return '%r, cwd=%r, verbose=%r, detached=%r' % (
self.cmd, self.cwd, self.verbose, self.detached)
def _reader(self):
for pipe, data in self.proc.yield_any():
with self.data_lock:
if pipe == 'stdout':
self.stdout += data
self.stdout_file.write(data)
self.stdout_file.flush()
if self.verbose:
sys.stdout.write(data)
else:
self.stderr += data
self.stderr_file.write(data)
self.stderr_file.flush()
if self.verbose:
sys.stderr.write(data)
self.complete = True
@classmethod
def KillAll(cls):
for key in cls._processes:
cls.Kill(key)
@classmethod
def Process(cls, cmd, key=None):
with cls._creation_lock:
if not key:
key = 'Process%d' % cls._process_next_id
cls._process_next_id += 1
if key in cls._processes:
raise KeyError('Key %s already in use' % key)
logging.debug('Creating process %s with cmd %r', key, cmd)
cls._processes[key] = cls(cmd, key)
return key
def _Start(self):
logging.info('Starting process %s', self)
self.proc = subprocess42.Popen(self.cmd, stdout=subprocess42.PIPE,
stderr=subprocess42.PIPE,
detached=self.detached, cwd=self.cwd,
shell=self.shell)
threading.Thread(target=self._reader).start()
@classmethod
def Start(cls, key):
cls._processes[key]._Start()
@classmethod
def SetCwd(cls, key, cwd):
"""Sets the process's cwd."""
logging.debug('Setting %s cwd to %s', key, cwd)
cls._processes[key].cwd = cwd
@classmethod
def SetShell(cls, key):
"""Sets the process's shell arg to True."""
logging.debug('Setting %s.shell = True', key)
cls._processes[key].shell = True
@classmethod
def SetDetached(cls, key):
"""Creates a detached process."""
logging.debug('Setting %s.detached = True', key)
cls._processes[key].detached = True
@classmethod
def SetVerbose(cls, key):
"""Sets the stdout and stderr to be emitted locally."""
logging.debug('Setting %s.verbose = True', key)
cls._processes[key].verbose = True
@classmethod
def Terminate(cls, key):
logging.debug('Terminating process %s', key)
cls._processes[key].proc.terminate()
@classmethod
def Kill(cls, key):
logging.debug('Killing process %s', key)
cls._processes[key].proc.kill()
@classmethod
def Delete(cls, key):
if cls.GetReturncode(key) is None:
logging.warning('Killing %s before deleting it', key)
cls.Kill(key)
logging.debug('Deleting process %s', key)
cls._processes.pop(key)
@classmethod
def GetReturncode(cls, key):
return cls._processes[key].proc.returncode
@classmethod
def ReadStdout(cls, key):
"""Returns all stdout since the last call to ReadStdout.
This call allows the user to read stdout while the process is running.
However each call will flush the local stdout buffer. In order to make
multiple calls to ReadStdout and to retain the entire output the results
of this call will need to be buffered in the calling code.
"""
proc = cls._processes[key]
with proc.data_lock:
# Perform a "read" on the stdout data
stdout = proc.stdout
proc.stdout = ''
return stdout
@classmethod
def ReadStderr(cls, key):
"""Returns all stderr read since the last call to ReadStderr.
See ReadStdout for additional details.
"""
proc = cls._processes[key]
with proc.data_lock:
# Perform a "read" on the stderr data
stderr = proc.stderr
proc.stderr = ''
return stderr
@classmethod
def ReadOutput(cls, key):
"""Returns the (stdout, stderr) since the last Read* call.
See ReadStdout for additional details.
"""
return cls.ReadStdout(key), cls.ReadStderr(key)
@classmethod
def Wait(cls, key, timeout=None):
"""Wait for the process to complete.
We wait for all of the output to be written before returning. This solves
a race condition found on Windows where the output can lag behind the
wait call.
Raises:
TimeoutError if the process doesn't finish in the specified timeout.
"""
end = None if timeout is None else timeout + time.time()
while end is None or end > time.time():
if cls._processes[key].complete:
return
time.sleep(0.05)
raise TimeoutError()
@classmethod
def Poll(cls, key):
return cls._processes[key].proc.poll()
@classmethod
def GetPid(cls, key):
return cls._processes[key].proc.pid
| Teamxrtc/webrtc-streaming-node | third_party/webrtc/src/chromium/src/testing/legion/process.py | Python | mit | 8,760 |
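A hypothetical controller-side sketch of ControllerProcessWrapper above; `rpc` stands in for the XML-RPC proxy that the controller receives from a task machine and is not constructed here.

proc = ControllerProcessWrapper(rpc, ['ls', '-l'], verbose=True)
proc.Wait(timeout=30)                 # task side raises TimeoutError if exceeded
stdout, stderr = proc.ReadOutput()    # each Read* call drains its local buffer
print('returncode:', proc.GetReturncode())
proc.Delete()                         # kills the process first if still running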
import pytest
from cli_config.script import script
from utility.nix_error import NixError
def test_script_rename_no_script(capsys):
with pytest.raises(SystemExit) as _excinfo:
script.script("nixconfig", ["rename"])
_out, _err = capsys.readouterr()
    assert _excinfo.value.code == 2, "Incorrect exit code. Should be 2, received {}".format(_excinfo.value.code)
assert len(_out) == 0, "StdOut should be empty, contains: {}".format(_out)
assert "the following arguments are required: name, name_new" in _err, "StdErr doesn't contain expected string"
def test_script_rename_one_script(capsys):
with pytest.raises(SystemExit) as _excinfo:
script.script("nixconfig", ["rename", "script1"])
_out, _err = capsys.readouterr()
    assert _excinfo.value.code == 2, "Incorrect exit code. Should be 2, received {}".format(_excinfo.value.code)
assert len(_out) == 0, "StdOut should be empty, contains: {}".format(_out)
assert "the following arguments are required: name_new" in _err, "StdErr doesn't contain expected string"
def test_script_rename_bad_script(capsys):
with pytest.raises(NixError) as _excinfo:
script.script("nixconfig", ["rename", "badscript", "newscript"])
_out, _err = capsys.readouterr()
assert "Unable to find script: badscript" in str(_excinfo.value)
    assert len(_out) == 0, "StdOut should be empty, contains: {}".format(_out)
    assert len(_err) == 0, "StdErr should be empty, contains: {}".format(_err)
def test_script_rename_to_exist_script(capsys):
with pytest.raises(NixError) as _excinfo:
script.script("nixconfig", ["rename", "badscript", "script1"])
_out, _err = capsys.readouterr()
assert "New script name is already used: script1" in str(_excinfo.value)
    assert len(_out) == 0, "StdOut should be empty, contains: {}".format(_out)
    assert len(_err) == 0, "StdErr should be empty, contains: {}".format(_err)
def test_script_rename_same(capsys):
with pytest.raises(NixError) as _excinfo:
script.script("nixconfig", ["rename", "script1", "script1"])
_out, _err = capsys.readouterr()
assert "Old and new script names are the same: script1" in str(_excinfo.value)
    assert len(_out) == 0, "StdOut should be empty, contains: {}".format(_out)
    assert len(_err) == 0, "StdErr should be empty, contains: {}".format(_err)
def test_script_rename_good_script(capsys):
script.script("nixconfig", ["rename", "script1", "newscript"])
_out, _err = capsys.readouterr()
    assert len(_out) == 0, "StdOut should be empty, contains: {}".format(_out)
    assert len(_err) == 0, "StdErr should be empty, contains: {}".format(_err)
script.script("nixconfig", ["rename", "newscript", "script1"])
_out, _err = capsys.readouterr()
    assert len(_out) == 0, "StdOut should be empty, contains: {}".format(_out)
    assert len(_err) == 0, "StdErr should be empty, contains: {}".format(_err)
| mbiciunas/nix | test/cli_config/script/test_script_rename.py | Python | gpl-3.0 | 2,951 |
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Base classes for our unit tests.
Allows overriding of flags for use of fakes, and some black magic for
inline callbacks.
"""
import os
import shutil
import uuid
import fixtures
import mock
from oslo_concurrency import lockutils
from oslo_config import cfg
from oslo_config import fixture as config_fixture
import oslo_i18n
from oslo_messaging import conffixture as messaging_conffixture
import oslotest.base as base_test
from manila.db import migration
from manila.db.sqlalchemy import api as db_api
from manila.db.sqlalchemy import models as db_models
from manila import rpc
from manila import service
from manila.tests import conf_fixture
from manila.tests import fake_notifier
test_opts = [
cfg.StrOpt('sqlite_clean_db',
default='clean.sqlite',
help='File name of clean sqlite database.'),
]
CONF = cfg.CONF
CONF.register_opts(test_opts)
_DB_CACHE = None
class Database(fixtures.Fixture):
def __init__(self, db_session, db_migrate, sql_connection, sqlite_db,
sqlite_clean_db):
self.sql_connection = sql_connection
self.sqlite_db = sqlite_db
self.sqlite_clean_db = sqlite_clean_db
self.engine = db_session.get_engine()
self.engine.dispose()
conn = self.engine.connect()
if sql_connection == "sqlite://":
self.setup_sqlite(db_migrate)
else:
testdb = os.path.join(CONF.state_path, sqlite_db)
db_migrate.upgrade('head')
if os.path.exists(testdb):
return
if sql_connection == "sqlite://":
conn = self.engine.connect()
self._DB = "".join(line for line in conn.connection.iterdump())
self.engine.dispose()
else:
cleandb = os.path.join(CONF.state_path, sqlite_clean_db)
shutil.copyfile(testdb, cleandb)
def setUp(self):
super(Database, self).setUp()
if self.sql_connection == "sqlite://":
conn = self.engine.connect()
conn.connection.executescript(self._DB)
self.addCleanup(self.engine.dispose) # pylint: disable=E1101
else:
shutil.copyfile(
os.path.join(CONF.state_path, self.sqlite_clean_db),
os.path.join(CONF.state_path, self.sqlite_db),
)
def setup_sqlite(self, db_migrate):
if db_migrate.version():
return
db_models.BASE.metadata.create_all(self.engine)
db_migrate.stamp('head')
class TestCase(base_test.BaseTestCase):
"""Test case base class for all unit tests."""
def setUp(self):
"""Run before each test method to initialize test environment."""
super(TestCase, self).setUp()
oslo_i18n.enable_lazy(enable=False)
conf_fixture.set_defaults(CONF)
CONF([], default_config_files=[])
global _DB_CACHE
if not _DB_CACHE:
_DB_CACHE = Database(
db_api,
migration,
sql_connection=CONF.database.connection,
sqlite_db=CONF.sqlite_db,
sqlite_clean_db=CONF.sqlite_clean_db,
)
self.useFixture(_DB_CACHE)
self.injected = []
self._services = []
self.flags(fatal_exception_format_errors=True)
# This will be cleaned up by the NestedTempfile fixture
lock_path = self.useFixture(fixtures.TempDir()).path
self.fixture = self.useFixture(config_fixture.Config(lockutils.CONF))
self.fixture.config(lock_path=lock_path, group='oslo_concurrency')
self.fixture.config(
disable_process_locking=True, group='oslo_concurrency')
rpc.add_extra_exmods('manila.tests')
self.addCleanup(rpc.clear_extra_exmods)
self.addCleanup(rpc.cleanup)
self.messaging_conf = messaging_conffixture.ConfFixture(CONF)
self.messaging_conf.transport_driver = 'fake'
self.messaging_conf.response_timeout = 15
self.useFixture(self.messaging_conf)
rpc.init(CONF)
mock.patch('keystoneauth1.loading.load_auth_from_conf_options').start()
fake_notifier.stub_notifier(self)
def tearDown(self):
"""Runs after each test method to tear down test environment."""
super(TestCase, self).tearDown()
# Reset any overridden flags
CONF.reset()
# Stop any timers
for x in self.injected:
try:
x.stop()
except AssertionError:
pass
# Kill any services
for x in self._services:
try:
x.kill()
except Exception:
pass
# Delete attributes that don't start with _ so they don't pin
# memory around unnecessarily for the duration of the test
# suite
for key in [k for k in self.__dict__.keys() if k[0] != '_']:
del self.__dict__[key]
def flags(self, **kw):
"""Override flag variables for a test."""
for k, v in kw.items():
CONF.set_override(k, v, enforce_type=True)
def start_service(self, name, host=None, **kwargs):
host = host and host or uuid.uuid4().hex
kwargs.setdefault('host', host)
kwargs.setdefault('binary', 'manila-%s' % name)
svc = service.Service.create(**kwargs)
svc.start()
self._services.append(svc)
return svc
def mock_object(self, obj, attr_name, new_attr=None, **kwargs):
"""Use python mock to mock an object attribute
Mocks the specified objects attribute with the given value.
Automatically performs 'addCleanup' for the mock.
"""
if not new_attr:
new_attr = mock.Mock()
patcher = mock.patch.object(obj, attr_name, new_attr, **kwargs)
patcher.start()
self.addCleanup(patcher.stop)
return new_attr
def mock_class(self, class_name, new_val=None, **kwargs):
"""Use python mock to mock a class
Mocks the specified objects attribute with the given value.
Automatically performs 'addCleanup' for the mock.
"""
if not new_val:
new_val = mock.Mock()
patcher = mock.patch(class_name, new_val, **kwargs)
patcher.start()
self.addCleanup(patcher.stop)
return new_val
# Useful assertions
def assertDictMatch(self, d1, d2, approx_equal=False, tolerance=0.001):
"""Assert two dicts are equivalent.
This is a 'deep' match in the sense that it handles nested
dictionaries appropriately.
NOTE:
If you don't care (or don't know) a given value, you can specify
the string DONTCARE as the value. This will cause that dict-item
to be skipped.
"""
def raise_assertion(msg):
d1str = str(d1)
d2str = str(d2)
base_msg = ('Dictionaries do not match. %(msg)s d1: %(d1str)s '
'd2: %(d2str)s' %
{"msg": msg, "d1str": d1str, "d2str": d2str})
raise AssertionError(base_msg)
d1keys = set(d1.keys())
d2keys = set(d2.keys())
if d1keys != d2keys:
d1only = d1keys - d2keys
d2only = d2keys - d1keys
raise_assertion('Keys in d1 and not d2: %(d1only)s. '
'Keys in d2 and not d1: %(d2only)s' %
{"d1only": d1only, "d2only": d2only})
for key in d1keys:
d1value = d1[key]
d2value = d2[key]
try:
error = abs(float(d1value) - float(d2value))
within_tolerance = error <= tolerance
except (ValueError, TypeError):
# If both values aren't convertible to float, just ignore
# ValueError if arg is a str, TypeError if it's something else
# (like None)
within_tolerance = False
if hasattr(d1value, 'keys') and hasattr(d2value, 'keys'):
self.assertDictMatch(d1value, d2value)
elif 'DONTCARE' in (d1value, d2value):
continue
elif approx_equal and within_tolerance:
continue
elif d1value != d2value:
raise_assertion("d1['%(key)s']=%(d1value)s != "
"d2['%(key)s']=%(d2value)s" %
{
"key": key,
"d1value": d1value,
"d2value": d2value
})
def assertDictListMatch(self, L1, L2, approx_equal=False, tolerance=0.001):
"""Assert a list of dicts are equivalent."""
def raise_assertion(msg):
L1str = str(L1)
L2str = str(L2)
base_msg = ('List of dictionaries do not match: %(msg)s '
'L1: %(L1str)s L2: %(L2str)s' %
{"msg": msg, "L1str": L1str, "L2str": L2str})
raise AssertionError(base_msg)
L1count = len(L1)
L2count = len(L2)
if L1count != L2count:
raise_assertion('Length mismatch: len(L1)=%(L1count)d != '
'len(L2)=%(L2count)d' %
{"L1count": L1count, "L2count": L2count})
for d1, d2 in zip(L1, L2):
self.assertDictMatch(d1, d2, approx_equal=approx_equal,
tolerance=tolerance)
def assertSubDictMatch(self, sub_dict, super_dict):
"""Assert a sub_dict is subset of super_dict."""
self.assertTrue(set(sub_dict.keys()).issubset(set(super_dict.keys())))
for k, sub_value in sub_dict.items():
super_value = super_dict[k]
if isinstance(sub_value, dict):
self.assertSubDictMatch(sub_value, super_value)
elif 'DONTCARE' in (sub_value, super_value):
continue
else:
self.assertEqual(sub_value, super_value)
def assertIn(self, a, b, *args, **kwargs):
"""Python < v2.7 compatibility. Assert 'a' in 'b'."""
try:
f = super(TestCase, self).assertIn
except AttributeError:
self.assertTrue(a in b, *args, **kwargs)
else:
f(a, b, *args, **kwargs)
def assertNotIn(self, a, b, *args, **kwargs):
"""Python < v2.7 compatibility. Assert 'a' NOT in 'b'."""
try:
f = super(TestCase, self).assertNotIn
except AttributeError:
self.assertFalse(a in b, *args, **kwargs)
else:
f(a, b, *args, **kwargs)
def assertIsInstance(self, a, b, *args, **kwargs):
"""Python < v2.7 compatibility."""
try:
f = super(TestCase, self).assertIsInstance
except AttributeError:
            self.assertTrue(isinstance(a, b), *args, **kwargs)
else:
f(a, b, *args, **kwargs)
def assertIsNone(self, a, *args, **kwargs):
"""Python < v2.7 compatibility."""
try:
f = super(TestCase, self).assertIsNone
except AttributeError:
self.assertTrue(a is None)
else:
f(a, *args, **kwargs)
def _dict_from_object(self, obj, ignored_keys):
if ignored_keys is None:
ignored_keys = []
        return {k: v for k, v in obj.items()
if k not in ignored_keys}
def _assertEqualListsOfObjects(self, objs1, objs2, ignored_keys=None):
obj_to_dict = lambda o: self._dict_from_object(o, ignored_keys)
sort_key = lambda d: [d[k] for k in sorted(d)]
conv_and_sort = lambda obj: sorted(map(obj_to_dict, obj), key=sort_key)
self.assertEqual(conv_and_sort(objs1), conv_and_sort(objs2))
| NetApp/manila | manila/test.py | Python | apache-2.0 | 12,603 |
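A minimal sketch of the DONTCARE and approx_equal conventions documented in assertDictMatch above, inside a hypothetical test case.

class ExampleDictMatchTest(TestCase):
    def test_dont_care_and_tolerance(self):
        observed = {'id': 42, 'status': 'ready', 'size': 1.0004}
        expected = {'id': 'DONTCARE',   # this key is skipped entirely
                    'status': 'ready',
                    'size': 1.0}        # |1.0 - 1.0004| <= tolerance
        self.assertDictMatch(expected, observed,
                             approx_equal=True, tolerance=0.001)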
#!/usr/bin/env python
# encoding: utf-8
from RedBlackTree import RedBlackTree
import random
"""
TP3.py
Teammates:
Tristan Savaria
"""
def main(n):
living = n
starting_gold = 1000000
RBPosition = RedBlackTree()
RBGold = RedBlackTree()
    #Set up the initial turn
for x in range(living):
post = 0 + random.uniform(-1, 1) * starting_gold
RBGold.Insert(post, starting_gold)
RBPosition.Insert(x, post)
while living > 2:
for x in range(living):
print("start")
print("RBPOSITION")
RBPosition.Print()
print("RBGOLD")
RBGold.Print()
oldPost = RBPosition.Find(x).getValue()
oldGold = RBGold.Find(oldPost).getValue()
post = oldPost + random.uniform(-1, 1) * oldGold
print("Removing " + str(oldPost) + " from RBGOLD")
RBGold.Remove(oldPost)
RBGold.Insert(post, oldGold)
#print("Inserting " + str(post) + " " + str(oldGold) + " in RBGOLD")
print("Removing " + str(x) + " from RBPOSITION")
RBPosition.Remove(x)
RBPosition.Print()
#RBGold.Print()
print("Inserting " + str(x) + " " + str(post) + " in RBPOSITION")
RBPosition.Insert(x, post)
RBPosition.Print()
print("done")
current = RBGold.Find(post)
nearest = None
if current.GetLeft() != None:
nearest = current.GetLeft().find_max()
elif current.GetRight() != None:
nearest = current.GetRight().find_min()
else:
nearest = current.GetParent()
stolen = nearest.getValue() // 2
nearest._value = stolen
# if nearest.getValue() == 0:
# print("dead")
# living -= 1
# RBGold.Remove(nearest.getKey())
# RBPost.Remove(RBPost._HORRIBLE_TP_RELATED_SEARCH(RBPost.self_tree, nearest.getKey()))
# newGold = current.getValue() + stolen
# print(post)
# RBGold.Remove(post)
# RBGold.Insert(post, newGold)
if living <= 2:
break
    RBPosition.Print()
RBGold.Print()
print("al")
main(10) | Tri125/IFT-2015-TP3 | TP3.py | Python | bsd-3-clause | 1,876 |
"""Tests for Transmission config flow."""
from datetime import timedelta
import pytest
from transmissionrpc.error import TransmissionError
from homeassistant import data_entry_flow
from homeassistant.components import transmission
from homeassistant.components.transmission import config_flow
from homeassistant.components.transmission.const import (
CONF_LIMIT,
CONF_ORDER,
DEFAULT_LIMIT,
DEFAULT_NAME,
DEFAULT_ORDER,
DEFAULT_PORT,
DEFAULT_SCAN_INTERVAL,
)
from homeassistant.const import (
CONF_HOST,
CONF_NAME,
CONF_PASSWORD,
CONF_PORT,
CONF_SCAN_INTERVAL,
CONF_USERNAME,
)
from tests.async_mock import patch
from tests.common import MockConfigEntry
NAME = "Transmission"
HOST = "192.168.1.100"
USERNAME = "username"
PASSWORD = "password"
PORT = 9091
SCAN_INTERVAL = 10
MOCK_ENTRY = {
CONF_NAME: NAME,
CONF_HOST: HOST,
CONF_USERNAME: USERNAME,
CONF_PASSWORD: PASSWORD,
CONF_PORT: PORT,
}
@pytest.fixture(name="api")
def mock_transmission_api():
"""Mock an api."""
with patch("transmissionrpc.Client"):
yield
@pytest.fixture(name="auth_error")
def mock_api_authentication_error():
"""Mock an api."""
with patch(
"transmissionrpc.Client", side_effect=TransmissionError("401: Unauthorized")
):
yield
@pytest.fixture(name="conn_error")
def mock_api_connection_error():
"""Mock an api."""
with patch(
"transmissionrpc.Client",
side_effect=TransmissionError("111: Connection refused"),
):
yield
@pytest.fixture(name="unknown_error")
def mock_api_unknown_error():
"""Mock an api."""
with patch("transmissionrpc.Client", side_effect=TransmissionError):
yield
@pytest.fixture(name="transmission_setup", autouse=True)
def transmission_setup_fixture():
"""Mock transmission entry setup."""
with patch(
"homeassistant.components.transmission.async_setup_entry", return_value=True
):
yield
def init_config_flow(hass):
"""Init a configuration flow."""
flow = config_flow.TransmissionFlowHandler()
flow.hass = hass
return flow
async def test_flow_user_config(hass, api):
"""Test user config."""
result = await hass.config_entries.flow.async_init(
transmission.DOMAIN, context={"source": "user"}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "user"
async def test_flow_required_fields(hass, api):
"""Test with required fields only."""
result = await hass.config_entries.flow.async_init(
transmission.DOMAIN,
context={"source": "user"},
data={CONF_NAME: NAME, CONF_HOST: HOST, CONF_PORT: PORT},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["title"] == NAME
assert result["data"][CONF_NAME] == NAME
assert result["data"][CONF_HOST] == HOST
assert result["data"][CONF_PORT] == PORT
async def test_flow_all_provided(hass, api):
"""Test with all provided."""
result = await hass.config_entries.flow.async_init(
transmission.DOMAIN, context={"source": "user"}, data=MOCK_ENTRY
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["title"] == NAME
assert result["data"][CONF_NAME] == NAME
assert result["data"][CONF_HOST] == HOST
assert result["data"][CONF_USERNAME] == USERNAME
assert result["data"][CONF_PASSWORD] == PASSWORD
assert result["data"][CONF_PORT] == PORT
async def test_options(hass):
"""Test updating options."""
entry = MockConfigEntry(
domain=transmission.DOMAIN,
title=CONF_NAME,
data=MOCK_ENTRY,
options={CONF_SCAN_INTERVAL: DEFAULT_SCAN_INTERVAL},
)
flow = init_config_flow(hass)
options_flow = flow.async_get_options_flow(entry)
result = await options_flow.async_step_init()
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "init"
result = await options_flow.async_step_init({CONF_SCAN_INTERVAL: 10})
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["data"][CONF_SCAN_INTERVAL] == 10
async def test_import(hass, api):
"""Test import step."""
flow = init_config_flow(hass)
# import with minimum fields only
result = await flow.async_step_import(
{
CONF_NAME: DEFAULT_NAME,
CONF_HOST: HOST,
CONF_PORT: DEFAULT_PORT,
CONF_SCAN_INTERVAL: timedelta(seconds=DEFAULT_SCAN_INTERVAL),
CONF_LIMIT: DEFAULT_LIMIT,
CONF_ORDER: DEFAULT_ORDER,
}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["title"] == DEFAULT_NAME
assert result["data"][CONF_NAME] == DEFAULT_NAME
assert result["data"][CONF_HOST] == HOST
assert result["data"][CONF_PORT] == DEFAULT_PORT
assert result["data"][CONF_SCAN_INTERVAL] == DEFAULT_SCAN_INTERVAL
# import with all
result = await flow.async_step_import(
{
CONF_NAME: NAME,
CONF_HOST: HOST,
CONF_USERNAME: USERNAME,
CONF_PASSWORD: PASSWORD,
CONF_PORT: PORT,
CONF_SCAN_INTERVAL: timedelta(seconds=SCAN_INTERVAL),
CONF_LIMIT: DEFAULT_LIMIT,
CONF_ORDER: DEFAULT_ORDER,
}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["title"] == NAME
assert result["data"][CONF_NAME] == NAME
assert result["data"][CONF_HOST] == HOST
assert result["data"][CONF_USERNAME] == USERNAME
assert result["data"][CONF_PASSWORD] == PASSWORD
assert result["data"][CONF_PORT] == PORT
assert result["data"][CONF_SCAN_INTERVAL] == SCAN_INTERVAL
async def test_host_already_configured(hass, api):
"""Test host is already configured."""
entry = MockConfigEntry(
domain=transmission.DOMAIN,
data=MOCK_ENTRY,
options={CONF_SCAN_INTERVAL: DEFAULT_SCAN_INTERVAL},
)
entry.add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
transmission.DOMAIN, context={"source": "user"}, data=MOCK_ENTRY
)
assert result["type"] == "abort"
assert result["reason"] == "already_configured"
async def test_name_already_configured(hass, api):
"""Test name is already configured."""
entry = MockConfigEntry(
domain=transmission.DOMAIN,
data=MOCK_ENTRY,
options={CONF_SCAN_INTERVAL: DEFAULT_SCAN_INTERVAL},
)
entry.add_to_hass(hass)
mock_entry = MOCK_ENTRY.copy()
mock_entry[CONF_HOST] = "0.0.0.0"
result = await hass.config_entries.flow.async_init(
transmission.DOMAIN, context={"source": "user"}, data=mock_entry
)
assert result["type"] == "form"
assert result["errors"] == {CONF_NAME: "name_exists"}
async def test_error_on_wrong_credentials(hass, auth_error):
"""Test with wrong credentials."""
flow = init_config_flow(hass)
result = await flow.async_step_user(
{
CONF_NAME: NAME,
CONF_HOST: HOST,
CONF_USERNAME: USERNAME,
CONF_PASSWORD: PASSWORD,
CONF_PORT: PORT,
}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["errors"] == {
CONF_USERNAME: "wrong_credentials",
CONF_PASSWORD: "wrong_credentials",
}
async def test_error_on_connection_failure(hass, conn_error):
"""Test when connection to host fails."""
flow = init_config_flow(hass)
result = await flow.async_step_user(
{
CONF_NAME: NAME,
CONF_HOST: HOST,
CONF_USERNAME: USERNAME,
CONF_PASSWORD: PASSWORD,
CONF_PORT: PORT,
}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["errors"] == {"base": "cannot_connect"}
async def test_error_on_unknown_error(hass, unknown_error):
    """Test on unknown error."""
flow = init_config_flow(hass)
result = await flow.async_step_user(
{
CONF_NAME: NAME,
CONF_HOST: HOST,
CONF_USERNAME: USERNAME,
CONF_PASSWORD: PASSWORD,
CONF_PORT: PORT,
}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["errors"] == {"base": "cannot_connect"}
| nkgilley/home-assistant | tests/components/transmission/test_config_flow.py | Python | apache-2.0 | 8,440 |
import datetime
import pyclassifiers.values
import config.general
project_github_username = 'veltzer'
project_name = 'pysigfd'
github_repo_name = project_name
project_website = 'https://{project_github_username}.github.io/{project_name}'.format(**locals())
project_website_source = 'https://github.com/{project_github_username}/{project_name}'.format(**locals())
project_website_git = 'git://github.com/{project_github_username}/{project_name}.git'.format(**locals())
project_website_download_ppa = 'https://launchpad.net/~mark-veltzer/+archive/ubuntu/ppa'
project_website_download_src = project_website_source
# noinspection SpellCheckingInspection
project_paypal_donate_button_id = 'ASPRXR59H2NTQ'
project_google_analytics_tracking_id = 'UA-56436979-1'
project_short_description = 'linux signal file descriptor for python'
project_long_description = project_short_description
# keywords to put on HTML pages or for search; don't put the name of the project
# or my details as they will be added automatically...
project_keywords = [
'signalfd',
'python3',
'linux',
]
project_license = 'MIT'
project_year_started = '2013'
project_description = project_short_description
project_platforms = [
'python3',
]
project_classifiers = [
pyclassifiers.values.DevelopmentStatus__4_Beta,
pyclassifiers.values.Environment__Console,
pyclassifiers.values.OperatingSystem__OSIndependent,
pyclassifiers.values.ProgrammingLanguage__Python,
pyclassifiers.values.ProgrammingLanguage__Python__3,
pyclassifiers.values.ProgrammingLanguage__Python__3__Only,
pyclassifiers.values.ProgrammingLanguage__Python__36,
pyclassifiers.values.ProgrammingLanguage__Python__37,
pyclassifiers.values.ProgrammingLanguage__Python__38,
pyclassifiers.values.Topic__Utilities,
pyclassifiers.values.License__OSIApproved__MITLicense,
]
project_data_files = []
# project_data_files.append(templar.utils.hlp_files_under('/usr/bin', 'src/*'))
project_copyright_years = ', '.join(
map(str, range(int(project_year_started), datetime.datetime.now().year + 1)))
if str(config.general.general_current_year) == project_year_started:
project_copyright_years_short = config.general.general_current_year
else:
project_copyright_years_short = '{0}-{1}'.format(project_year_started, config.general.general_current_year)
project_google_analytics_snipplet = '''<script type="text/javascript">
(function(i,s,o,g,r,a,m){{i['GoogleAnalyticsObject']=r;i[r]=i[r]||function(){{
(i[r].q=i[r].q||[]).push(arguments)}},i[r].l=1*new Date();a=s.createElement(o),
m=s.getElementsByTagName(o)[0];a.async=1;a.src=g;m.parentNode.insertBefore(a,m)
}})(window,document,'script','https://www.google-analytics.com/analytics.js','ga');
ga('create', '{0}', 'auto');
ga('send', 'pageview');
</script>'''.format(project_google_analytics_tracking_id)
project_paypal_donate_button_snipplet = '''<form action="https://www.paypal.com/cgi-bin/webscr"
method="post" target="_top">
<input type="hidden" name="cmd" value="_s-xclick">
<input type="hidden" name="hosted_button_id" value="{0}">
<input type="image" src="https://www.paypalobjects.com/en_US/IL/i/btn/btn_donateCC_LG.gif" name="submit"
alt="PayPal - The safer, easier way to pay online!">
<img alt="" border="0" src="https://www.paypalobjects.com/en_US/i/scr/pixel.gif" width="1" height="1">
</form>'''.format(project_paypal_donate_button_id)
| veltzer/python-sigfd | config/project.py | Python | gpl-3.0 | 3,406 |
#! /usr/bin/env python
# $Id: test_dependencies.py 5720 2008-10-31 17:50:17Z goodger $
# Author: Lea Wiemann <[email protected]>
# Copyright: This module has been placed in the public domain.
"""
Test module for the --record-dependencies option.
"""
import os.path
import unittest
import sys
import DocutilsTestSupport # must be imported before docutils
import docutils.core
import docutils.utils
class RecordDependenciesTests(unittest.TestCase):
# docutils.utils.DependencyList records relative URLs, not platform paths,
# so use "/" as a path separator even on Windows (not os.path.join).
def get_record(self, **settings):
recordfile = 'record.txt'
settings.setdefault('source_path',
os.path.join('data', 'dependencies.txt'))
settings.setdefault('settings_overrides', {})
settings['settings_overrides'] = settings['settings_overrides'].copy()
settings['settings_overrides']['_disable_config'] = 1
if 'record_dependencies' not in settings['settings_overrides']:
settings['settings_overrides']['record_dependencies'] = \
docutils.utils.DependencyList(recordfile)
docutils.core.publish_file(destination=DocutilsTestSupport.DevNull(),
**settings)
settings['settings_overrides']['record_dependencies'].close()
return open(recordfile).read().splitlines()
def test_dependencies(self):
self.assertEqual(self.get_record(),
['data/include.txt', 'data/raw.txt'])
self.assertEqual(self.get_record(writer_name='latex'),
['data/include.txt',
'data/raw.txt',
# this is a URL, not a path:
'some_image.png'])
def test_csv_dependencies(self):
try:
import csv
self.assertEqual(
self.get_record(source_path=os.path.join('data',
'csv_dep.txt')),
['data/csv_data.txt'])
except ImportError:
pass
def test_stylesheet_dependencies(self):
# Parameters to publish_file.
s = {'settings_overrides': {}}
so = s['settings_overrides']
so['embed_stylesheet'] = 0
# must use '/', not os.sep or os.path.join, because of URL handling
# (see docutils.utils.relative_path):
stylesheet_path = 'data/stylesheet.txt'
so['stylesheet_path'] = stylesheet_path
so['stylesheet'] = None
s['writer_name'] = 'html'
record = self.get_record(**s)
self.assert_(stylesheet_path not in record,
'%r should not be in %r' % (stylesheet_path, record))
so['embed_stylesheet'] = 1
record = self.get_record(**s)
self.assert_(stylesheet_path in record,
'%r should be in %r' % (stylesheet_path, record))
s['writer_name'] = 'latex'
record = self.get_record(**s)
self.assert_(stylesheet_path in record,
'%r should be in %r' % (stylesheet_path, record))
del so['embed_stylesheet']
record = self.get_record(**s)
self.assert_(stylesheet_path not in record,
'%r should not be in %r' % (stylesheet_path, record))
if __name__ == '__main__':
unittest.main()
| spreeker/democracygame | external_apps/docutils-snapshot/test/test_dependencies.py | Python | bsd-3-clause | 3,443 |
import binascii
import ipaddress
import os
import sys
from collections import deque
from itertools import islice
APP_KEY = "aiohttp_debugtoolbar"
TEMPLATE_KEY = "aiohttp_debugtoolbar_jinja2"
REDIRECT_CODES = (300, 301, 302, 303, 305, 307, 308)
STATIC_PATH = "static/"
ROOT_ROUTE_NAME = "debugtoolbar.main"
STATIC_ROUTE_NAME = "debugtoolbar.static"
EXC_ROUTE_NAME = "debugtoolbar.exception"
def hexlify(value):
# value must be int or bytes
if isinstance(value, int):
value = bytes(str(value), encoding="utf-8")
return str(binascii.hexlify(value), encoding="utf-8")
# TODO: refactor to simpler container or change to ordered dict
class ToolbarStorage(deque):
"""Deque for storing Toolbar objects."""
def __init__(self, max_elem):
super().__init__([], max_elem)
def get(self, request_id, default=None):
dict_ = dict(self)
return dict_.get(request_id, default)
def put(self, request_id, request):
self.appendleft((request_id, request))
def last(self, num_items):
"""Returns the last `num_items` Toolbar objects"""
return list(islice(self, 0, num_items))
class ExceptionHistory:
def __init__(self):
self.frames = {}
self.tracebacks = {}
self.eval_exc = "show"
def addr_in(addr, hosts):
for host in hosts:
if ipaddress.ip_address(addr) in ipaddress.ip_network(host):
return True
return False
def replace_insensitive(string, target, replacement):
"""Similar to string.replace() but is case insensitive
Code borrowed from: http://forums.devshed.com/python-programming-11/
case-insensitive-string-replace-490921.html
"""
no_case = string.lower()
index = no_case.rfind(target.lower())
if index >= 0:
start = index + len(target)
return string[:index] + replacement + string[start:]
else: # no results so return the original string
return string
def render(template_name, app, context, *, app_key=TEMPLATE_KEY, **kw):
lookup = app[app_key]
template = lookup.get_template(template_name)
c = context.copy()
c.update(kw)
txt = template.render(**c)
return txt
def common_segment_count(path, value):
"""Return the number of path segments common to both"""
i = 0
if len(path) <= len(value):
for x1, x2 in zip(path, value):
if x1 == x2:
i += 1
else:
return 0
return i
def format_fname(value, _sys_path=None):
if _sys_path is None:
_sys_path = sys.path # dependency injection
# If the value is not an absolute path, the it is a builtin or
# a relative file (thus a project file).
if not os.path.isabs(value):
if value.startswith(("{", "<")):
return value
if value.startswith("." + os.path.sep):
return value
return "." + os.path.sep + value
# Loop through sys.path to find the longest match and return
# the relative path from there.
prefix_len = 0
value_segs = value.split(os.path.sep)
for path in _sys_path:
count = common_segment_count(path.split(os.path.sep), value_segs)
if count > prefix_len:
prefix_len = count
return "<%s>" % os.path.sep.join(value_segs[prefix_len:])
def escape(s, quote=False):
"""Replace special characters "&", "<" and ">" to HTML-safe sequences. If
the optional flag `quote` is `True`, the quotation mark character is
also translated.
There is a special handling for `None` which escapes to an empty string.
:param s: the string to escape.
:param quote: set to true to also escape double quotes.
"""
if s is None:
return ""
if not isinstance(s, (str, bytes)):
s = str(s)
if isinstance(s, bytes):
try:
s.decode("ascii")
except UnicodeDecodeError:
s = s.decode("utf-8", "replace")
s = s.replace("&", "&").replace("<", "<").replace(">", ">")
if quote:
s = s.replace('"', """)
return s
class ContextSwitcher:
"""This object is alternative to *await*. It is useful in cases
when you need to track context switches inside coroutine.
see: https://www.python.org/dev/peps/pep-0380/#formal-semantics
"""
def __init__(self):
self._on_context_switch_out = []
self._on_context_switch_in = []
def add_context_in(self, callback):
if not callable(callback):
raise ValueError("callback should be callable")
self._on_context_switch_in.append(callback)
def add_context_out(self, callback):
if not callable(callback):
raise ValueError("callback should be callable")
self._on_context_switch_out.append(callback)
def __call__(self, expr):
def iterate():
            for callback in self._on_context_switch_in:
                callback()
_i = iter(expr.__await__())
try:
_y = next(_i)
except StopIteration as _e:
_r = _e.value
else:
while 1:
try:
                        for callback in self._on_context_switch_out:
                            callback()
_s = yield _y
                        for callback in self._on_context_switch_in:
                            callback()
except GeneratorExit as _e:
try:
_m = _i.close
except AttributeError:
pass
else:
_m()
raise _e
except BaseException as _e:
_x = sys.exc_info()
try:
_m = _i.throw
except AttributeError:
raise _e
else:
try:
_y = _m(*_x)
except StopIteration as _e:
_r = _e.value
break
else:
try:
if _s is None:
_y = next(_i)
else:
_y = _i.send(_s)
except StopIteration as _e:
_r = _e.value
break
result = _r
            for callback in self._on_context_switch_out:
                callback()
return result
return _Coro(iterate())
class _Coro:
__slots__ = ("_it",)
def __init__(self, it):
self._it = it
def __await__(self):
return self._it
| aio-libs/aiohttp-debugtoolbar | aiohttp_debugtoolbar/utils.py | Python | apache-2.0 | 6,867 |
# Spawn Area file created with PSWG Planetary Spawn Tool
import sys
from java.util import Vector
def addSpawnArea(core):
dynamicGroups = Vector()
dynamicGroups.add('dathomir_bolma')
dynamicGroups.add('dathomir_purbole')
dynamicGroups.add('dathomir_rancor')
dynamicGroups.add('dathomir_rhoa')
core.spawnService.addDynamicSpawnArea(dynamicGroups, -6000, 6000, 2500, 'dathomir')
return
| agry/NGECore2 | scripts/mobiles/spawnareas/dathomir_nw_1.py | Python | lgpl-3.0 | 391 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
This tools is used to read a tab seperated file and format it into a table
@author: Hoang Duc Chinh <[email protected]>
'''
# Copyright (c) 2015, Hoang Duc Chinh <[email protected]>
#
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in
#all copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
#THE SOFTWARE.
__author__ = "Hoang Duc Chinh <[email protected]>"
__copyright__ = "Copyright 2015, pydemo"
__credits__ = [ "Hoang Duc Chinh" ]
__license__ = "MIT"
__version__ = "0.1"
__maintainer__ = "Hoang Duc Chinh"
__email__ = "<[email protected]>"
__status__ = "Prototype"
import csv
import sys
import os
import argparse
def is_number(s):
''' check if a string is a float
Arguments:
s -- input string to be checked
'''
try:
float(s)
return True
except ValueError:
return False
def beautifulize(in_file_path,out_file_path):
''' Read a CSV file and format the data and then write the output to another
file.
Arguments:
in_file_path -- input file to be read
out_file_path -- output file which contains the formatted data
Return:
linecount ------ number of lines in the input file
'''
maxlengths = []
linecount = 0
# Identify the max lenght of each field
try:
with open(in_file_path,'r') as csvfile:
csvreader = csv.reader(csvfile,delimiter='\t',quotechar='"',quoting=csv.QUOTE_MINIMAL)
for row in csvreader:
if len(maxlengths) < len(row):
maxlengths += [0] * ( len(row) - len(maxlengths) )
for idx, val in enumerate(row):
if len(val) > maxlengths[idx]:
maxlengths[idx] = len(val)
linecount += 1
print(row)
print(maxlengths)
except Exception as err:
        print('Cannot process the input file %s' % in_file_path)
print(err)
return linecount
# Align each column respected to its max length
try:
print('Output as formatted data: ')
# If no output file, just print out the results
if out_file_path is None:
with open(in_file_path,'r') as csvfile:
csvreader = csv.reader(csvfile,delimiter='\t',quotechar='"',quoting=csv.QUOTE_MINIMAL)
for row in csvreader:
for idx, val in enumerate(row):
if maxlengths[idx] > len(val):
if (is_number(val)):
row[idx] = " " * (maxlengths[idx] - len(val)) + row[idx]
else:
row[idx] += " " * (maxlengths[idx] - len(val))
print(row)
# If there is output file, print out the results
# and write them to the output file
else:
with open(in_file_path,'r') as csvfile:
csvreader = csv.reader(csvfile,delimiter='\t',quotechar='"',quoting=csv.QUOTE_MINIMAL)
with open(out_file_path,'w') as csvfileout:
csvwriter = csv.writer(csvfileout)
for row in csvreader:
for idx, val in enumerate(row):
if maxlengths[idx] > len(val):
if (is_number(val)):
row[idx] = " " * (maxlengths[idx] - len(val)) + row[idx]
else:
row[idx] += " " * (maxlengths[idx] - len(val))
csvwriter.writerow(row)
print(row)
except Exception as err:
print('Cannot write to the output file %s' % out_file_path)
print(err)
return linecount
return linecount
########################################################################
def process_file(inputfilename, outputfilename, verbose=True):
'''Count the number of lines in a file if it exists (return -1 if file doesn't exist)
Arguments:
inputfilename -- Path to the file to be processed
outputfilename - output which contains the formatted data
Return -1 if the file cannot be found otherwise return the number of lines
in the input file
'''
length_of_file = -1
if os.path.isfile(inputfilename):
length_of_file = beautifulize(inputfilename, outputfilename)
if verbose:
print("Verbose mode - Length of file: %d" % (length_of_file))
else:
if verbose:
print("Verbose mode - Length of file: %d" % (length_of_file))
else:
print("I cannot find the file [%s]" % (inputfilename))
return length_of_file
#------------------------------------------------------------------------------
# Define the main method
#------------------------------------------------------------------------------
def main():
'''The main entry of the application (i.e. The tasks should start from here)
'''
parser = argparse.ArgumentParser(description="Allign table in csv file to beautifulize.")
parser.add_argument('inputfilepath', nargs='?', help='The path to the file that you want to process.')
    parser.add_argument('outputfilepath', nargs='?', help='Output file where you want to save the processed data.')
group = parser.add_mutually_exclusive_group()
group.add_argument("-v", "--verbose", action="store_true", help='Verbose mode which display line count of the input file')
if len(sys.argv) == 1:
# User didn't pass any value in, show help
parser.print_help()
else:
args = parser.parse_args()
if args.inputfilepath: # If there is an input, process it
if args.outputfilepath: # If there is an ouput, write to it
process_file(args.inputfilepath, args.outputfilepath, args.verbose)
else: # If not output, just print to screen
print('No output file, outcomes will NOT be stored!')
process_file(args.inputfilepath, None, args.verbose)
pass # Do nothing, yes Python has a statement to do nothing :D
#------------------------------------------------------------------------------
# Check if this file is run as an application
#------------------------------------------------------------------------------
if __name__ == "__main__":
# If the condition is true, execute the main method
main()
| dakside/pydemo | file_handling/beautiful_csv.py | Python | mit | 7,423 |
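A hypothetical direct use of process_file above, bypassing the CLI; data.tsv is a made-up tab-separated file. Numeric fields are padded on the left (right-aligned) and text fields on the right, so every column ends up as wide as its longest value.

process_file('data.tsv', 'aligned.csv', verbose=True)   # prints rows and the line count
process_file('data.tsv', None, verbose=False)           # print-only, nothing is written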
"""Thin wrapper around Werkzeug because Flask and Bottle
do not play nicely with async uwsgi"""
import json
from werkzeug.wrappers import Request, Response
from werkzeug.routing import Map, Rule
from werkzeug.exceptions import HTTPException, NotFound
from werkzeug.utils import redirect
from covador import ValidationDecorator, schema, list_schema
from covador.utils import merge_dicts, parse_qs
from covador.errors import error_to_json
class AppRequest(Request):
def after(self, func):
try:
handlers = self._after_request_handlers
except AttributeError:
handlers = self._after_request_handlers = []
handlers.append(func)
return func
class App:
def __init__(self):
self._url_map = Map(strict_slashes=False)
def route(self, rule, **kwargs):
def decorator(func):
kwargs['endpoint'] = func
self._url_map.add(Rule(rule, **kwargs))
return func
return decorator
def _dispatch(self, request):
adapter = self._url_map.bind_to_environ(request.environ)
try:
endpoint, values = adapter.match()
return endpoint(request, **values)
except HTTPException as e:
return e
def __call__(self, env, sr):
request = AppRequest(env)
response = self._dispatch(request)
after_handlers = getattr(request, '_after_request_handlers', None)
if after_handlers:
for h in after_handlers:
response = h(response) or response
return response(env, sr)
def error_handler(ctx): # pragma: no cover
return Response(error_to_json(ctx.exception), mimetype='application/json', status=400)
def get_qs(request):
try:
return request._covador_qs
except AttributeError:
qs = request._covador_qs = parse_qs(request.environ.get('QUERY_STRING', ''))
return qs
def get_form(request):
try:
return request._covador_form
except AttributeError:
form = request._covador_form = parse_qs(request.get_data(parse_form_data=False))
return form
_query_string = lambda request, *_args, **_kwargs: get_qs(request)
_form = lambda request, *_args, **_kwargs: get_form(request)
_params = lambda request, *_args, **_kwargs: merge_dicts(get_qs(request), get_form(request))
_rparams = lambda request, *_args, **kwargs: kwargs
_json = lambda request, *_args, **_kwargs: json.loads(request.get_data(parse_form_data=False))
query_string = ValidationDecorator(_query_string, error_handler, list_schema)
form = ValidationDecorator(_form, error_handler, list_schema)
params = ValidationDecorator(_params, error_handler, list_schema)
rparams = ValidationDecorator(_rparams, error_handler, list_schema)
json_body = ValidationDecorator(_json, error_handler, schema)
| guilhermedallanol/dotfiles | vim/plugged/vial-http/server/dswf.py | Python | mit | 2,837 |
import numpy
from Orange.classification import Learner, Model
from Orange.data import ContinuousVariable
from Orange.statistics import distribution
__all__ = ["MeanLearner"]
class MeanLearner(Learner):
"""
Fit a regression model that returns the average response (class) value.
"""
name = 'mean'
def fit_storage(self, data):
"""
Construct a :obj:`MeanModel` by computing the mean value of the given
data.
:param data: data table
:type data: Orange.data.Table
:return: regression model, which always returns mean value
:rtype: :obj:`MeanModel`
"""
if not isinstance(data.domain.class_var, ContinuousVariable):
raise ValueError("regression.MeanLearner expects a domain with a "
"(single) continuous variable")
dist = distribution.get_distribution(data, data.domain.class_var)
return MeanModel(dist)
# noinspection PyMissingConstructor
class MeanModel(Model):
"""
A regression model that returns the average response (class) value.
Instances can be constructed directly, by passing a distribution to the
constructor, or by calling the :obj:`MeanLearner`.
.. automethod:: __init__
"""
def __init__(self, dist, domain=None):
"""
Construct :obj:`Orange.regression.MeanModel` that always returns the
mean value computed from the given distribution.
If the distribution is empty, it constructs a model that returns zero.
:param dist: domain for the `Table`
:type dist: Orange.statistics.distribution.Continuous
:return: regression model that returns mean value
:rtype: :obj:`MeanModel`
"""
# Don't call super().__init__ because it will raise an error since
# domain is None.
self.domain = domain
self.dist = dist
if dist.any():
self.mean = self.dist.mean()
else:
self.mean = 0.0
# noinspection PyPep8Naming
def predict(self, X):
"""
Return predictions (that is, the same mean value) for each given
instance in `X`.
:param X: data for which to make predictions
:type X: :obj:`numpy.ndarray`
:return: a vector of predictions
:rtype: :obj:`numpy.ndarray`
"""
return numpy.full(len(X), self.mean)
def __str__(self):
return 'MeanModel({})'.format(self.mean)
| qusp/orange3 | Orange/regression/mean.py | Python | bsd-2-clause | 2,474 |
#! /usr/bin/env python
# ==========================================================================
# This Python script creates the node section of the NodeFunction using
# logarithmically spaced energy bins. The intensity scale is set to the
# HESS Crab intensity (assuming a power law).
#
# Copyright (C) 2012-2016 Juergen Knoedlseder
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# ==========================================================================
import sys
import math
# ============== #
# Write one node #
# ============== #
def write_node(f, energy, scale):
"""
Writes one node to XML file.
"""
# Convert to strings
str_energy = str(energy)
str_scale = str(scale)
# Write start tag
f.write(' <node>\n')
# Write energy
f.write(' <parameter scale="1e6" name="Energy"')
f.write(' min="'+str_energy+'" max="'+str_energy+'"')
f.write(' value="'+str_energy+'"')
f.write(' free="0"/>\n')
# Write intensity
f.write(' <parameter scale="'+str_scale+'" name="Intensity"')
f.write(' min="1e-5" max="1e5"')
f.write(' value="1.0"')
f.write(' free="1"/>\n')
# Write end tag
f.write(' </node>\n')
# Return
return
# ============ #
# Create nodes #
# ============ #
def create_nodes(emin, emax, enumbins):
"""
Create nodes (energies in TeV).
"""
# Open file
f = open("nodes.xml", "w")
# Set node boundaries
elogmin = math.log10(float(emin))
elogmax = math.log10(float(emax))
elogbin = (elogmax - elogmin)/(float(enumbins)-1.0)
# Fill arrays
for i in range(int(enumbins)):
# Compute energy
energy = math.pow(10.0, i*elogbin+elogmin)
# Compute scale (HESS Crab spectrum)
scale = 3.45e-17 * math.pow(energy, -2.63)
# Write node
write_node(f, energy, scale)
# Debug
        #sys.stdout.write(str(energy)+"\n")
        #sys.stdout.write(str(math.pow(10.0, int(math.log10(scale))-1.0))+"\n")
# Close file
f.close()
# Return
return
# ======================= #
# Main script entry point #
# ======================= #
if __name__ == '__main__':
# Check command line
usage = "Usage: cta_make_nodes emin emax enumbins"
    if len(sys.argv) < 4:
sys.stdout.write(usage+"\n")
sys.exit()
# Extract parameters
emin = sys.argv[1]
emax = sys.argv[2]
enumbins = sys.argv[3]
# Create nodes
create_nodes(emin, emax, enumbins)
| ctools/ctools | test/dev/cta_make_nodes.py | Python | gpl-3.0 | 3,101 |
#
# expression.py - Verilog expression parser data types
#
# Verilib - A Verilog HDL development framework
# Copyright (c) 2014, Patrick Dear, All rights reserved.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 3.0 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library.
#
from ..tokens import Tokens
from ..errors import print_error, VlSyntaxError
from .primary import *
class ConditionalExpression(SyntaxNode):
    def __init__(self, children=None):
        SyntaxNode.__init__(self)
        self.children = children if children is not None else []
@staticmethod
def parse(tkns):
raise Exception("Implement me!")
class RangeExpression(SyntaxNode):
def __init__(self):
super().__init__()
@staticmethod
def parse(tkns):
""" Parse a range expression:
range_expression :=
expression
| msb_constant_expression ':' lsb_constant_expression
| base_expression '+' ':' width_constant_expression
| base_expression '-' ':' width_constant_expression
"""
raise Exception("Implement me!")
class ConstantMinTypMaxExpression(SyntaxNode):
""" Constant mintypmax expression """
def __init__(self):
super().__init__()
@staticmethod
def parse_constant_mintypmax_expression(tkns):
""" Parse a constant primary:
constant_mintypmax_expression ::=
constant_expression
| constant_expression ':' constant_expression ':'
constant_expression
"""
ce1 = ConstantExpression.parse(tkns)
if tkns.accept(Tokens.COLON):
ce2 = ConstantExpression.parse(tkns)
tkns.expect(Tokens.COLON)
ce3 = ConstantExpression.parse(tkns)
else:
return ce1
        mintypmax_exp = ConstantMinTypMaxExpression()
mintypmax_exp.children = [ce1, ce2, ce3]
return mintypmax_exp
class ConstantExpression(SyntaxNode):
""" A verilog constant expression """
    def __init__(self, children=None):
        SyntaxNode.__init__(self)
        self.children = children if children is not None else []
def to_vl_string(self):
""" Transform the constant expression back into a verilog string """
if len(self.children) == 1:
return self.children[0].to_vl_string()
raise Exception("Implement me!")
@staticmethod
def _parse_Aprime(tkns):
""" Parse with our transformed, right recursive grammar:
A' :=
None
| binary_op A
| '?' A ':' A
"""
binary_op = BinaryOperator.parse(tkns)
if binary_op:
a = ConstantExpression.parse(tkns)
binary_op.children = [None, a]
return binary_op
if tkns.check(Tokens.QUESTION):
# conditional expression
raise Exception("Implement me!")
return None
@staticmethod
def _parse_A(tkns):
""" Parse with our transformed, right recursive grammar:
constant_expression (A) :=
constant_primary A'
| unary_op constant_primary A'
| string A'
"""
if tkns.check(Tokens.STRING):
s = tkns.current().text
a_prime = ConstantExpression._parse_Aprime(tkns)
return ConstantExpression([s, a_prime] if a_prime != None else [s])
uo = UnaryOperator.parse(tkns)
if uo:
cp = ConstantExpression.parse_constant_primary(tkns)
uo.children = [cp]
a_prime = ConstantExpression._parse_Aprime(tkns)
if a_prime != None:
tkns.error_here("Unary operator only has one operand")
children = [uo]
return ConstantExpression(children)
cp = ConstantExpression.parse_constant_primary(tkns)
a_prime = ConstantExpression._parse_Aprime(tkns)
if a_prime != None:
if isinstance(a_prime, BinaryOperator):
a_prime.children[0] = cp
children = [a_prime]
else:
raise Exception("Implement me!")
else:
children = [cp]
return ConstantExpression(children)
@staticmethod
def parse(tkns):
"""
constant_expression :=
constant_primary
| unary_operator constant_primary
| constant_expression binary_operator constant_expression
| constant_expression '?' constant_expression ':'
constant_expression
| string
Note that we have left recursion here... so we have to deal with that.
"""
return ConstantExpression._parse_A(tkns)
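    # Illustration of the left-recursion elimination used above (a sketch):
    # the left-recursive grammar
    #     A  := A binary_op A | constant_primary | ...
    # is rewritten as
    #     A  := constant_primary A'
    #     A' := <empty> | binary_op A | '?' A ':' A
    # so parsing "1 + 2" proceeds as _parse_A -> constant_primary(1),
    # then _parse_Aprime -> binary_op(+) followed by a recursive
    # _parse_A -> constant_primary(2).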
@staticmethod
def parse_constant_primary(tkns):
""" Parse a constant primary:
constant_primary ::=
constant_contatenation -- NOT IMPLEMENTED
| constant_function_call -- NOT IMPLEMENTED
| '(' constant_mintypmax_expression ')'
| number
| identifier
"""
if tkns.check(Tokens.IDENTIFIER):
return Identifier.parse(tkns)
n = Number.parse(tkns)
if n:
return n
raise Exception("Implement me!")
class Range(SyntaxNode):
""" A verilog number range, typically for wire widths, etc """
def __init__(self, hi=None, lo=None):
SyntaxNode.__init__(self)
self.high = hi
self.low = lo
self.children = [hi, lo]
@staticmethod
def parse(tkns):
""" Parse a range:
range :=
'[' msb_constant_expression ':' lsb_constant_expression ']'
"""
tkns.expect(Tokens.OPEN_BRACK)
r_hi = ConstantExpression.parse(tkns)
tkns.expect(Tokens.COLON)
r_lo = ConstantExpression.parse(tkns)
tkns.expect(Tokens.CLOSE_BRACK)
return Range(r_hi, r_lo)
| pdear/verilib | pytools/vlparse/types/expression.py | Python | lgpl-3.0 | 6,414 |
#!/usr/bin/python
import os,urllib2
os.system('clear')
def title():
print '''
-- UrduSecurity Free Tools Library --
-- Tool: %s --
-- Visit: %s --
-- Author: %s --
-- Release: %s --
    UrduSecurity - a Valley of Knowledge\n\n'''%(__Script__,__visit__,__Author__,__Release__)
# define variables
__visit__ = "http://urdusecurity.blogspot.com"
__Author__ = "Muhammad Adeel | Founder UrduSecurity (c) 2014"
__Script__ = "Header Injector =Free= Version"
__Release__ = "01/07/2014"
title()
__host__ = raw_input('Enter Target Host: ')
def urdusecurity():
print '''
()
-- Thanks For Using %s --
+-+-+-+-+-+-+-+-+-+-+-+-+
|U|r|d|u|S|e|c|u|r|i|t|y|
+-+-+-+-+-+-+-+-+-+-+-+-+
\n'''% __Script__
def HTTPHeaderInjector():
pre_payload = urllib2.Request(__host__)
pre_payload.add_header('UrduSecurity-Cookie', 'Hacked-By-UrduSecurity')
pre_payload.add_header('UrduSecurity-html', '<html>Stamped By UrduSecurity<br>-Muhammad Adeel-</html>')
send_payload = urllib2.urlopen(pre_payload)
if send_payload.headers.has_key('UrduSecurity-Cookie'):
os.system('clear')
urdusecurity()
print '[+] Target is Vulnerable to HTTP Header Injection'
print send_payload.headers.items()
raw_input('Hit Enter to Exit')
elif send_payload.headers.has_key('UrduSecurity-html'):
os.system('clear')
urdusecurity()
print '[+] Target is Vulnerable to HTTP Header Injection'
print send_payload.headers.items()
raw_input('Hit Enter to Exit')
else:
os.system('clear')
urdusecurity()
print '[-] Bad Luck, Try Another Host'
raw_input('Hit Enter to Exit')
HTTPHeaderInjector()
def main():
title()
urdusecurity()
if __name__ == '__main__':
main()
| Chaudhary-Adeel/UrduSecurityFreeTools | HTTPHeaderInjectory.py | Python | gpl-2.0 | 1,665 |
import webapp2
import models
class PrefsPage(webapp2.RequestHandler):
def post(self):
userprefs = models.get_userprefs()
try:
tz_offset = int(self.request.get('tz_offset'))
userprefs.tz_offset = tz_offset
userprefs.put()
except ValueError:
# User entered a value that wasn't an integer. Ignore for now.
pass
self.redirect('/')
application = webapp2.WSGIApplication([('/prefs', PrefsPage)],
debug=True)
| jscontreras/learning-gae | pgae-examples-master/2e/python/clock/clock4/prefs.py | Python | lgpl-3.0 | 541 |
##############################################################################
#
# OSIS stands for Open Student Information System. It's an application
# designed to manage the core business of higher education institutions,
# such as universities, faculties, institutes and professional schools.
# The core business involves the administration of students, teachers,
# courses, programs and so on.
#
# Copyright (C) 2015-2021 Université catholique de Louvain (http://www.uclouvain.be)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of this license - GNU General Public License - is available
# at the root of the source code of this program. If not,
# see http://www.gnu.org/licenses/.
#
##############################################################################
import functools
from collections import OrderedDict
from django.http import Http404
from django.urls import reverse
from django.utils.functional import cached_property
from django.utils.text import capfirst
from django.utils.translation import gettext_lazy as _
from django.views.generic import TemplateView
from base import models as mdl
from base.business.education_group import has_coorganization
from base.business.education_groups import general_information_sections
from base.models import academic_year
from base.models.enums.education_group_categories import Categories
from base.models.enums.education_group_types import TrainingType
from base.utils.urls import reverse_with_get
from base.views.common import display_warning_messages
from education_group.ddd import command
from education_group.ddd.business_types import *
from education_group.ddd.domain.service.identity_search import TrainingIdentitySearch
from education_group.ddd.service.read import get_group_service, get_training_service
from education_group.forms.academic_year_choices import get_academic_year_choices_for_trainings_and_mini_training
from education_group.forms.tree_version_choices import get_tree_versions_choices
from education_group.models.group_year import GroupYear
from education_group.views.mixin import ElementSelectedClipBoardMixin
from education_group.views.proxy import read
from osis_role.contrib.views import PermissionRequiredMixin
from program_management.ddd import command as command_program_management
from program_management.ddd.business_types import *
from program_management.ddd.domain.node import NodeIdentity, NodeNotFoundException
from program_management.ddd.domain.program_tree_version import version_label
from program_management.ddd.domain.service.identity_search import ProgramTreeVersionIdentitySearch
from program_management.ddd.repositories.program_tree_version import ProgramTreeVersionRepository
from program_management.ddd.service.read import node_identity_service
from program_management.forms.custom_xls import CustomXlsForm
from program_management.models.education_group_version import EducationGroupVersion
from program_management.models.element import Element
Tab = read.Tab # FIXME :: fix imports (and remove this line)
class TrainingRead(PermissionRequiredMixin, ElementSelectedClipBoardMixin, TemplateView):
# PermissionRequiredMixin
permission_required = 'base.view_educationgroup'
raise_exception = True
active_tab = None
@cached_property
def path(self):
path = self.request.GET.get('path')
if path is None:
root_element = Element.objects.get(
group_year__academic_year__year=self.kwargs['year'],
group_year__partial_acronym=self.kwargs['code'].upper()
)
path = str(root_element.pk)
return path
@cached_property
def is_root_node(self):
node_identity = node_identity_service.get_node_identity_from_element_id(
command_program_management.GetNodeIdentityFromElementId(element_id=self.get_root_id())
)
return node_identity == self.node_identity
@cached_property
def node_identity(self) -> 'NodeIdentity':
return NodeIdentity(code=self.kwargs['code'], year=self.kwargs['year'])
@cached_property
def training_identity(self) -> 'TrainingIdentity':
return TrainingIdentitySearch().get_from_program_tree_version_identity(self.program_tree_version_identity)
@cached_property
def program_tree_version_identity(self) -> 'ProgramTreeVersionIdentity':
return ProgramTreeVersionIdentitySearch().get_from_node_identity(self.node_identity)
@cached_property
def current_version(self) -> 'ProgramTreeVersion':
return ProgramTreeVersionRepository.get(self.program_tree_version_identity)
@cached_property
def education_group_version(self) -> 'EducationGroupVersion':
try:
return EducationGroupVersion.objects.select_related(
'offer', 'root_group__academic_year', 'root_group__education_group_type'
).get(
root_group__partial_acronym=self.kwargs["code"],
root_group__academic_year__year=self.kwargs["year"]
)
except (EducationGroupVersion.DoesNotExist, Element.DoesNotExist):
raise Http404
@cached_property
def group(self) -> 'Group':
get_group_cmd = command.GetGroupCommand(year=self.kwargs['year'], code=self.kwargs['code'])
return get_group_service.get_group(get_group_cmd)
@cached_property
def training(self) -> 'Training':
get_training_cmd = command.GetTrainingCommand(
year=self.kwargs['year'],
acronym=self.training_identity.acronym
)
return get_training_service.get_training(get_training_cmd)
def get_root_id(self) -> int:
return int(self.path.split("|")[0])
@cached_property
def get_object(self) -> 'Node':
try:
return self.get_tree().get_node(self.path)
except NodeNotFoundException:
root_node = self.get_tree().root_node
version_identity = self.program_tree_version_identity
message = _(
"The formation you work with doesn't exist (or is not at the same position) "
"in the tree {root.title}{version} in {root.year}."
"You've been redirected to the root {root.code} ({root.year})"
).format(
root=root_node,
version="[{}]".format(version_identity.version_name)
if version_identity and not version_identity.is_official_standard else ""
)
display_warning_messages(self.request, message)
return root_node
def get_context_data(self, **kwargs):
user_person = self.request.user.person
return {
**super().get_context_data(**kwargs),
"person": user_person,
"enums": mdl.enums.education_group_categories,
"tab_urls": self.get_tab_urls(),
"group": self.group,
"training": self.training, # TODO: Rename to training (DDD concept)
"node_path": self.path,
"form_xls_custom": CustomXlsForm(year=self.node_identity.year, code=self.node_identity.code),
"academic_year_choices": get_academic_year_choices_for_trainings_and_mini_training(
self.program_tree_version_identity,
_get_view_name_from_tab(self.active_tab),
) if self.is_root_node else None,
"selected_element_clipboard": self.get_selected_element_clipboard_message(),
"current_version": self.current_version,
"versions_choices": get_tree_versions_choices(self.node_identity, _get_view_name_from_tab(self.active_tab)),
"is_root_node": self.is_root_node,
"version_label": version_label(self.program_tree_version_identity),
# TODO: Two lines below to remove when finished reorganized templates
"education_group_version": self.education_group_version,
"group_year": self.education_group_version.root_group,
"tree_json_url": self.get_tree_json_url(),
"tree_root_id": self.get_root_id(),
"create_group_url": self.get_create_group_url(),
"create_training_url": self.get_create_training_url(),
"create_mini_training_url": self.get_create_mini_training_url(),
"update_training_url": self.get_update_training_url(),
"update_permission_name": self.get_update_permission_name(),
"delete_permanently_training_url": self.get_delete_permanently_training_url(),
"delete_permanently_tree_version_url": self.get_delete_permanently_tree_version_url(),
"delete_permanently_tree_version_permission_name":
self.get_delete_permanently_tree_version_permission_name(),
"fill_transition_version_content_url": self.get_fill_transition_version_content_url(),
"fill_transition_version_content_permission_name":
self.get_fill_transition_version_content_permission_name(),
"create_specific_version_url": self.get_create_specific_version_url(),
"create_transition_version_url": self.get_create_transition_version_url(),
"create_version_permission_name": self.get_create_version_permission_name(),
"create_transition_version_permission_name": self.get_create_transition_version_permission_name(),
"xls_ue_prerequisites": reverse("education_group_learning_units_prerequisites",
args=[self.education_group_version.root_group.academic_year.year,
self.education_group_version.root_group.partial_acronym]
),
"xls_ue_is_prerequisite": reverse("education_group_learning_units_is_prerequisite_for",
args=[self.education_group_version.root_group.academic_year.year,
self.education_group_version.root_group.partial_acronym]
),
"generate_pdf_url": reverse("group_pdf_content",
args=[self.education_group_version.root_group.academic_year.year,
self.education_group_version.root_group.partial_acronym,
]
),
"show_coorganization": has_coorganization(self.education_group_version.offer),
"view_publish_btn":
self.request.user.has_perm('base.view_publish_btn') and
(self.have_general_information_tab() or self.have_access_requirements_tab() or
self.have_skills_and_achievements_tab()),
"publish_url": self.get_publish_url(),
"active_tab": self.active_tab.name,
}
def get_permission_object(self) -> 'GroupYear':
return self.education_group_version.root_group
def get_create_group_url(self):
return reverse('create_element_select_type', kwargs={'category': Categories.GROUP.name}) + \
"?path_to={}".format(self.path)
def get_create_mini_training_url(self):
return reverse('create_element_select_type', kwargs={'category': Categories.MINI_TRAINING.name}) + \
"?path_to={}".format(self.path)
def get_create_training_url(self):
return reverse('create_element_select_type', kwargs={'category': Categories.TRAINING.name}) + \
"?path_to={}".format(self.path)
def get_update_training_url(self):
if self.current_version.is_official_standard:
return reverse_with_get(
'training_update',
kwargs={'code': self.kwargs['code'], 'year': self.kwargs['year'],
'title': self.training_identity.acronym},
get={"path_to": self.path, "tab": self.active_tab.name}
)
return reverse_with_get(
'training_version_update',
kwargs={'code': self.kwargs['code'], 'year': self.kwargs['year']},
get={"path_to": self.path, "tab": self.active_tab.name}
)
def get_update_permission_name(self) -> str:
if self.current_version.is_official_standard:
return "base.change_training"
return "program_management.change_training_version"
def get_delete_permanently_training_url(self):
if self.program_tree_version_identity.is_official_standard:
return reverse(
'training_delete',
kwargs={'year': self.node_identity.year, 'code': self.node_identity.code}
)
def get_delete_permanently_tree_version_url(self):
if not self.program_tree_version_identity.is_official_standard:
return reverse(
'delete_permanently_tree_version',
kwargs={
'year': self.node_identity.year,
'code': self.node_identity.code,
}
)
def get_delete_permanently_tree_version_permission_name(self):
return "program_management.delete_permanently_training_version"
def get_fill_transition_version_content_permission_name(self):
return "base.fill_training_version"
def get_fill_transition_version_content_url(self):
if self.is_root_node and self.program_tree_version_identity.is_transition:
return reverse(
"fill_transition_version_content",
kwargs={
'year': self.current_version.entity_id.year,
'acronym': self.current_version.entity_id.offer_acronym,
'transition_name': self.current_version.entity_id.transition_name,
'version_name': self.current_version.entity_id.version_name,
}
)
def get_create_specific_version_url(self):
if self.is_root_node and self.program_tree_version_identity.is_official_standard:
return reverse(
'create_education_group_specific_version',
kwargs={'year': self.node_identity.year, 'code': self.node_identity.code}
) + "?path={}".format(self.path)
def get_create_transition_version_url(self):
if self.is_root_node and not self.program_tree_version_identity.is_transition:
return reverse(
'create_education_group_transition_version',
kwargs={'year': self.node_identity.year, 'code': self.node_identity.code}
) + "?path={}".format(self.path)
def get_tree_json_url(self) -> str:
return reverse_with_get(
'tree_json',
kwargs={'root_id': self.get_root_id()},
get={"path": self.path}
)
def get_create_version_permission_name(self) -> str:
return "base.add_training_version"
def get_create_transition_version_permission_name(self) -> str:
return "base.add_training_transition_version"
def get_tab_urls(self):
tab_urls = OrderedDict({
Tab.IDENTIFICATION: {
'text': _('Identification'),
'active': Tab.IDENTIFICATION == self.active_tab,
'display': True,
'url': get_tab_urls(Tab.IDENTIFICATION, self.node_identity, self.path),
},
Tab.DIPLOMAS_CERTIFICATES: {
'text': _('Diplomas / Certificates'),
'active': Tab.DIPLOMAS_CERTIFICATES == self.active_tab,
'display': self.current_version.is_official_standard,
'url': get_tab_urls(Tab.DIPLOMAS_CERTIFICATES, self.node_identity, self.path),
},
Tab.ADMINISTRATIVE_DATA: {
'text': _('Administrative data'),
'active': Tab.ADMINISTRATIVE_DATA == self.active_tab,
'display': self.have_administrative_data_tab(),
'url': get_tab_urls(Tab.ADMINISTRATIVE_DATA, self.node_identity, self.path),
},
Tab.CONTENT: {
'text': _('Content'),
'active': Tab.CONTENT == self.active_tab,
'display': True,
'url': get_tab_urls(Tab.CONTENT, self.node_identity, self.path),
},
Tab.UTILIZATION: {
'text': _('Utilizations'),
'active': Tab.UTILIZATION == self.active_tab,
'display': True,
'url': get_tab_urls(Tab.UTILIZATION, self.node_identity, self.path),
},
Tab.GENERAL_INFO: {
'text': _('General informations'),
'active': Tab.GENERAL_INFO == self.active_tab,
'display': self.have_general_information_tab(),
'url': get_tab_urls(Tab.GENERAL_INFO, self.node_identity, self.path),
},
Tab.SKILLS_ACHIEVEMENTS: {
'text': capfirst(_('skills and achievements')),
'active': Tab.SKILLS_ACHIEVEMENTS == self.active_tab,
'display': self.have_skills_and_achievements_tab(),
'url': get_tab_urls(Tab.SKILLS_ACHIEVEMENTS, self.node_identity, self.path),
},
Tab.ACCESS_REQUIREMENTS: {
'text': _('Conditions'),
'active': Tab.ACCESS_REQUIREMENTS == self.active_tab,
'display': self.have_access_requirements_tab(),
'url': get_tab_urls(Tab.ACCESS_REQUIREMENTS, self.node_identity, self.path),
},
})
return read.validate_active_tab(tab_urls)
@functools.lru_cache()
def get_current_academic_year(self):
return academic_year.starting_academic_year()
def have_administrative_data_tab(self):
return self.group.type not in TrainingType.root_master_2m_types_enum() and \
self.current_version.is_official_standard
def have_general_information_tab(self):
return self.current_version.is_official_standard and \
self.group.type.name in general_information_sections.SECTIONS_PER_OFFER_TYPE
def have_skills_and_achievements_tab(self):
return self.current_version.is_official_standard and \
self.group.type.name in TrainingType.with_skills_achievements()
def have_access_requirements_tab(self):
return self.current_version.is_official_standard and \
self.group.type.name in TrainingType.with_access_requirements()
def get_publish_url(self):
return reverse('publish_general_information', args=[
self.node_identity.year,
self.node_identity.code
]) + "?path={}".format(self.path)
def _get_view_name_from_tab(tab: Tab):
return {
Tab.IDENTIFICATION: 'training_identification',
Tab.DIPLOMAS_CERTIFICATES: 'training_diplomas',
Tab.ADMINISTRATIVE_DATA: 'training_administrative_data',
Tab.CONTENT: 'training_content',
Tab.UTILIZATION: 'training_utilization',
Tab.GENERAL_INFO: 'training_general_information',
Tab.SKILLS_ACHIEVEMENTS: 'training_skills_achievements',
Tab.ACCESS_REQUIREMENTS: 'training_access_requirements',
}[tab]
def get_tab_urls(tab: Tab, node_identity: 'NodeIdentity', path: 'Path' = None) -> str:
path = path or ""
url_parameters = \
"?path={}&tab={}#achievement_".format(path, tab) if tab == Tab.SKILLS_ACHIEVEMENTS else "?path={}".format(path)
return reverse(
_get_view_name_from_tab(tab),
args=[node_identity.year, node_identity.code]
) + url_parameters
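# Shape of the URLs produced by get_tab_urls (illustrative only; the exact
# prefix is whatever reverse() resolves from the project's urlconf):
#   most tabs:            <tab-view-url for year/code>?path=<path>
#   skills/achievements:  <tab-view-url for year/code>?path=<path>&tab=<tab>#achievement_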
| uclouvain/osis | education_group/views/training/common_read.py | Python | agpl-3.0 | 20,155 |
# Copyright (c) LinkedIn Corporation. All rights reserved. Licensed under the BSD-2 Clause license.
# See LICENSE in the project root for license information.
from iris.custom_import import import_custom_module
import logging
logger = logging.getLogger(__name__)
class IrisRoleLookupException(Exception):
pass
def get_role_lookups(config):
modules = config.get('role_lookups', [])
    # Default to supporting only user and mailing_list.
if not modules:
modules = ['user', 'mailing_list']
imported_modules = []
for m in modules:
try:
imported_modules.append(
import_custom_module('iris.role_lookup', m)(config))
logger.info('Loaded lookup modules: %s', m)
except Exception:
logger.exception('Failed to load role lookup module: %s', m)
return imported_modules
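# Illustrative configuration consumed above (a sketch; the 'ldap' entry is
# hypothetical and would only load if iris.role_lookup.ldap is importable):
#
#     config = {'role_lookups': ['user', 'mailing_list', 'ldap']}
#     lookups = get_role_lookups(config)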
| dwang159/iris-api | src/iris/role_lookup/__init__.py | Python | bsd-2-clause | 863 |
# -*- coding: utf-8 -*-
# Copyright (c) 2010-2017 Tuukka Turto
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
Module for UI Configuration
"""
import curses
class CursesControlsConfiguration():
"""
Configuration for user interface controls
.. versionadded:: 0.9
"""
def __init__(self):
"""
Default constructor
"""
super().__init__()
self.move_left = ['4']
self.move_up = ['8']
self.move_right = ['6']
self.move_down = ['2']
self.action_a = ['5']
self.back = [' ']
self.colours = {}
if curses.has_colors():
            curses.init_pair(1, curses.COLOR_BLUE, curses.COLOR_BLACK)
            curses.init_pair(2, curses.COLOR_CYAN, curses.COLOR_BLACK)
            curses.init_pair(3, curses.COLOR_GREEN, curses.COLOR_BLACK)
            curses.init_pair(4, curses.COLOR_MAGENTA, curses.COLOR_BLACK)
            curses.init_pair(5, curses.COLOR_RED, curses.COLOR_BLACK)
            curses.init_pair(6, curses.COLOR_WHITE, curses.COLOR_BLACK)
            curses.init_pair(7, curses.COLOR_YELLOW, curses.COLOR_BLACK)
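        # The pairs registered above are consumed elsewhere through the
        # standard curses API, e.g. (illustrative only):
        #
        #     screen.addstr(0, 0, 'hello', curses.color_pair(3))
        #
        # where pair 3 renders green on black as initialised here.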
| tuturto/pyherc | src/herculeum/ui/text/config.py | Python | mit | 2,158 |
# Copyright (c) 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
| istalker2/yaql | yaql/functions/__init__.py | Python | apache-2.0 | 613 |
class Node(object):
def __init__(self, tag, value, start_mark, end_mark):
self.tag = tag
self.value = value
self.start_mark = start_mark
self.end_mark = end_mark
def __repr__(self):
value = self.value
#if isinstance(value, list):
# if len(value) == 0:
# value = '<empty>'
# elif len(value) == 1:
# value = '<1 item>'
# else:
# value = '<%d items>' % len(value)
#else:
# if len(value) > 75:
# value = repr(value[:70]+u' ... ')
# else:
# value = repr(value)
value = repr(value)
return '%s(tag=%r, value=%s)' % (self.__class__.__name__, self.tag, value)
class ScalarNode(Node):
id = 'scalar'
def __init__(self, tag, value,
start_mark=None, end_mark=None, style=None):
self.tag = tag
self.value = value
self.start_mark = start_mark
self.end_mark = end_mark
self.style = style
class CollectionNode(Node):
def __init__(self, tag, value,
start_mark=None, end_mark=None, flow_style=None):
self.tag = tag
self.value = value
self.start_mark = start_mark
self.end_mark = end_mark
self.flow_style = flow_style
class SequenceNode(CollectionNode):
id = 'sequence'
class MappingNode(CollectionNode):
id = 'mapping'
| croxis/SpaceDrive | spacedrive/renderpipeline/rplibs/yaml/yaml_py3/nodes.py | Python | mit | 1,489 |
# browsershots.org - Test your web design in different browsers
# Copyright (C) 2007 Johann C. Rocholl <[email protected]>
#
# Browsershots is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# Browsershots is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
GUI-specific interface functions for Internet Explorer on Microsoft Windows.
"""
__revision__ = "$Rev$"
__date__ = "$Date$"
__author__ = "$Author$"
import os
import time
import _winreg
from win32com.shell import shellcon
from win32com.shell import shell
from shotfactory04.gui import windows
class Gui(windows.Gui):
"""
Special functions for MSIE on Windows.
"""
def reset_browser(self):
"""
Delete all files from the browser cache.
"""
cache = shell.SHGetFolderPath(0, shellcon.CSIDL_INTERNET_CACHE, 0, 0)
cache = os.path.join(cache, 'Content.IE5')
if not os.path.exists(cache):
return
if self.verbose:
print 'deleting cache', cache
for filename in os.listdir(cache):
if filename.lower() != 'index.dat':
self.delete_if_exists(os.path.join(cache, filename))
def check_version_override(self, major, minor):
"""
Raise RuntimeError if conditional comments will be broken.
"""
root_key = _winreg.HKEY_LOCAL_MACHINE
key_name = r'Software\Microsoft\Internet Explorer\Version Vector'
try:
key = _winreg.OpenKey(root_key, key_name)
registered = _winreg.QueryValueEx(key, 'IE')[0]
key.Close()
except EnvironmentError:
return
expected_minor = '%d' % minor
registered_minor = registered.split('.')[1]
requested_minor = '0'
while len(requested_minor) < len(registered_minor):
requested_minor += '0'
requested = '%s.%s' % (major, requested_minor)
if registered != requested:
print "This registry key overrides the browser version:"
print r"HKEY_LOCAL_MACHINE\%s\IE" % key_name
print "Requested version: %s, Registry override: %s" % (
requested, registered)
print "Please rename or delete the key 'IE' with regedit."
raise RuntimeError("browser version override in the registry")
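    # The override being checked lives in the registry as, for example:
    #   HKEY_LOCAL_MACHINE\Software\Microsoft\Internet Explorer\Version Vector
    #       IE = "7.0000"
    # which makes conditional comments report IE 7 regardless of the browser
    # actually installed (the value shown is illustrative).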
def start_browser(self, config, url, options):
"""
Start browser and load website.
"""
self.major = config['major']
self.check_version_override(config['major'], config['minor'])
command = config['command'] or r'c:\progra~1\intern~1\iexplore.exe'
print 'running', command
try:
import subprocess
except ImportError:
os.spawnl(os.P_DETACH, command, os.path.basename(command), url)
else:
subprocess.Popen([command, url])
print "Sleeping %d seconds while page is loading." % options.wait
time.sleep(options.wait)
def find_scrollable(self):
"""
Find the scrollable window.
"""
if self.major < 5:
ieframe = self.find_window_by_classname('CabinetWClass')
else:
ieframe = self.find_window_by_classname('IEFrame')
frametab = self.find_child_window_by_classname(
ieframe, "Frame Tab")
if frametab:
ieframe = frametab
tabs = self.find_child_window_by_classname(
ieframe, "TabWindowClass")
if tabs:
ieframe = tabs
if self.major > 8:
return self.find_child_window_by_classname(
ieframe, "Internet Explorer_Server")
else:
return self.find_child_window_by_classname(
ieframe, "Shell DocObject View")
# Test scrolling from command line
if __name__ == '__main__':
config = {
'width': 1024,
'height': 768,
'bpp': 24,
'request': 123,
}
class Options:
verbose = 3
max_pages = 7
gui = Gui(config, Options())
gui.down()
time.sleep(1)
gui.scroll_bottom()
| skerit/shotfactory | shotfactory04/gui/windows/msie.py | Python | gpl-3.0 | 4,589 |
#!/usr/bin/env python2.7
from django.core.management import execute_manager
try:
import settings # Assumed to be in the same directory.
except ImportError:
import sys
sys.stderr.write("Error: Can't find the file 'settings.py' in the directory containing %r. It appears you've customized things.\nYou'll have to run django-admin.py, passing it your settings module.\n(If the file settings.py does indeed exist, it's causing an ImportError somehow.)\n" % __file__)
sys.exit(1)
if __name__ == "__main__":
execute_manager(settings)
| haroldl/homeworklog | manage.py | Python | bsd-3-clause | 549 |
from .aliquot_admin import AliquotAdmin
from .box_admin import BoxAdmin
from .box_item_admin import BoxItemAdmin
from .box_type_admin import BoxTypeAdmin
from .consignee_admin import ConsigneeAdmin
from .fieldsets import (
requisition_fieldset,
requisition_identifier_fields,
requisition_status_fields,
requisition_status_fieldset,
requisition_identifier_fieldset,
requisition_verify_fields,
requisition_verify_fieldset)
from .manifest_admin import ManifestAdmin
from .manifest_item_admin import ManifestItemAdmin
from .modeladmin_mixins import RequisitionAdminMixin
from .order_admin import OrderAdmin
from .panel_admin import PanelAdmin
from .result_admin import ResultAdmin, ResultItemAdmin
from .shipper_admin import ShipperAdmin
| botswana-harvard/edc-lab | edc_lab/admin/__init__.py | Python | gpl-2.0 | 762 |
from twisted.internet import defer
from tint.log import Logger
from tint.storage.addressing import Path
class NotAuthorizedError(Exception):
"""
Someone is trying to access something they can't.
"""
class DefaultPermissions(object):
def __init__(self, storage):
self.storage = storage
self.log = Logger(system=self)
def accessAvailable(self, requestor, key):
def gather(results):
access = set()
for result in results:
if result is not None:
access.update(result.split(','))
return list(access)
self.log.info("Testing access for %s to %s" % (requestor, key))
ds = []
for path in Path(key).ancestors():
path = str(Path('a').join(requestor).join(path))
ds.append(self.storage.get(path, None))
return defer.gatherResults(ds).addCallback(gather)
def canAccess(self, requestor, key, optype='*'):
"""
@param key The path to the storage. Should always start with a '/'.
"""
def test(access):
return '*' in access or optype in access
d = self.accessAvailable(requestor, key)
return d.addCallback(test)
def grantAccess(self, requestor, key, optype="*"):
def test(access):
if not access:
path = Path('a').join(requestor).join(Path(key))
return self.storage.set(str(path), optype)
return defer.succeed(optype)
d = self.canAccess(requestor, key, optype)
return d.addCallback(test)
class PermissionedStorage(object):
def __init__(self, storage, permissions=None):
self.storage = storage
self.permissions = permissions or DefaultPermissions(self.storage)
def grantAccess(self, requestor, key, optype="*"):
return self.permissions.grantAccess(requestor, key, optype)
def grantAllAccess(self, requestor):
return self.permissions.grantAccess(requestor, "/")
def canAccess(self, requestor, key):
return self.permissions.canAccess(requestor, key)
def accessAvailable(self, requestor, key):
return self.permissions.accessAvailable(requestor, key)
def testAccess(self, requestor, key):
"""
Like canAccess, except throw a NotAuthorizedError if requestor
cannot access.
"""
def _test(can):
if not can:
raise NotAuthorizedError("%s cannot access %s" % (requestor, key))
d = self.canAccess(requestor, key)
return d.addCallback(_test)
def get(self, requestor, key, default=None):
d = self.testAccess(requestor, key)
return d.addCallback(lambda _: self.storage.get(key, default))
def set(self, requestor, key, value):
d = self.testAccess(requestor, key)
return d.addCallback(lambda _: self.storage.set(key, value))
def push(self, requestor, key, value):
d = self.testAccess(requestor, key)
return d.addCallback(lambda _: self.storage.push(key, value))
def ls(self, requestor, key, offset, length):
d = self.testAccess(requestor, key)
return d.addCallback(lambda _: self.storage.ls(key, offset, length))
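# Illustrative Twisted-style usage (a sketch; the key layout and op types
# are whatever the application chooses):
#
#     store = PermissionedStorage(backend)
#     d = store.grantAccess('alice', '/inbox', 'read')
#     d.addCallback(lambda _: store.get('alice', '/inbox/latest'))
#     d.addErrback(log.err)   # a NotAuthorizedError would surface here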
| 8468/tint | tint/storage/permissions.py | Python | mit | 3,253 |
#!/usr/bin/env python
import unittest
from sqlbuilder import smartsql
from ascetic import exceptions, validators
from ascetic.databases import databases
from ascetic.mappers import Mapper, mapper_registry
from ascetic.relations import ForeignKey
Author = Book = None
class TestMapper(unittest.TestCase):
maxDiff = None
create_sql = {
'postgresql': """
DROP TABLE IF EXISTS ascetic_tests_author CASCADE;
CREATE TABLE ascetic_tests_author (
id serial NOT NULL PRIMARY KEY,
first_name VARCHAR(40) NOT NULL,
last_name VARCHAR(40) NOT NULL,
bio TEXT
);
DROP TABLE IF EXISTS books CASCADE;
CREATE TABLE books (
id serial NOT NULL PRIMARY KEY,
title VARCHAR(255),
author_id integer REFERENCES ascetic_tests_author(id) ON DELETE CASCADE
);
""",
'mysql': """
DROP TABLE IF EXISTS ascetic_tests_author CASCADE;
CREATE TABLE ascetic_tests_author (
id INT(11) NOT NULL auto_increment,
first_name VARCHAR(40) NOT NULL,
last_name VARCHAR(40) NOT NULL,
bio TEXT,
PRIMARY KEY (id)
);
DROP TABLE IF EXISTS books CASCADE;
CREATE TABLE books (
id INT(11) NOT NULL auto_increment,
title VARCHAR(255),
author_id INT(11),
FOREIGN KEY (author_id) REFERENCES ascetic_tests_author(id),
PRIMARY KEY (id)
);
""",
'sqlite3': """
DROP TABLE IF EXISTS ascetic_tests_author;
CREATE TABLE ascetic_tests_author (
id INTEGER PRIMARY KEY AUTOINCREMENT,
first_name VARCHAR(40) NOT NULL,
last_name VARCHAR(40) NOT NULL,
bio TEXT
);
DROP TABLE IF EXISTS books;
CREATE TABLE books (
id INTEGER PRIMARY KEY AUTOINCREMENT,
title VARCHAR(255),
author_id INT(11),
FOREIGN KEY (author_id) REFERENCES ascetic_tests_author(id)
);
"""
}
@classmethod
def create_models(cls):
class Author(object):
def __init__(self, id=None, first_name=None, last_name=None, bio=None):
self.id = id
self.first_name = first_name
self.last_name = last_name
self.bio = bio
class AuthorMapper(Mapper):
db_table = 'ascetic_tests_author'
defaults = {'bio': 'No bio available'}
validations = {'first_name': validators.Length(),
'last_name': (validators.Length(), lambda x: x != 'BadGuy!' or 'Bad last name', )}
AuthorMapper(Author)
class Book(object):
def __init__(self, id=None, title=None, author_id=None):
self.id = id
self.title = title
self.author_id = author_id
class BookMapper(Mapper):
db_table = 'books'
relationships = {
'author': ForeignKey(Author, related_name='books')
}
BookMapper(Book)
return locals()
@classmethod
def setUpClass(cls):
db = databases['default']
db.cursor().execute(cls.create_sql[db.engine])
for model_name, model in cls.create_models().items():
globals()[model_name] = model
def setUp(self):
db = databases['default']
db.identity_map.disable()
for table in ('ascetic_tests_author', 'books'):
db.execute('DELETE FROM {0}'.format(db.qn(table)))
author_mapper = mapper_registry[Author]
book_mapper = mapper_registry[Book]
james = Author(first_name='James', last_name='Joyce')
author_mapper.save(james)
kurt = Author(first_name='Kurt', last_name='Vonnegut')
author_mapper.save(kurt)
tom = Author(first_name='Tom', last_name='Robbins')
author_mapper.save(tom)
book_mapper.save(Book(title='Ulysses', author_id=james.id))
book_mapper.save(Book(title='Slaughter-House Five', author_id=kurt.id))
book_mapper.save(Book(title='Jitterbug Perfume', author_id=tom.id))
slww = Book(title='Still Life with Woodpecker', author_id=tom.id)
book_mapper.save(slww)
self.data = {
'james': james,
'kurt': kurt,
'tom': tom,
'slww': slww,
}
def test_pk(self):
book_mapper = mapper_registry[Book]
tom, slww = self.data['tom'], self.data['slww']
pk = book_mapper.get_pk(slww)
self.assertEqual(book_mapper.get_pk(slww), slww.id)
book_mapper.set_pk(slww, tom.id)
self.assertEqual(book_mapper.get_pk(slww), tom.id)
book_mapper.set_pk(slww, pk)
self.assertEqual(book_mapper.get_pk(slww), pk)
# self.assertTrue(kurt == author_mapper.get(kurt.id))
# self.assertTrue(kurt != tom)
def test_fk(self):
kurt, tom, slww = self.data['kurt'], self.data['tom'], self.data['slww']
self.assertEqual(slww.author.first_name, 'Tom')
slww.author = kurt
self.assertEqual(slww.author.first_name, 'Kurt')
del slww.author
self.assertEqual(slww.author, None)
slww.author = None
self.assertEqual(slww.author, None)
slww.author = tom.id
self.assertEqual(slww.author.first_name, 'Tom')
def test_o2m(self):
tom = self.data['tom']
self.assertEqual(len(list(tom.books)), 2)
def test_retrieval(self):
author_mapper, book_mapper = mapper_registry[Author], mapper_registry[Book]
tom = self.data['tom']
# Test retrieval
b = book_mapper.get(title='Ulysses')
a = author_mapper.get(id=b.author_id)
self.assertEqual(a.id, b.author_id)
a = author_mapper.query.where(author_mapper.sql_table.id == b.id)[:]
# self.assert_(isinstance(a, list))
self.assert_(isinstance(a, smartsql.Q))
self.assertEqual(len(list(tom.books)), 2)
def test_update(self):
author_mapper = mapper_registry[Author]
kurt = self.data['kurt']
kid = kurt.id
new_last_name = 'Vonnegut, Jr.'
a = author_mapper.get(id=kid)
a.last_name = new_last_name
author_mapper.save(a)
a = author_mapper.get(kid)
self.assertEqual(a.last_name, new_last_name)
def test_count(self):
author_mapper, book_mapper = mapper_registry[Author], mapper_registry[Book]
self.assertEqual(author_mapper.query.count(), 3)
self.assertEqual(len(book_mapper.query.clone()), 4)
self.assertEqual(len(book_mapper.query.clone()[1:4]), 3)
def test_delete(self):
author_mapper, book_mapper = mapper_registry[Author], mapper_registry[Book]
kurt = self.data['kurt']
author_mapper.delete(kurt)
self.assertEqual(author_mapper.query.count(), 2)
self.assertEqual(len(book_mapper.query.clone()), 3)
def test_validation(self):
author_mapper = mapper_registry[Author]
a = Author(first_name='', last_name='Ted')
self.assertRaises(exceptions.ValidationError, author_mapper.validate, a)
def test_defaults(self):
author_mapper = mapper_registry[Author]
a = Author(first_name='Bill and', last_name='Ted')
author_mapper.save(a)
self.assertEqual(a.bio, 'No bio available')
a = Author(first_name='I am a', last_name='BadGuy!')
self.assertRaises(exceptions.ValidationError, author_mapper.validate, a)
def test_smartsql(self):
author_mapper, book_mapper = mapper_registry[Author], mapper_registry[Book]
slww = self.data['slww']
fields = [smartsql.compile(i)[0] for i in author_mapper.get_sql_fields()]
self.assertListEqual(
fields,
['"ascetic_tests_author"."id"',
'"ascetic_tests_author"."first_name"',
'"ascetic_tests_author"."last_name"',
'"ascetic_tests_author"."bio"', ]
)
# self.assertEqual(smartsql.compile(book_mapper.sql_table.author)[0], '"books"."author_id"')
smartsql.auto_name.counter = 0
self.assertEqual(
smartsql.compile(book_mapper.query.where(book_mapper.sql_table.author.id == 1)),
('SELECT "books"."id", "books"."title", "books"."author_id" FROM "books" INNER '
'JOIN "ascetic_tests_author" AS "_auto_1" ON ("books"."author_id" = '
'"_auto_1"."id") WHERE "_auto_1"."id" = %s',
[1])
)
smartsql.auto_name.counter = 0
self.assertEqual(
smartsql.compile(author_mapper.query.where(
(book_mapper.sql_table.author.id == 1) & (book_mapper.sql_table.author.first_name == 'Ivan')
)),
('SELECT "ascetic_tests_author"."id", "ascetic_tests_author"."first_name", '
'"ascetic_tests_author"."last_name", "ascetic_tests_author"."bio" FROM '
'"ascetic_tests_author" INNER JOIN "ascetic_tests_author" AS "_auto_1" ON '
'("books"."author_id" = "_auto_1"."id") INNER JOIN "ascetic_tests_author" AS '
'"_auto_2" ON ("books"."author_id" = "_auto_2"."id") WHERE "_auto_1"."id" = '
'%s AND "_auto_2"."first_name" = %s',
[1, 'Ivan'])
)
smartsql.auto_name.counter = 0
author_table = book_mapper.sql_table.author
self.assertEqual(
smartsql.compile(author_mapper.query.where(
(author_table.id == 1) & (author_table.first_name == 'Ivan')
)),
('SELECT "ascetic_tests_author"."id", "ascetic_tests_author"."first_name", '
'"ascetic_tests_author"."last_name", "ascetic_tests_author"."bio" FROM '
'"ascetic_tests_author" INNER JOIN "ascetic_tests_author" AS "_auto_1" ON '
'("books"."author_id" = "_auto_1"."id") WHERE "_auto_1"."id" = %s AND '
'"_auto_1"."first_name" = %s',
[1, 'Ivan'])
)
q = author_mapper.query
self.assertEqual(smartsql.compile(q)[0], '''SELECT "ascetic_tests_author"."id", "ascetic_tests_author"."first_name", "ascetic_tests_author"."last_name", "ascetic_tests_author"."bio" FROM "ascetic_tests_author"''')
self.assertEqual(len(q), 3)
for obj in q:
self.assertTrue(isinstance(obj, Author))
q = q.where(author_mapper.sql_table.id == slww.author_id)
self.assertEqual(smartsql.compile(q)[0], """SELECT "ascetic_tests_author"."id", "ascetic_tests_author"."first_name", "ascetic_tests_author"."last_name", "ascetic_tests_author"."bio" FROM "ascetic_tests_author" WHERE "ascetic_tests_author"."id" = %s""")
self.assertEqual(len(q), 1)
self.assertTrue(isinstance(q[0], Author))
def test_prefetch(self):
author_mapper, book_mapper = mapper_registry[Author], mapper_registry[Book]
q = book_mapper.query.prefetch('author').order_by(book_mapper.sql_table.id)
for obj in q:
self.assertTrue(hasattr(obj, '_cache'))
self.assertTrue('author' in obj._cache)
self.assertEqual(obj._cache['author'], obj.author)
for obj in author_mapper.query.prefetch('books').order_by(author_mapper.sql_table.id):
self.assertTrue(hasattr(obj, '_cache'))
self.assertTrue('books' in obj._cache)
self.assertEqual(len(obj._cache['books']._cache), len(obj.books))
for i in obj._cache['books']._cache:
self.assertEqual(i._cache['author'], obj)
| emacsway/ascetic | ascetic/tests/test_mappers.py | Python | mit | 11,813 |
import time
from azure.storage import AccessPolicy
from azure.storage.blob import BlockBlobService, ContentSettings, ContainerPermissions
from datetime import datetime, timedelta
# The name of the new Shared Access policy
policy_name = 'readandlistonly'
# The Storage Account Name
storage_account_name = 'mystore'
storage_account_key = 'mykey'
storage_container_name = 'mycontainer'
example_file_path = '..\\sampledata\\sample.log'
# Create the blob service, using the name and key for your Azure Storage account
blob_service = BlockBlobService(storage_account_name, storage_account_key)
# Create the container, if it does not already exist
blob_service.create_container(storage_container_name)
# Upload an example file to the container
blob_service.create_blob_from_path(
storage_container_name,
'sample.log',
example_file_path,
)
# Create a new policy that expires after a week
access_policy = AccessPolicy(permission=ContainerPermissions.READ + ContainerPermissions.LIST, expiry=datetime.utcnow() + timedelta(weeks=1))
# Get the existing identifiers (policies) for the container
identifiers = blob_service.get_container_acl(storage_container_name)
# And add the new one ot the list
identifiers[policy_name] = access_policy
# Set the container to the updated list of identifiers (policies)
blob_service.set_container_acl(
storage_container_name,
identifiers,
)
# Wait 30 seconds for acl to propagate
time.sleep(30)
# Generate a new Shared Access Signature token using the policy (by name)
sas_token = blob_service.generate_container_shared_access_signature(
storage_container_name,
id=policy_name,
)
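# The token can then be appended to a blob URL as its query string, e.g.
# (account and container names as configured above):
#   https://<storage_account_name>.blob.core.windows.net/<container>/sample.log?<sas_token>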
# Print out the new token
print(sas_token)
| Azure-Samples/hdinsight-dotnet-python-azure-storage-shared-access-signature | Python/SASToken.py | Python | mit | 1714 |
###############################################################################
##
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: [email protected]
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the University of Utah nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
from core.modules.basic_modules import new_constant
#################################################################################
## The ListOfElements Module
def list_conv(l):
    if (l[0] != '[') or (l[-1] != ']'):
raise ValueError('List from String in VisTrails should start with \
"[" and end with "]".')
else:
l = eval(l)
return l
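# For example, list_conv('[1, 2, 3]') returns [1, 2, 3], while a bare
# '1, 2, 3' raises ValueError. Note that eval() will execute arbitrary
# expressions inside the brackets, so the input string is trusted here.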
ListOfElements = new_constant('ListOfElements' , staticmethod(list_conv), [],\
staticmethod(lambda x: type(x) == list))
| CMUSV-VisTrails/WorkflowRecommendation | vistrails/packages/controlflow/list_module.py | Python | bsd-3-clause | 2,364 |
from __future__ import absolute_import, division, print_function, unicode_literals
import unittest
from itertools import chain
from mock import Mock, sentinel
from nose.tools import eq_, raises
from six.moves import xrange
from smarkets.signal import Signal
class SignalTest(unittest.TestCase):
"Test the `smarkets.Callback` class"
def setUp(self):
"Set up the tests"
self.callback = Signal()
def tearDown(self):
"Tear down the test requirements"
self.callback = None
def test_simple_fire(self):
"Test the simple case where the handler fires"
handler = Mock()
self.callback += handler
self.assertFalse(handler.called)
self.assertEquals(1, len(self.callback))
self.callback(message=sentinel.message)
handler.assert_called_once_with(message=sentinel.message)
self.assertEquals(1, len(self.callback))
def test_unhandle(self):
"Test the case where a handler is removed"
handler = Mock()
self.callback += handler
self.assertFalse(handler.called)
self.assertEquals(1, len(self.callback))
self.callback -= handler
self.assertEquals(0, len(self.callback))
self.callback(message=sentinel.message)
self.assertFalse(handler.called)
def test_2_handlers(self):
"Test 2 handlers both get called"
handler1 = Mock()
handler2 = Mock()
self.callback += handler1
self.callback += handler2
self.assertFalse(handler1.called)
self.assertFalse(handler2.called)
self.assertEquals(2, len(self.callback))
self.callback(message=sentinel.message)
handler1.assert_called_once_with(message=sentinel.message)
handler2.assert_called_once_with(message=sentinel.message)
self.assertEquals(2, len(self.callback))
def test_many_handlers(self):
"General version of `test_2_handlers`"
handlers = [Mock() for _ in xrange(1, 100)]
for handler in handlers:
self.callback += handler
self.assertEquals(len(handlers), len(self.callback))
for handler in handlers:
self.assertFalse(handler.called)
self.callback(message=sentinel.message)
for handler in handlers:
handler.assert_called_once_with(message=sentinel.message)
self.assertEquals(len(handlers), len(self.callback))
def test_many_unhandle(self):
"Unhandle many"
real_handlers = [Mock() for _ in xrange(1, 100)]
to_unhandle = [Mock() for _ in xrange(1, 20)]
for handler in chain(real_handlers, to_unhandle):
self.callback += handler
self.assertEquals(
len(real_handlers) + len(to_unhandle), len(self.callback))
for handler in to_unhandle:
self.callback -= handler
self.assertEquals(len(real_handlers), len(self.callback))
self.callback(message=sentinel.message)
for handler in to_unhandle:
self.assertFalse(handler.called)
for handler in real_handlers:
handler.assert_called_once_with(message=sentinel.message)
def test_handle_exception(self):
"Test that an exception is raised by the callback method"
handler = Mock(side_effect=self._always_raise)
self.callback += handler
self.assertRaises(Exception, self.callback, message=sentinel.message)
def test_2_handle_exception(self):
"Test that an exception is raised by the callback method"
handler1 = Mock(side_effect=self._always_raise)
handler2 = Mock()
self.callback += handler1
self.callback += handler2
self.assertRaises(Exception, self.callback, message=sentinel.message)
# Because the collection of handlers in the `Signal` is a
# `set` the 'firing' order is undefined. However, if handler2
# is called, we assert that it is called correctly here.
if handler2.called:
handler2.assert_called_once_with(message=sentinel.message)
@staticmethod
def _always_raise(*args, **kwargs):
"Always raise `Exception` with no arguments"
raise Exception()
@raises(KeyError)
def test_removing_non_existing_handler_fails():
e = Signal()
e -= 'irrelevant'
def test_removing_handler_works():
e = Signal()
h = Mock()
e += h
e -= h
e.fire()
eq_(h.called, False)
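# For reference, a minimal Signal sketch consistent with these tests
# (the real implementation lives in smarkets.signal and may differ):
#
#     class Signal(object):
#         def __init__(self):
#             self._handlers = set()
#         def __iadd__(self, handler):
#             self._handlers.add(handler)
#             return self
#         def __isub__(self, handler):
#             self._handlers.remove(handler)   # raises KeyError if absent
#             return self
#         def __len__(self):
#             return len(self._handlers)
#         def fire(self, **kwargs):
#             for handler in self._handlers:   # firing order undefined
#                 handler(**kwargs)
#         __call__ = fire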
| smarkets/smk_python_sdk | smarkets/tests/signal.py | Python | mit | 4,449 |
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2011 Camptocamp SA (http://www.camptocamp.com)
# All Right Reserved
#
# Author : Joel Grand-guillaume (Camptocamp)
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsability of assessing all potential
# consequences resulting from its eventual inadequacies and bugs
# End users who are looking for a ready-to-use solution with commercial
# garantees and support are strongly adviced to contract a Free Software
# Service Company
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
##############################################################################
import project
| avanzosc/avanzosc6.1 | project_department/__init__.py | Python | agpl-3.0 | 1,414 |
import datetime
import logging
import textwrap
import time
import click
import hatarake
import hatarake.net as requests
from hatarake.config import Config
logger = logging.getLogger(__name__)
@click.group()
@click.option('-v', '--verbosity', count=True)
def main(verbosity):
logging.basicConfig(level=logging.WARNING - verbosity * 10)
logging.getLogger('gntp').setLevel(logging.ERROR - verbosity * 10)
@main.command()
@click.option('--start', help='start time')
@click.argument('duration', type=int)
@click.argument('title')
def submit(start, duration, title):
'''Submit a pomodoro to the server'''
config = Config(hatarake.CONFIG_PATH)
api = config.get('server', 'api')
token = config.get('server', 'token')
response = requests.post(
api,
headers={
'Authorization': 'Token %s' % token,
},
data={
'created': start,
'duration': duration,
'title': title,
}
)
response.raise_for_status()
click.echo(response.text)
@main.command()
@click.option('--duration', type=int, default=2)
@click.option('--api_server', envvar='HATARAKE_API_SERVER')
@click.option('--api_token', envvar='HATARAKE_API_TOKEN')
@click.argument('title')
def append(duration, title, api_server=None, api_token=None):
'''Append time to a pomodoro'''
config = Config(hatarake.CONFIG_PATH)
api = api_server if api_server else config.get('server', 'api')
token = api_token if api_token else config.get('server', 'token')
end = datetime.datetime.utcnow().replace(microsecond=0)
start = end - datetime.timedelta(minutes=duration)
# Split the tags out of the title
# For now, we remove the tags from the final title to make things neater
# but in the future, may want to leave the hash tag in the full title
tags = {tag.strip("#") for tag in title.split() if tag.startswith("#")}
    title = ' '.join(tag for tag in title.split() if not tag.startswith('#'))  # keep word order (a set would scramble it)
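    # e.g. title='Refactor parser #work #deep' yields tags={'work', 'deep'}
    # and title='Refactor parser'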
response = requests.post(
api + '/append',
headers={
'Authorization': 'Token %s' % token,
},
data={
'start': start.isoformat(),
'end': end.isoformat(),
'category': tags,
'title': title,
}
)
response.raise_for_status()
click.echo(response.text)
@main.command()
@click.option('--api_server', envvar='HATARAKE_API_SERVER')
@click.option('--api_token', envvar='HATARAKE_API_TOKEN')
@click.argument('label')
@click.argument('duration', type=int)
def countdown(api_server, api_token, label, duration):
'''Submit a new countdown'''
config = Config(hatarake.CONFIG_PATH)
api = api_server if api_server else config.get('countdown', 'api')
token = api_token if api_token else config.get('countdown', 'token')
created = datetime.datetime.now() + datetime.timedelta(minutes=duration)
response = requests.put(
api,
headers={
'Authorization': 'Token %s' % token,
},
data={
'created': created.replace(microsecond=0).isoformat(),
'label': label,
}
)
response.raise_for_status()
click.echo(response.text)
@main.command()
@click.argument('key')
@click.argument('value')
def stat(key, value):
'''Submit stat data to server'''
config = Config(hatarake.CONFIG_PATH)
response = requests.post(
config.get('stat', 'api'),
headers={
'Authorization': 'Token %s' % config.get('stat', 'token'),
},
data={
'key': key,
'value': value,
}
)
logger.info('POSTing to %s %s', response.request.url, response.request.body)
response.raise_for_status()
click.echo(response.text)
@main.command()
@click.argument('name', default='heartbeat')
def heartbeat(name):
    '''Push a heartbeat gauge to the Prometheus pushgateway'''
config = Config(hatarake.CONFIG_PATH)
url = config.get('prometheus', 'pushgateway')
payload = textwrap.dedent('''
# TYPE {name} gauge
# HELP {name} Last heartbeat based on unixtimestamp
{name} {time}
''').format(name=name, time=int(time.time())).lstrip()
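    # For name='heartbeat' the pushed body looks like:
    #   # TYPE heartbeat gauge
    #   # HELP heartbeat Last heartbeat based on unixtimestamp
    #   heartbeat 1500000000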
response = requests.post(url, data=payload)
response.raise_for_status()
click.echo(response.text)
| kfdm/hatarake | hatarake/cli.py | Python | mit | 4,255 |
from flask import Blueprint, url_for
core = Blueprint('core', __name__)
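# Imported after ``core`` is defined so that ``filters`` (which presumably
# imports ``core`` to register itself on the blueprint) avoids a circular import.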
from . import filters
| MrLeeh/flaskhab | app/core/__init__.py | Python | mit | 94 |
#!/usr/bin/env python
##################################################
# Gnuradio Python Flow Graph
# Title: Radio Impairments Model
# Author: mettus
# Generated: Thu Aug 1 12:46:10 2013
##################################################
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from gnuradio import analog
from gnuradio import blocks
from gnuradio import gr
from gnuradio.filter import firdes
import math
#Import locally
from .phase_noise_gen import phase_noise_gen
from .iqbal_gen import iqbal_gen
from .distortion_2_gen import distortion_2_gen
from .distortion_3_gen import distortion_3_gen
class impairments(gr.hier_block2):
def __init__(self, phase_noise_mag=0, magbal=0, phasebal=0, q_ofs=0, i_ofs=0, freq_offset=0, gamma=0, beta=0):
gr.hier_block2.__init__(
self, "Radio Impairments Model",
gr.io_signature(1, 1, gr.sizeof_gr_complex*1),
gr.io_signature(1, 1, gr.sizeof_gr_complex*1),
)
##################################################
# Parameters
##################################################
self.phase_noise_mag = phase_noise_mag
self.magbal = magbal
self.phasebal = phasebal
self.q_ofs = q_ofs
self.i_ofs = i_ofs
self.freq_offset = freq_offset
self.gamma = gamma
self.beta = beta
##################################################
# Blocks
##################################################
self.channels_phase_noise_gen_0_0 = phase_noise_gen(math.pow(10.0,phase_noise_mag / 20.0), .01)
self.channels_iqbal_gen_0 = iqbal_gen(magbal, phasebal)
self.channels_distortion_3_gen_0 = distortion_3_gen(beta)
self.channels_distortion_2_gen_0 = distortion_2_gen(gamma)
self.blocks_multiply_xx_0_0 = blocks.multiply_vcc(1)
self.blocks_multiply_xx_0 = blocks.multiply_vcc(1)
self.blocks_conjugate_cc_0 = blocks.conjugate_cc()
self.blocks_add_const_vxx_0 = blocks.add_const_vcc((i_ofs + q_ofs* 1j, ))
self.analog_sig_source_x_0 = analog.sig_source_c(1.0, analog.GR_COS_WAVE, freq_offset, 1, 0)
##################################################
# Connections
##################################################
self.connect((self.channels_phase_noise_gen_0_0, 0), (self.channels_distortion_3_gen_0, 0))
self.connect((self.blocks_multiply_xx_0, 0), (self, 0))
self.connect((self.blocks_add_const_vxx_0, 0), (self.blocks_multiply_xx_0, 1))
self.connect((self.analog_sig_source_x_0, 0), (self.blocks_multiply_xx_0, 0))
self.connect((self.blocks_multiply_xx_0_0, 0), (self.channels_phase_noise_gen_0_0, 0))
self.connect((self.analog_sig_source_x_0, 0), (self.blocks_conjugate_cc_0, 0))
self.connect((self, 0), (self.blocks_multiply_xx_0_0, 1))
self.connect((self.blocks_conjugate_cc_0, 0), (self.blocks_multiply_xx_0_0, 0))
self.connect((self.channels_iqbal_gen_0, 0), (self.blocks_add_const_vxx_0, 0))
self.connect((self.channels_distortion_3_gen_0, 0), (self.channels_distortion_2_gen_0, 0))
self.connect((self.channels_distortion_2_gen_0, 0), (self.channels_iqbal_gen_0, 0))
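        # Net signal path, as wired above:
        #   in -> multiply by conj(LO) -> phase noise -> 3rd-order distortion
        #      -> 2nd-order distortion -> IQ imbalance -> DC offset
        #      -> multiply by LO -> out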
def get_phase_noise_mag(self):
return self.phase_noise_mag
def set_phase_noise_mag(self, phase_noise_mag):
self.phase_noise_mag = phase_noise_mag
self.channels_phase_noise_gen_0_0.set_noise_mag(math.pow(10.0,self.phase_noise_mag / 20.0))
def get_magbal(self):
return self.magbal
def set_magbal(self, magbal):
self.magbal = magbal
self.channels_iqbal_gen_0.set_magnitude(self.magbal)
def get_phasebal(self):
return self.phasebal
def set_phasebal(self, phasebal):
self.phasebal = phasebal
self.channels_iqbal_gen_0.set_phase(self.phasebal)
def get_q_ofs(self):
return self.q_ofs
def set_q_ofs(self, q_ofs):
self.q_ofs = q_ofs
self.blocks_add_const_vxx_0.set_k((self.i_ofs + self.q_ofs* 1j, ))
def get_i_ofs(self):
return self.i_ofs
def set_i_ofs(self, i_ofs):
self.i_ofs = i_ofs
self.blocks_add_const_vxx_0.set_k((self.i_ofs + self.q_ofs* 1j, ))
def get_freq_offset(self):
return self.freq_offset
def set_freq_offset(self, freq_offset):
self.freq_offset = freq_offset
self.analog_sig_source_x_0.set_frequency(self.freq_offset)
def get_gamma(self):
return self.gamma
def set_gamma(self, gamma):
self.gamma = gamma
self.channels_distortion_2_gen_0.set_beta(self.gamma)
def get_beta(self):
return self.beta
def set_beta(self, beta):
self.beta = beta
self.channels_distortion_3_gen_0.set_beta(self.beta)
| jdemel/gnuradio | gr-channels/python/channels/impairments.py | Python | gpl-3.0 | 4,926 |
def fib(n):
if n < 2:
return n
else:
return fib(n-1) + fib(n-2)
def fib_fast(n):
from math import sqrt
s5 = sqrt(5)
x = (1 + s5) ** n
y = (1 - s5) ** n
return int((x - y)/(s5 * 2**n))
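# fib_fast evaluates Binet's closed form, ((1+sqrt(5))^n - (1-sqrt(5))^n) / (sqrt(5) * 2^n).
# Note that double-precision rounding makes it disagree with fib() for large n
# (roughly n > 70), so it is a shortcut, not a drop-in replacement.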
def print_fib(n):
for i in range(n):
print fib(i),
print
for i in range(n):
print fib_fast(i),
def print_fib2(n):
    # Iterative version; prints the same n values as print_fib.
    a, b = 0, 1
    if n == 1:
        print a,
    elif n >= 2:
        print a, b,
        for i in range(2, n):
            a, b = b, a + b
            print b,
if __name__ == "__main__":
print_fib(10)
print
print_fib2(10)
| familug/FAMILUG | Python/fibs.py | Python | bsd-2-clause | 668 |
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("migrations", "0001_initial"),
]
operations = [
migrations.DeleteModel("Tribble"),
migrations.RemoveField("Author", "silly_field"),
migrations.AddField("Author", "rating", models.IntegerField(default=0)),
migrations.CreateModel(
"Book",
[
("id", models.AutoField(primary_key=True)),
("author", models.ForeignKey("migrations.Author", null=True)),
],
)
]
| beckastar/django | tests/migrations/test_migrations/0002_second.py | Python | bsd-3-clause | 592 |
# -*- coding: utf-8 -*-
#
# Avendesora documentation build configuration file, created by
# sphinx-quickstart on Mon Jun 12 12:01:56 2017.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = '''
sphinx.ext.autodoc
sphinx.ext.coverage
sphinx.ext.doctest
sphinx.ext.napoleon
sphinx.ext.todo
sphinx.ext.viewcode
'''.split()
# Add any paths that contain templates here, relative to this directory.
templates_path = ['.templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'avendesora'
copyright = u'2017-2022, Ken Kundert'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The full version, including alpha/beta/rc tags.
release = '1.22.0'
# The short X.Y version.
version = '.'.join(release.split('.')[0:2])
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['.build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# html_theme = 'nature'
# Use default rather than my normal nature so we get the read-the-docs style on
# that website.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['.static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'avendesoradoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'avendesora.tex', u'Avendesora Documentation',
u'Ken Kundert', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'avendesora', u'Avendesora Documentation',
[u'Ken Kundert'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Avendesora', u'Avendesora Documentation',
u'Ken Kundert', 'Avendesora', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
#KSK: add custom css code if present
def setup(app):
import os
if os.path.exists('.static/css/custom.css'):
app.add_stylesheet('css/custom.css')
# KSK: the following is needed by read-the-docs because they do not install
# gobject-introspection-1.0
#
#autodoc_mock_imports = ['pygobject']
#
#used doc/requirements.txt instead
| KenKundert/avendesora | doc/conf.py | Python | gpl-3.0 | 8,400 |
"""
A pyqtgraph timetrace plot figure with a slider to scroll the time axis
back and forth.
Adapted from:
http://stackoverflow.com/questions/16824718/python-matplotlib-pyside-fast-timetrace-scrolling
"""
from PySide import QtGui, QtCore
import numpy as np
import pyqtgraph as pg
N_SAMPLES = int(1e6)  # keep this an int: np.random.randn expects an integer count
def test_plot():
time = np.arange(N_SAMPLES)*1e-3
sample = np.random.randn(N_SAMPLES)
plt = pg.PlotWidget(title="Use the slider to scroll and the spin-box to set the width")
plt.addLegend()
plt.plot(time, sample, name="Gaussian noise")
q = ScrollingToolQT(plt)
return q # WARNING: it's important to return this object otherwise
# python will delete the reference and the GUI will not respond!
class ScrollingToolQT(object):
def __init__(self, fig):
# Setup data range variables for scrolling
self.fig = fig
self.xmin, self.xmax = fig.plotItem.vb.childrenBounds()[0]
self.step = 1 # axis units
        self.scale = 1e3  # conversion between scrolling units and axis units
        # Retrieve the QMainWindow used by the current figure and add a toolbar
# to host the new widgets
self.win = QtGui.QMainWindow()
self.win.show()
self.win.resize(800,600)
self.win.setCentralWidget(fig)
self.toolbar = QtGui.QToolBar()
self.win.addToolBar(QtCore.Qt.BottomToolBarArea, self.toolbar)
# Create the slider and spinbox for x-axis scrolling in toolbar
self.set_slider(self.toolbar)
self.set_spinbox(self.toolbar)
# Set the initial xlimits coherently with values in slider and spinbox
self.set_xlim = self.fig.setXRange
self.set_xlim(0, self.step)
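        # The slider works in integer "scroll units" (milliseconds here, via
        # self.scale); xpos_changed() divides by that scale to get back to
        # axis units (seconds) before moving the visible x-range.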
def set_slider(self, parent):
# Slider only support integer ranges so use ms as base unit
smin, smax = self.xmin*self.scale, self.xmax*self.scale
self.slider = QtGui.QSlider(QtCore.Qt.Horizontal, parent=parent)
self.slider.setTickPosition(QtGui.QSlider.TicksAbove)
self.slider.setTickInterval((smax-smin)/10.)
self.slider.setMinimum(smin)
self.slider.setMaximum(smax-self.step*self.scale)
self.slider.setSingleStep(self.step*self.scale/5.)
self.slider.setPageStep(self.step*self.scale)
self.slider.setValue(0) # set the initial position
self.slider.valueChanged.connect(self.xpos_changed)
parent.addWidget(self.slider)
def set_spinbox(self, parent):
self.spinb = QtGui.QDoubleSpinBox(parent=parent)
self.spinb.setDecimals(3)
self.spinb.setRange(0.001, 3600.)
self.spinb.setSuffix(" s")
self.spinb.setValue(self.step) # set the initial width
self.spinb.valueChanged.connect(self.xwidth_changed)
parent.addWidget(self.spinb)
def xpos_changed(self, pos):
#pprint("Position (in scroll units) %f\n" %pos)
# self.pos = pos/self.scale
pos /= self.scale
self.set_xlim(pos, pos + self.step, padding=0)
def xwidth_changed(self, xwidth):
#pprint("Width (axis units) %f\n" % step)
if xwidth <= 0: return
self.step = xwidth
self.slider.setSingleStep(self.step*self.scale/5.)
self.slider.setPageStep(self.step*self.scale)
old_xlim = self.fig.plotItem.vb.viewRange()[0]
self.xpos_changed(old_xlim[0] * self.scale)
if __name__ == "__main__":
app = pg.mkQApp()
q = test_plot()
#app.exec_()
| chungjjang80/FRETBursts | fretbursts/utils/examples/timetrace_scroll_pygraphqt.py | Python | gpl-2.0 | 3,476 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-04-11 06:56
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('ims_app', '0007_auto_20170411_1228'),
]
operations = [
migrations.AlterField(
model_name='stockin',
name='created_date',
field=models.DateTimeField(),
),
]
| FarhadurFahim/ims | ims_app/migrations/0008_auto_20170411_1256.py | Python | mit | 447 |
from django.http import HttpResponseBadRequest, HttpResponse
from ..simplified import PermissionDenied, SimplifiedException, InvalidUsername
from restview import RestfulView, extjswrap
from serializers import (serializers, SerializableResult,
ErrorMsgSerializableResult,
ForbiddenSerializableResult,
InvalidUsernameSerializableResult)
from readform import ReadForm
class HttpResponseCreated(HttpResponse):
status_code = 201
class FormErrorSerializableResult(SerializableResult):
def _create_error_msg_from_form(self, form):
fielderrors = dict(form.errors)
non_field_errors = list(form.non_field_errors())
return dict(fielderrors=fielderrors, errormessages=non_field_errors)
def __init__(self, form, use_extjshacks):
error = self._create_error_msg_from_form(form)
error = extjswrap(error, use_extjshacks, success=False)
super(FormErrorSerializableResult, self).__init__(error, httpresponsecls=HttpResponseBadRequest)
class MultiFormErrorSerializableResult(FormErrorSerializableResult):
def __init__(self, list_of_forms, use_extjshacks):
list_of_errors = [self._create_error_msg_from_form(form) for form in list_of_forms]
errors = extjswrap(list_of_errors, use_extjshacks, success=False)
super(FormErrorSerializableResult, self).__init__(errors, httpresponsecls=HttpResponseBadRequest)
class ModelRestfulView(RestfulView):
"""
:class:`ModelRestfulView` is used in conjunction with the
:class:`restful_modelapi`-decorator to autogenerate a RESTful
interface for a simplified class (see :ref:`simplified`).
"""
@classmethod
def filter_urlmap(cls, itemdct):
if not hasattr(cls._meta, 'urlmap'):
return itemdct
for fieldname, mapping in cls._meta.urlmap.iteritems():
url = mapping.restfulcls.get_rest_url(itemdct[mapping.idfield])
itemdct[fieldname] = url
return itemdct
@classmethod
def filter_resultitem(cls, itemdct):
return cls.filter_urlmap(itemdct)
def restultqry_to_list(self, qryresultwrapper):
return [self.__class__.filter_resultitem(itemdct) \
for itemdct in qryresultwrapper]
def extra_create_or_replace_responsedata(self, obj_id):
""" If this does not return ``None``, the return-value is added to the
``extra_responsedata`` attribute of the data returned on ``create()`` or
``update()``.
:param obj_id: The id of the object changed by the create or update call.
"""
return None
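        # A hypothetical override (names are illustrative only):
        #
        #     def extra_create_or_replace_responsedata(self, obj_id):
        #         return {'changed_id': obj_id}
        #
        # would make create()/update() responses carry that dict under the
        # ``extra_responsedata`` key.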
def _deserialize_and_validate_many(self, list_of_field_values):
list_of_deserialized_field_values = []
errors = []
for field_values in list_of_field_values:
form = self.__class__.EditForm(field_values)
if form.is_valid():
cleaned = form.cleaned_data
pk = field_values.get('pk')
if pk:
cleaned['pk'] = pk
list_of_deserialized_field_values.append(cleaned)
else:
errors.append(form)
return errors, list_of_deserialized_field_values
def _create_or_replace_many(self, list_of_field_values, update=False):
errors, list_of_deserialized_field_values = self._deserialize_and_validate_many(list_of_field_values)
if errors:
return MultiFormErrorSerializableResult(errors, self.use_extjshacks)
responsedata = []
if update:
responsedata = self._meta.simplified.updatemany(self.request.user, *list_of_deserialized_field_values)
else:
responsedata = self._meta.simplified.createmany(self.request.user, *list_of_deserialized_field_values)
result = self.extjswrapshortcut(responsedata)
if update:
return SerializableResult(result)
else:
return SerializableResult(result, httpresponsecls=HttpResponseCreated)
def _create_or_replace_single(self, data, instance=None):
form = self.__class__.EditForm(data, instance=instance)
if form.is_valid():
try:
                if instance is None:
id = self._meta.simplified.create(self.request.user, **form.cleaned_data)
else:
id = self._meta.simplified.update(self.request.user, instance.pk, **form.cleaned_data)
except PermissionDenied, e:
return ForbiddenSerializableResult(e)
except InvalidUsername, e:
return InvalidUsernameSerializableResult(e.username)
data['id'] = id
extra = self.extra_create_or_replace_responsedata(id)
            if extra is not None:
data['extra_responsedata'] = extra
result = self.extjswrapshortcut(data)
            if instance is None:
return SerializableResult(result, httpresponsecls=HttpResponseCreated)
else:
return SerializableResult(result)
else:
return FormErrorSerializableResult(form, self.use_extjshacks)
def _load_getdata(self):
if 'getdata_in_qrystring' in self.request.GET or self.use_extjshacks: # NOTE: For easier ExtJS integration
return True, self.request.GET
else:
try:
return False, serializers.deserialize(self.comformat, self.request.raw_post_data)
except ValueError, e:
                raise ValueError(('Bad request data: {0}. Perhaps you meant to '
                                  'send GET data as a querystring? In that case, add '
                                  'getdata_in_qrystring=1 to your querystring.'.format(e)))
def _parse_extjs_sort(self, sortlist):
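        # ExtJS sends sort specs such as
        #   [{'property': 'name'}, {'property': 'id', 'direction': 'DESC'}]
        # which this method turns into Django-style ordering: ['name', '-id'].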
orderby = []
for sortitem in sortlist:
fieldname = sortitem['property']
if sortitem.get('direction', 'ASC') == 'DESC':
fieldname = '-' + fieldname
orderby.append(fieldname)
return orderby
def crud_search(self, request):
""" Maps to the ``search`` method of the simplified class. """
try:
fromGET, getdata = self._load_getdata()
except ValueError, e:
return ErrorMsgSerializableResult(str(e),
httpresponsecls=HttpResponseBadRequest)
form = self.__class__.SearchForm(getdata)
if form.is_valid():
cleaned_data = form.cleaned_data
if 'sort' in cleaned_data:
sort = cleaned_data['sort']
del cleaned_data['sort']
if not cleaned_data.get('orderby'):
if sort and self.use_extjshacks:
orderby = self._parse_extjs_sort(sort)
cleaned_data['orderby'] = orderby
if 'filter' in cleaned_data:
f = cleaned_data['filter']
if f:
cleaned_data['filters'] = f
del cleaned_data['filter']
try:
qryresultwrapper = self._meta.simplified.search(self.request.user, **cleaned_data)
except SimplifiedException, e:
return ErrorMsgSerializableResult(str(e),
httpresponsecls=HttpResponseBadRequest)
resultlist = self.restultqry_to_list(qryresultwrapper)
result = self.extjswrapshortcut(resultlist, total=qryresultwrapper.total)
return SerializableResult(result)
else:
return FormErrorSerializableResult(form, self.use_extjshacks)
def crud_read(self, request, id):
""" Maps to the ``read`` method of the simplified class. """
try:
fromGET, getdata = self._load_getdata()
except ValueError, e:
return ErrorMsgSerializableResult(str(e),
httpresponsecls=HttpResponseBadRequest)
form = ReadForm(getdata)
if form.is_valid():
cleaned_data = form.cleaned_data
try:
data = self._meta.simplified.read(self.request.user, id, **cleaned_data)
except PermissionDenied, e:
return ForbiddenSerializableResult(e)
data = self.extjswrapshortcut(data)
return SerializableResult(data)
else:
return FormErrorSerializableResult(form, self.use_extjshacks)
def _deserialize_create_or_replace_request(self):
data = serializers.deserialize(self.comformat, self.request.raw_post_data)
if isinstance(data, list):
return data, False
else:
return data, True
def crud_create(self, request):
""" Maps to the ``create`` method of the simplified class. """
data, is_single = self._deserialize_create_or_replace_request()
if is_single:
return self._create_or_replace_single(data)
else:
return self._create_or_replace_many(data, update=False)
def crud_update(self, request, id=None):
""" Maps to the ``update`` method of the simplified class. """
data, is_single = self._deserialize_create_or_replace_request()
if is_single:
try:
instance = self._meta.simplified._meta.model.objects.get(pk=id)
except self._meta.simplified._meta.model.DoesNotExist, e:
return ForbiddenSerializableResult(e)
return self._create_or_replace_single(data, instance)
else:
return self._create_or_replace_many(data, update=True)
def _delete_many(self):
if 'deletedata_in_qrystring' in self.request.GET:
try:
raw_data = self.request.GET['pks']
except KeyError, e:
return ErrorMsgSerializableResult('The querystring must contain a JSON encoded array of primary-keys in the "pks" attribute.',
httpresponsecls=HttpResponseBadRequest)
else:
raw_data = self.request.raw_post_data
list_of_pks = serializers.deserialize(self.comformat, raw_data)
if not isinstance(list_of_pks, list):
return ErrorMsgSerializableResult('Requires "pks" as a JSON encoded array.',
httpresponsecls=HttpResponseBadRequest)
pks = self._meta.simplified.deletemany(self.request.user, *list_of_pks)
result = self.extjswrapshortcut(pks, total=len(pks))
return SerializableResult(result)
def crud_delete(self, request, id=None):
""" Maps to the ``delete`` method of the simplified class. """
        is_single = id is not None
if is_single:
try:
self._meta.simplified.delete(request.user, id)
except PermissionDenied, e:
return ForbiddenSerializableResult(e)
else:
data = self.extjswrapshortcut(dict(id=id))
return SerializableResult(data)
else:
return self._delete_many()
| vegarang/devilry-django | devilry/restful/modelrestview.py | Python | bsd-3-clause | 11,188 |
"""
Run the .travis.yml file
"""
import yaml
import subprocess
import sys
__version__ = '0.0.1'
def run_scripts(travis, stage):
"""
Given an travis configuration and a stage, run its scripts if any
"""
stage_scripts = travis.get(stage, [])
success = True
for script in stage_scripts:
script_process = subprocess.Popen(
script,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True)
out, err = script_process.communicate()
returncode = int(script_process.returncode)
if returncode > 0:
sys.stdout.write(out)
sys.stderr.write(err)
success = False
return success
def process(args):
"""
    Run the requested install/run stages from the .travis.yml configuration
"""
action_stages = {
'install': [
'before_install',
'install',
'after_install'
],
'run': [
'before_script',
'script',
'after_script'
]
}
travis = None
with open('.travis.yml', 'r') as yaml_file:
        travis = yaml.safe_load(yaml_file)  # safe_load avoids executing arbitrary YAML tags
# This would be for testing a matrix of environment variables?
# envs = travis.get('env', [])
# for env in envs:
# print 'setting environment variable', env
git_options = travis.get('git', {})
git_submodules = git_options.get('submodules', True)
if git_submodules:
        print 'initializing and updating git submodules'
        # Actually run the update the message announces (assumes a git checkout).
        subprocess.call('git submodule update --init --recursive', shell=True)
all_success = True
for action, stages in action_stages.items():
if action in args.actions:
for stage in stages:
success = run_scripts(travis, stage)
all_success = success and all_success
if 'run' in args.actions:
if all_success:
stage = 'after_success'
else:
stage = 'after_failure'
run_scripts(travis, stage)
| jmullan/pytravis | pytravis/pytravis.py | Python | gpl-3.0 | 1,904 |
"""Module for the custom Django sampledata command."""
import csv
import random
from django.core import management
from django.conf import settings
from django.contrib.auth import get_user_model
from django.contrib.gis.geos import Point
from allauth.account.models import EmailAddress
from resources.models import (
Language,
TechnologicalArea,
ProgressOutcome,
YearLevel,
CurriculumLearningArea,
)
from tests.resources.factories import (
ResourceFactory,
NZQAStandardFactory,
)
# Events
from events.models import (
Location,
Series,
)
from tests.events.factories import (
SponsorFactory,
OrganiserFactory,
EventFactory,
)
# DTTA
from tests.dtta.factories import (
NewsArticleFactory,
PageFactory,
ProjectFactory,
RelatedLinkFactory,
)
# POET
from tests.poet.factories import (
POETFormResourceFactory,
POETFormSubmissionFactory,
POETFormProgressOutcomeGroupFactory,
)
class Command(management.base.BaseCommand):
"""Required command class for the custom Django sampledata command."""
help = "Add sample data to database."
def handle(self, *args, **options):
"""Automatically called when the sampledata command is given."""
if settings.DEPLOYMENT_TYPE == 'prod' and not settings.DEBUG:
raise management.base.CommandError(
'This command can only be executed in DEBUG mode on non-production website.'
)
# Clear all data
management.call_command('flush', interactive=False)
print('Database wiped.')
User = get_user_model()
# Create admin account
admin = User.objects.create_superuser(
'admin',
'[email protected]',
password=settings.SAMPLE_DATA_ADMIN_PASSWORD,
first_name='Admin',
last_name='Account'
)
EmailAddress.objects.create(
user=admin,
email=admin.email,
primary=True,
verified=True
)
print('Admin created.')
# Create user account
user = User.objects.create_user(
'user',
'[email protected]',
password=settings.SAMPLE_DATA_USER_PASSWORD,
first_name='Alex',
last_name='Doe'
)
EmailAddress.objects.create(
user=user,
email=user.email,
primary=True,
verified=True
)
print('User created.')
# Resources
Language.objects.create(name='English', css_class='language-en')
Language.objects.create(name='Māori', css_class='language-mi')
print('Languages created.')
curriculum_learning_areas = {
'English': 'english',
'Arts': 'arts',
'Health and physical education': 'health-pe',
'Learning languages': 'languages',
'Mathematics and statistics': 'mathematics',
'Science': 'science',
'Social sciences': 'social-sciences',
'Technology': 'technology',
}
for area_name, area_css_class in curriculum_learning_areas.items():
CurriculumLearningArea.objects.create(
name=area_name,
css_class=area_css_class,
)
print('Curriculum learning areas created.')
ta_ct = TechnologicalArea.objects.create(
name='Computational thinking',
abbreviation='CT',
css_class='ta-ct',
)
for i in range(1, 9):
ProgressOutcome.objects.create(
name='Computational thinking - Progress outcome {}'.format(i),
abbreviation='CT PO{}'.format(i),
technological_area=ta_ct,
css_class='po-ct',
)
ta_dddo = TechnologicalArea.objects.create(
name='Designing and developing digital outcomes',
abbreviation='DDDO',
css_class='ta-dddo',
)
for i in range(1, 7):
ProgressOutcome.objects.create(
name='Designing and developing digital outcomes - Progress outcome {}'.format(i),
abbreviation='DDDO PO{}'.format(i),
technological_area=ta_dddo,
css_class='po-dddo',
)
print('Technological areas created.')
print('Progress outcomes created.')
NZQAStandardFactory.create_batch(size=20)
for i in range(0, 14):
YearLevel.objects.create(
level=i
)
print('NZQA standards created.')
ResourceFactory.create_batch(size=20)
print('Resources created.')
# Events
SponsorFactory.create_batch(size=10)
print('Event sponsors created.')
OrganiserFactory.create_batch(size=10)
print('Event organisers created.')
event_series = {
(
'Computer Science for High Schools',
'CS4HS',
),
(
'Computer Science for Primary Schools',
'CS4PS',
),
(
'Computer Science for Professional Development',
'CS4PD',
),
(
'Code Club for Teachers',
'CC4T',
),
}
for (name, abbreviation) in event_series:
Series.objects.create(
name=name,
abbreviation=abbreviation,
)
print('Event series created.')
region_codes = dict()
for (code, name) in Location.REGION_CHOICES:
region_codes[name] = code
with open('general/management/commands/sample-data/nz-schools.csv') as csvfile:
reader = csv.DictReader(csvfile)
for row in random.sample(list(reader), 100):
if row['Longitude'] and row['Latitude'] and row['Region']:
Location.objects.create(
name=row['Name'],
street_address=row['Street'],
suburb=row['Suburb'],
city=row['City'],
region=region_codes[row['Region']],
coords=Point(
float(row['Longitude']),
float(row['Latitude'])
),
)
print('Event locations created.')
EventFactory.create_batch(size=50)
print('Events created.')
# DTTA
NewsArticleFactory.create_batch(size=20)
print('DTTA news articles created.')
PageFactory.create_batch(size=5)
print('DTTA pages created.')
ProjectFactory.create_batch(size=5)
print('DTTA projects created.')
RelatedLinkFactory.create_batch(size=10)
print('DTTA related links created.')
# POET
management.call_command('load_poet_data')
POETFormResourceFactory.create_batch(size=20)
print('POET resources created.')
POETFormProgressOutcomeGroupFactory.create_batch(size=6)
print('POET progress outcome groups created.')
POETFormSubmissionFactory.create_batch(size=800)
print('POET submissions created.')
| uccser/cs4teachers | dthm4kaiako/general/management/commands/sampledata.py | Python | mit | 7,305 |
from django.core.management.base import BaseCommand
from django.conf import settings
from django.db import connection
from collections import defaultdict
import os
from datetime import datetime, timedelta
from common.utils import Date
from schools.models import (
School, Boundary)
from stories.models import (
Question, Questiongroup, QuestionType,
QuestiongroupQuestions, Source, UserType,
Story, Answer)
from optparse import make_option
from collections import OrderedDict
from django.db.models import Q, Count
from reportlab.pdfgen import canvas
from reportlab.lib.pagesizes import A4, cm
from reportlab.platypus import Paragraph, Table, TableStyle, Image
from reportlab.platypus.flowables import HRFlowable
from reportlab.lib.enums import TA_JUSTIFY, TA_LEFT, TA_CENTER
from reportlab.lib import colors
from reportlab.lib.styles import getSampleStyleSheet
from common.utils import send_attachment
class Command(BaseCommand):
help = """
Description:
Generates a report on SMS data. Accepts
either a --from and --to date pair or a --days
parameter. Days will be calculated backwards from
today. Also accepts --emails as a list of email
IDs for the generated report to be sent to.
Usage:
./manage.py generate_sms_report [--from=YYYY-MM-DD] [--to=YYYY-MM-DD] \
[--days=number_of_days] [email protected],[email protected]
"""
option_list = BaseCommand.option_list + (
make_option('--from',
help='Start date'),
make_option('--to',
help='End date'),
make_option('--days',
help='Number of days'),
make_option('--emails',
help='Comma separated list of email ids'),
)
def handle(self, *args, **options):
start_date = options.get('from')
end_date = options.get('to')
days = options.get('days')
emails = options.get('emails')
if not emails:
print """
Error:
            --emails parameter not specified.
"""
print self.help
return
elif days:
end_date = datetime.today()
start_date = end_date - timedelta(days=int(days))
elif start_date or end_date:
if not (start_date and end_date):
print """
Error:
Please specify both --from and --to parameters.
"""
print self.help
return
date = Date()
if start_date:
sane = date.check_date_sanity(start_date)
if not sane:
print """
Error:
Wrong --from format. Expected YYYY-MM-DD
"""
print self.help
return
else:
start_date = date.get_datetime(start_date)
if end_date:
sane = date.check_date_sanity(end_date)
if not sane:
print """
Error:
Wrong --to format. Expected YYYY-MM-DD
"""
print self.help
return
else:
end_date = date.get_datetime(end_date)
else:
print self.help
return
emails = emails.split(",")
districts = []
gka_district_ids = set(
Story.objects.filter(
group__source__name="sms"
).values_list(
'school__admin3__parent__parent__id',
flat=True
)
)
for district_id in gka_district_ids:
district = Boundary.objects.get(id=district_id)
admin1_json = { 'name': district.name, 'id': district.id}
admin1_json['sms'] = self.get_story_meta(district.id,'district',start_date, end_date)
admin1_json['details'] = self.get_story_details(district.id,'district',start_date, end_date)
admin1_json['blocks'] = []
#print admin1_json
blocks = (Boundary.objects.all_active().filter(
parent_id=district_id,
type=district.type
).select_related('boundarycoord__coord', 'type__name',
'hierarchy__name'))
for block in blocks:
admin2_json = { 'name': block.name, 'id': block.id}
admin2_json['sms'] = self.get_story_meta(block.id,'block', start_date, end_date)
admin2_json['details'] = self.get_story_details(block.id,'block', start_date, end_date)
#if(int(admin2_json['sms']['stories']) > 0):
admin1_json['blocks'].append(admin2_json)
districts.append(admin1_json)
for each in districts:
blks = self.transform_data(each)
for blk in blks:
self.make_pdf(blk,start_date,end_date,blk[0][1],emails)
def make_pdf(self, data, start_date, end_date, filename, emails):
width, height = A4
styles = getSampleStyleSheet()
styleN = styles["BodyText"]
styleN.alignment = TA_LEFT
styleN.fontName = 'Helvetica'
styleN.textColor = colors.black
styleBH = styles["Heading3"]
styleBH.alignment = TA_CENTER
styleBH.fontName = 'Helvetica'
styleBH.textColor = colors.darkslategray
styleTH = styles["Heading1"]
styleTH.alignment = TA_CENTER
styleTH.fontName = 'Helvetica'
styleTH.textColor = colors.darkslateblue
styleGH = styles["Heading2"]
styleGH.alignment = TA_CENTER
styleGH.fontName = 'Helvetica'
styleGH.textColor = colors.darkslategray
#styleGH.backColor = colors.lightgrey
styleNC = styles["BodyText"]
#styleNC.alignment = TA_CENTER
styleNC.fontName = 'Helvetica'
def coord(x, y, unit=1):
x, y = x * unit, height - y * unit
return x, y
def style_row(row_array, style):
styled_array = []
for each in row_array:
styled_array.extend([Paragraph(str(each),style)])
return styled_array
c = canvas.Canvas(os.path.join(settings.PDF_REPORTS_DIR, 'gka_sms/')+filename+".pdf", pagesize=A4)
#logo
logo_image = Image("%s/images/akshara_logo.jpg" % settings.STATICFILES_DIRS)
logo_image.drawOn(c, *coord(14, 3, cm))
#HR
hr = HRFlowable(width="80%", thickness=1, lineCap='round', color=colors.lightgrey, spaceBefore=1, spaceAfter=1, hAlign='CENTER', vAlign='BOTTOM', dash=None)
hr.wrapOn(c, width, height)
hr.drawOn(c, *coord(1.8, 3.2, cm))
#Headings
header = Paragraph('GKA SMS Summary<br/><hr/>', styleTH)
header.wrapOn(c, width, height)
header.drawOn(c, *coord(0, 4, cm))
#Date Range
date_range = Paragraph("From " + start_date.strftime("%d %b, %Y") + " to " + end_date.strftime("%d %b, %Y"), styleBH)
date_range.wrapOn(c, width, height)
date_range.drawOn(c, *coord(0, 4.5, cm))
#Details
styled_data = [style_row(data[0],styleGH)]
for row in data[1:4]:
styled_data.append(style_row(row,styleN))
table_header = Table(styled_data, colWidths=[7 * cm,
5* cm, 5 * cm])
table_header.setStyle(TableStyle([
('INNERGRID', (0,0), (-1,-1), 0.25, colors.lightgrey),
('BOX', (0,0), (-1,-1), 0.25, colors.lightgrey),
('LINEBELOW', (0,0), (2, 0), 1.0, colors.darkgrey),
('LINEBELOW', (0,3), (2, 3), 1.0, colors.darkgrey),
]))
table_header.wrapOn(c, width, height)
table_header.drawOn(c, *coord(1.8, 9, cm))
#Questions
styled_data =[style_row(['Questions','Yes','No','Yes','No'],styleBH)]
for row in data[4:len(data)]:
styled_data.append(style_row(row,styleN))
table = Table(styled_data, colWidths=[7 * cm,
2.5 * cm, 2.5 * cm,
2.5 * cm, 2.5 * cm])
table.setStyle(TableStyle([
('INNERGRID', (0,0), (-1,-1), 0.25, colors.lightgrey),
('BOX', (0,0), (-1,-1), 0.25, colors.lightgrey),
#('LINEBELOW', (0,0), (2, 0), 1.0, colors.green),
]))
table.wrapOn(c, width, height)
table.drawOn(c, *coord(1.8, 17.5, cm))
#Footer
#HR
hr = HRFlowable(width="80%", thickness=1, lineCap='round', color=colors.lightgrey, spaceBefore=1, spaceAfter=1, hAlign='CENTER', vAlign='BOTTOM', dash=None)
hr.wrapOn(c, width, height)
hr.drawOn(c, *coord(1.8, 27, cm))
#Disclaimer
        klp_text = Paragraph("This report has been generated by Karnataka Learning Partnership (www.klp.org.in/gka) for Akshara Foundation.", styleN)
klp_text.wrapOn(c, width, height)
klp_text.drawOn(c, *coord(1.8, 27.5, cm))
c.save()
self.send_email(start_date.strftime("%d/%m/%Y") + " to " + end_date.strftime("%d/%m/%Y"),filename, emails)
def send_email(self,date_range, block, emails):
print 'Sending email for', block
send_attachment(
from_email=settings.EMAIL_DEFAULT_FROM,
to_emails=emails,
subject= block + ': GKA SMS Report for '+ date_range,
folder='gka_sms',
filename=block
)
def get_json(self, source, stories_qset):
json = {}
json['stories'] = stories_qset.count()
json['schools'] = stories_qset.distinct('school').count()
return json
def transform_data(self, district):
blocks =[]
questions = {}
gka_question_seq = ['How many responses indicate that Math classes were happening in class 4 and 5 during the visit?',
'How many responses indicate that class 4 and 5 math teachers are trained in GKA methodology in the schools visited?',
'How many responses indicate evidence of Ganitha Kalika Andolana TLM being used in class 4 or 5 during the visit?',
'How many responses indicate that representational stage was being practiced during Math classes in class 4 and 5 during the visit?',
'How many responses indicate that group work was happening in the schools visited?']
for block in district["blocks"]:
data = [["Details", "Block-"+block["name"].capitalize(), "District-"+district["name"].capitalize()]]
data.append(["Schools", block["sms"]["schools"], district["sms"]["schools"]])
data.append(["SMS Messages", block["sms"]["stories"], district["sms"]["stories"]])
data.append(["Schools with SMS Messages", block["sms"]["schools_with_stories"], district["sms"]["schools_with_stories"]])
for each in block["details"]:
questions[each["question"]["display_text"]]= {"block": self.get_response_str(each["answers"])}
for each in district["details"]:
questions[each["question"]["display_text"]]["district"] = self.get_response_str(each["answers"])
custom_sort = self.make_custom_sort([ gka_question_seq ])
result = custom_sort(questions)
for question in result:
row = [question]
row.extend(questions[question]["block"])
row.extend(questions[question]["district"])
data.append(row)
blocks.append(data)
return blocks
def make_custom_sort(self,orders):
orders = [{k: -i for (i, k) in enumerate(reversed(order), 1)} for order in orders]
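        # Each listed key is mapped to a negative rank (earlier keys more
        # negative), so the sort key ``order.get(x[0], 0)`` places listed keys
        # first, in their listed order, ahead of unlisted keys (rank 0).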
def process(stuff):
if isinstance(stuff, dict):
l = [(k, process(v)) for (k, v) in stuff.items()]
keys = set(stuff)
for order in orders:
if keys.issuperset(order):
return OrderedDict(sorted(l, key=lambda x: order.get(x[0], 0)))
return OrderedDict(sorted(l))
if isinstance(stuff, list):
return [process(x) for x in stuff]
return stuff
return process
def get_response_str(self, answers):
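        # e.g. answers = {"options": {"Yes": 12, "No": 3}} returns ['12', '3'];
        # an empty/missing options dict returns ["No Responses", "-"].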
yes = 0
no = 0
if answers["options"]:
if "Yes" in answers["options"]:
yes = answers["options"]["Yes"]
if "No" in answers["options"]:
no = answers["options"]["No"]
#return [str(yes)+'('+str((yes*100)/(yes+no))+'%)',str(no)+'('+str((no*100)/(yes+no))+'%)']
return [str(yes),str(no)]
else:
return ["No Responses","-"]
def source_filter(self, source, stories_qset):
stories_qset = stories_qset.filter(
group__source__name=source)
return stories_qset
def get_story_meta(self, boundary_id, boundary_type, start_date, end_date):
source = 'sms'
admin2_id = None
admin1_id = None
if boundary_type == 'block':
admin2_id = boundary_id
if boundary_type == 'district':
admin1_id = boundary_id
school_type = 'Primary School'
school_qset = School.objects.filter(
admin3__type__name=school_type, status=2)
stories_qset = Story.objects.filter(
school__admin3__type__name=school_type)
if admin1_id:
school_qset = school_qset.filter(
schooldetails__admin1__id=admin1_id)
stories_qset = stories_qset.filter(
school__schooldetails__admin1__id=admin1_id)
if admin2_id:
school_qset = school_qset.filter(
schooldetails__admin2__id=admin2_id)
stories_qset = stories_qset.filter(
school__schooldetails__admin2__id=admin2_id)
if start_date:
stories_qset = stories_qset.filter(
date_of_visit__gte=start_date)
if end_date:
stories_qset = stories_qset.filter(
date_of_visit__lte=end_date)
if source:
stories_qset = self.source_filter(
source,
stories_qset
)
#print stories_qset.count()
response_json = {}
response_json['schools'] = school_qset.count()
response_json['stories'] = stories_qset.count()
response_json['schools_with_stories'] = stories_qset.distinct('school').count()
#print response_json
return response_json
def get_que_and_ans(self, stories, source, school_type):
response_list = []
questions = Question.objects.all().select_related('question_type')
if source:
questions = questions.filter(
questiongroup__source__name=source)
if school_type:
questions = questions.filter(
school_type__name=school_type)
#print questions.count()
for question in questions.distinct('id'):
j = {}
j['question'] = {}
j['question']['key'] = question.key
j['question']['text'] = question.text
j['question']['display_text'] = question.display_text
j['answers'] = {}
j['answers']['question_type'] = question.question_type.name
answer_counts = question.answer_set.filter(
story__in=stories
).values('text').annotate(answer_count=Count('text'))
options = {}
for count in answer_counts:
options[count['text']] = count['answer_count']
j['answers']['options'] = options
response_list.append(j)
return response_list
def get_story_details(self, boundary_id, boundary_type, start_date, end_date):
source = 'sms'
admin1_id = None
admin2_id = None
school_type = 'Primary School'
if boundary_type == 'block':
admin2_id = boundary_id
if boundary_type == 'district':
admin1_id = boundary_id
stories = Story.objects.all()
if source:
stories = stories.filter(group__source__name=source)
if school_type:
stories = stories.filter(school__admin3__type__name=school_type)
if admin1_id:
stories = stories.filter(
school__schooldetails__admin1__id=admin1_id
)
if admin2_id:
stories = stories.filter(
school__schooldetails__admin2__id=admin2_id
)
if start_date:
stories = stories.filter(date_of_visit__gte=start_date)
if end_date:
stories = stories.filter(date_of_visit__lte=end_date)
response_json = self.get_que_and_ans(stories, source, school_type)
return response_json
| klpdotorg/dubdubdub | apps/stories/management/commands/generate_sms_report.py | Python | mit | 17,179 |
#!/usr/bin/python
# Copyright (c) 2015 IBM
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: os_flavor_facts
short_description: Retrieve facts about one or more flavors
author: "David Shrewsbury (@Shrews)"
version_added: "2.1"
description:
- Retrieve facts about available OpenStack instance flavors. By default,
facts about ALL flavors are retrieved. Filters can be applied to get
facts for only matching flavors. For example, you can filter on the
amount of RAM available to the flavor, or the number of virtual CPUs
available to the flavor, or both. When specifying multiple filters,
*ALL* filters must match on a flavor before that flavor is returned as
a fact.
notes:
- This module creates a new top-level C(openstack_flavors) fact, which
contains a list of unsorted flavors.
requirements:
- "python >= 2.6"
- "shade"
options:
name:
description:
- A flavor name. Cannot be used with I(ram) or I(vcpus) or I(ephemeral).
required: false
default: None
ram:
description:
- "A string used for filtering flavors based on the amount of RAM
(in MB) desired. This string accepts the following special values:
'MIN' (return flavors with the minimum amount of RAM), and 'MAX'
(return flavors with the maximum amount of RAM)."
- "A specific amount of RAM may also be specified. Any flavors with this
exact amount of RAM will be returned."
- "A range of acceptable RAM may be given using a special syntax. Simply
prefix the amount of RAM with one of these acceptable range values:
'<', '>', '<=', '>='. These values represent less than, greater than,
less than or equal to, and greater than or equal to, respectively."
required: false
    default: None
vcpus:
description:
- A string used for filtering flavors based on the number of virtual
CPUs desired. Format is the same as the I(ram) parameter.
required: false
    default: None
limit:
description:
- Limits the number of flavors returned. All matching flavors are
returned by default.
required: false
default: None
ephemeral:
description:
- A string used for filtering flavors based on the amount of ephemeral
storage. Format is the same as the I(ram) parameter
required: false
    default: None
version_added: "2.3"
availability_zone:
description:
- Ignored. Present for backwards compatibility
required: false
extends_documentation_fragment: openstack
'''
EXAMPLES = '''
# Gather facts about all available flavors
- os_flavor_facts:
cloud: mycloud
# Gather facts for the flavor named "xlarge-flavor"
- os_flavor_facts:
cloud: mycloud
name: "xlarge-flavor"
# Get all flavors that have exactly 512 MB of RAM.
- os_flavor_facts:
cloud: mycloud
ram: "512"
# Get all flavors that have 1024 MB or more of RAM.
- os_flavor_facts:
cloud: mycloud
ram: ">=1024"
# Get a single flavor that has the minimum amount of RAM. Using the 'limit'
# option will guarantee only a single flavor is returned.
- os_flavor_facts:
cloud: mycloud
ram: "MIN"
limit: 1
# Get all flavors with 1024 MB of RAM or more, AND exactly 2 virtual CPUs.
- os_flavor_facts:
cloud: mycloud
ram: ">=1024"
vcpus: "2"
# Get all flavors with 1024 MB of RAM or more, exactly 2 virtual CPUs, and
# less than 30gb of ephemeral storage.
- os_flavor_facts:
cloud: mycloud
ram: ">=1024"
vcpus: "2"
ephemeral: "<30"
'''
RETURN = '''
openstack_flavors:
description: Dictionary describing the flavors.
returned: On success.
type: complex
contains:
id:
description: Flavor ID.
returned: success
type: string
sample: "515256b8-7027-4d73-aa54-4e30a4a4a339"
name:
description: Flavor name.
returned: success
type: string
sample: "tiny"
disk:
description: Size of local disk, in GB.
returned: success
type: int
sample: 10
ephemeral:
description: Ephemeral space size, in GB.
returned: success
type: int
sample: 10
ram:
description: Amount of memory, in MB.
returned: success
type: int
sample: 1024
swap:
description: Swap space size, in MB.
returned: success
type: int
sample: 100
vcpus:
description: Number of virtual CPUs.
returned: success
type: int
sample: 2
is_public:
description: Make flavor accessible to the public.
returned: success
type: bool
sample: true
'''
from distutils.version import StrictVersion
try:
import shade
HAS_SHADE = True
except ImportError:
HAS_SHADE = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.openstack import openstack_full_argument_spec, openstack_module_kwargs
def main():
argument_spec = openstack_full_argument_spec(
name=dict(required=False, default=None),
ram=dict(required=False, default=None),
vcpus=dict(required=False, default=None),
limit=dict(required=False, default=None, type='int'),
ephemeral=dict(required=False, default=None),
)
module_kwargs = openstack_module_kwargs(
mutually_exclusive=[
['name', 'ram'],
['name', 'vcpus'],
['name', 'ephemeral']
]
)
module = AnsibleModule(argument_spec, **module_kwargs)
if not HAS_SHADE:
module.fail_json(msg='shade is required for this module')
name = module.params['name']
vcpus = module.params['vcpus']
ram = module.params['ram']
ephemeral = module.params['ephemeral']
limit = module.params['limit']
try:
cloud = shade.openstack_cloud(**module.params)
if name:
flavors = cloud.search_flavors(filters={'name': name})
else:
flavors = cloud.list_flavors()
filters = {}
if vcpus:
filters['vcpus'] = vcpus
if ram:
filters['ram'] = ram
if ephemeral:
filters['ephemeral'] = ephemeral
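        # e.g. filters = {'ram': '>=1024', 'vcpus': '2'} keeps only flavors
        # matching every constraint; shade's range_search interprets the
        # range syntax described in DOCUMENTATION above.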
if filters:
# Range search added in 1.5.0
if StrictVersion(shade.__version__) < StrictVersion('1.5.0'):
module.fail_json(msg="Shade >= 1.5.0 needed for this functionality")
flavors = cloud.range_search(flavors, filters)
if limit is not None:
flavors = flavors[:limit]
module.exit_json(changed=False,
ansible_facts=dict(openstack_flavors=flavors))
except shade.OpenStackCloudException as e:
module.fail_json(msg=str(e))
if __name__ == '__main__':
main()
| noroutine/ansible | lib/ansible/modules/cloud/openstack/os_flavor_facts.py | Python | gpl-3.0 | 7,373 |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Example DAG demonstrating the usage of the BashOperator."""
from datetime import timedelta
from airflow import DAG
from airflow.operators.bash import BashOperator
from airflow.operators.dummy_operator import DummyOperator
from airflow.utils.dates import days_ago
args = {
'owner': 'airflow',
'start_date': days_ago(2),
}
dag = DAG(
dag_id='example_bash_operator',
default_args=args,
schedule_interval='0 0 * * *',
dagrun_timeout=timedelta(minutes=60),
tags=['example']
)
run_this_last = DummyOperator(
task_id='run_this_last',
dag=dag,
)
# [START howto_operator_bash]
run_this = BashOperator(
task_id='run_after_loop',
bash_command='echo 1',
dag=dag,
)
# [END howto_operator_bash]
run_this >> run_this_last
for i in range(3):
task = BashOperator(
task_id='runme_' + str(i),
bash_command='echo "{{ task_instance_key_str }}" && sleep 1',
dag=dag,
)
task >> run_this
# [START howto_operator_bash_template]
also_run_this = BashOperator(
task_id='also_run_this',
bash_command='echo "run_id={{ run_id }} | dag_run={{ dag_run }}"',
dag=dag,
)
# [END howto_operator_bash_template]
also_run_this >> run_this_last
if __name__ == "__main__":
dag.cli()
| wooga/airflow | airflow/example_dags/example_bash_operator.py | Python | apache-2.0 | 2,042 |
#!/usr/bin/env python
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import re
import sys
from cafa_validation_utils import get_valid_keywords
pr_field = re.compile(r"^PR=[01]\.[0-9][0-9];$")
rc_field = re.compile(r"^RC=[01]\.[0-9][0-9]$")
hpo_field = re.compile(r"^HP:[0-9]{5,7}$")
target_field = re.compile(r"^T[0-9]{5,20}$")
confidence_field = re.compile(r"^[01]\.[0-9][0-9]$")
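# Illustrative strings accepted by the patterns above (assumed examples):
#   pr_field:         "PR=0.95;"
#   rc_field:         "RC=0.80"
#   hpo_field:        "HP:0000118"
#   target_field:     "T96060000001"
#   confidence_field: "0.42"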
legal_states1 = ["author","model","keywords","accuracy","hpo_prediction","end"]
legal_states2 = ["author","model","keywords","hpo_prediction","end"]
legal_states3 = ["author","model","hpo_prediction","end"]
legal_keywords = get_valid_keywords()
def author_check(inrec):
correct = True
errmsg = None
fields = [i.strip() for i in inrec.split()]
if len(fields) != 2:
correct = False
errmsg = "AUTHOR: invalid number of fields. Should be 2"
elif fields[0] != "AUTHOR":
correct = False
errmsg = "AUTHOR: First field should be AUTHOR"
return correct, errmsg
def model_check(inrec):
correct = True
errmsg = None
fields = [i.strip() for i in inrec.split()]
if len(fields) != 2:
correct = False
errmsg = "MODEL: invalid number of fields. Should be 2"
elif fields[0] != "MODEL":
correct = False
errmsg = "MODEL: First field should be MODEL"
elif len(fields[1]) != 1 or not fields[1].isdigit():
correct = False
errmsg = "MODEL: second field should be single digit."
return correct, errmsg
def keywords_check(inrec):
correct = True
errmsg = None
if inrec[:8] != "KEYWORDS":
correct = False
errmsg = "KEYWORDS: first field should be KEYWORDS"
else:
keywords = [i.strip() for i in inrec[8:].split(",")]
for keyword in keywords:
            # strip a trailing full stop
            if keyword and keyword[-1] == ".":
                keyword = keyword[:-1]
if keyword not in legal_keywords:
correct = False
errmsg = "KEYWORDS: illegal keyword %s" % keyword
break
return correct, errmsg
def accuracy_check(inrec):
correct = True
errmsg = None
fields = [i.strip() for i in inrec.split()]
if len(fields) != 4:
correct = False
errmsg = "ACCURACY: error in number of fields. Should be 4"
elif fields[0] != "ACCURACY":
correct = False
errmsg = "ACCURACY: first field should be 'ACCURACY'"
elif not fields[1].isdigit() or len(fields[1]) != 1:
correct = False
errmsg = "ACCURACY: second field should be a single digit"
elif not pr_field.match(fields[2]):
correct = False
errmsg = "ACCURACY: error in PR field"
elif not rc_field.match(fields[3]):
correct = False
errmsg = "ACCURACY: error in RC field"
return correct, errmsg
def hpo_prediction_check(inrec):
correct = True
errmsg = None
fields = [i.strip() for i in inrec.split()]
if len(fields) != 3:
correct = False
errmsg = "HPO prediction: wrong number of fields. Should be 3"
elif not target_field.match(fields[0]):
correct = False
errmsg = "HPO prediction: error in first (Target ID) field"
elif not hpo_field.match(fields[1]):
correct = False
errmsg = "HPO prediction: error in second (HP ID) field"
elif not confidence_field.match(fields[2]):
correct = False
errmsg = "GO prediction: error in third (confidence) field"
elif float(fields[2]) > 1.0:
correct = False
errmsg = "GO prediction: error in third (confidence) field"
return correct, errmsg
def end_check(inrec):
correct = True
errmsg = None
fields = [i.strip() for i in inrec.split()]
if len(fields) != 1:
correct = False
errmsg = "END: wrong number of fields. Should be 1"
elif fields[0] != "END":
correct = False
errmsg = "END: record should include the word END only"
return correct, errmsg
"""
Function builds the error message to incorporate the filename and what line the error was raised on.
Returns the status of whether the line is correct and the error message if one exists.
"""
def handle_error(correct, errmsg, inrec, line_num, fileName):
if not correct:
line = "Error in %s, line %s, " % (fileName, line_num)
return False, line + errmsg
else:
return True, "Nothing wrong here"
def cafa_checker(infile, fileName):
"""
Main program that: 1. identifies fields; 2. Calls the proper checker function; 3. calls the
error handler "handle_error" which builds the error report. If correct is False, the function returns correct, errmsg
to the file_name_check function in cafa3_format_checker.
"""
visited_states = []
s_token = 0
n_accuracy = 0
first_prediction = True
first_accuracy = True
first_keywords = True
n_models = 0
line_num = 0
for inline in infile:
try:
inline = inline.decode()
except AttributeError:
pass
line_num += 1
        inrec = [i.strip() for i in inline.split()]
        if not inrec:
            # skip blank lines rather than crashing on inrec[0]
            continue
        field1 = inrec[0]
# Check which field type (state) we are in
if field1 == "AUTHOR":
state = "author"
elif field1 == "MODEL":
state = "model"
elif field1 == "KEYWORDS":
state = "keywords"
elif field1 == "ACCURACY":
state = "accuracy"
elif field1 == "END":
state = "end"
else: #default to prediction state
state = "hpo_prediction"
# print "****"
# print "FIELD1", field1
# print inline, state
# Check for errors according to state
if state == "author":
correct,errmsg = author_check(inline)
correct, errmsg = handle_error(correct, errmsg, inline, line_num, fileName)
if not correct:
return correct, errmsg
visited_states.append(state)
elif state == "model":
n_models += 1
n_accuracy = 0
if n_models > 3:
return False, "Too many models. Only up to 3 allowed"
correct,errmsg = model_check(inline)
correct, errmsg = handle_error(correct, errmsg, inline, line_num, fileName)
if not correct:
return correct, errmsg
if n_models == 1:
visited_states.append(state)
elif state == "keywords":
if first_keywords:
visited_states.append(state)
first_keywords = False
correct, errmsg = keywords_check(inline)
correct, errmsg = handle_error(correct, errmsg, inline, line_num, fileName)
if not correct:
return correct, errmsg
elif state == "accuracy":
if first_accuracy:
visited_states.append(state)
first_accuracy = False
n_accuracy += 1
if n_accuracy > 3:
correct, errmsg = handle_error(False, "ACCURACY: too many ACCURACY records", inline, line_num, fileName)
if not correct:
return correct, errmsg
else:
correct, errmsg = accuracy_check(inline)
if not correct:
return correct, errmsg
elif state == "hpo_prediction":
correct, errmsg = hpo_prediction_check(inline)
correct, errmsg = handle_error(correct, errmsg, inline, line_num, fileName)
if not correct:
return correct, errmsg
if first_prediction:
visited_states.append(state)
first_prediction = False
elif state == "end":
correct, errmsg = end_check(inline)
correct, errmsg = handle_error(correct, errmsg, inline, line_num, fileName)
if not correct:
return correct, errmsg
visited_states.append(state)
    # End of per-line for-loop
if (visited_states != legal_states1 and
visited_states != legal_states2 and
visited_states != legal_states3):
errmsg = "Error in " + fileName + "\n"
errmsg += "Sections found in the file: [" + ", ".join(visited_states) + "]\n"
errmsg += "file not formatted according to CAFA 4 specs\n"
errmsg += "Check whether all these record types are in your file in the correct order\n"
errmsg += "AUTHOR, MODEL, KEYWORDS, ACCURACY (optional), predictions, END"
return False, errmsg
else:
return True, "%s, passed the CAFA 4 HPO prediction format checker" % fileName
| idoerg/cafa-format-check | cafa_hpo_format_checker.py | Python | gpl-3.0 | 9,287 |
#!/usr/bin/env python
# -*- encoding: utf-8
import csv
import gzip
from collections import defaultdict
from ec_settings import POS, NEG
class CorpusReader(object):
def _csv_to_dict(self, fn):
"""
Read in the contents of a CSV file and place the contents
in a dict of dicts.
The CSV file is supposed to be of a form:
val_1,val_2,val_3
where:
- 'val_1' is a type of emotion ('pos'/'neg')
- 'val_2' is a term (bigram/trigram etc.)
- 'val_3' is a frequency of occurence of 'val_2'
in the context of 'val_1'
Return a tuple of two values:
- a dict of dicts: adict[val_1][val_2] = val_3
- a tuple of all val_2s (ie. terms/bigrams/trigrams)
"""
terms = set([])
raw_data = self._read_csv(fn)
data = defaultdict(lambda: defaultdict(int))
for aline in raw_data:
term, pos_freq, neg_freq = aline
data[POS][term] = pos_freq
data[NEG][term] = neg_freq
terms.add(term)
return data, terms
def _read_csv(self, fn):
csv_f = gzip.open(fn)
csv_reader = csv.reader(csv_f)
data = []
for arow in csv_reader:
data.append(arow)
csv_f.close()
return data
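# Illustrative usage (assumed file name and contents, not part of the
# original module):
#   reader = CorpusReader()
#   data, terms = reader._csv_to_dict('bigrams.csv.gz')
#   data[POS]['not bad']  # frequency of 'not bad' in positive contexts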
| wojtekwalczak/EmoClassifier | src/corpus_reader.py | Python | mit | 1,285 |
# -*- coding: utf-8 -*-
"""Helper functions for libComXml
"""
__all__ = ['codi_periode', 'codi_dh', 'extreu_periode', 'rodes',
'codi_refact', 'nom_refact', 'codi_reg_refact', 'nom_reg_refact',
'parse_totals_refact']
CODIS_REFACT = {'RT42011': '40',
'RT12012': '41',
'RM42012': '42'}
CODIS_REG_REFACT = {'RGT42011': '40',
'RGT12012': '41',
'RGM42012': '42'}
def rodes(giro):
"""Retorna el nombre de rodes senceres segons el giro
"""
return len(str(giro)) - 1
def extreu_periode(name):
"""Extreu el nom del període del name de la lectura
"""
if '(' not in name:
return name
return name.split('(')[-1].split(')')[0]
def codi_periode(codi_dh, periode):
"""Retorna el codi OCSUM del periode segons
http://172.26.0.42:2500/wiki/show/Codificacio_Periodes_OCSUM
Taula 42 del document d'OCSUM:
OCSUM - E - Tablas de códigos 2012.05.23.doc
:param codi_dh: codi dh de la tarifa
:param periode: nom del periode en format Px on x = {1...6}
"""
if codi_dh == '1':
return '10'
else:
return '%s%s' % (codi_dh, periode[-1])
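# Illustrative examples derived from the rules above:
#   codi_periode('1', 'P1') -> '10'
#   codi_periode('6', 'P3') -> '63'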
def codi_dh(tarifa, nlectures=6):
"""Retorna el codi ocsum de discriminació horaria
Taules 35 i 107 del document d'OCSUM:
OCSUM - E - Tablas de códigos 2012.05.23.doc
:param tarifa: codi de la tarifa
:param nlectures: nombre de lectures
"""
if tarifa in ('001', '005'):
return '1'
elif tarifa in ('004', '006'):
return '2'
elif tarifa in ('003', '012', '013', '014', '015', '016'):
return '6'
elif tarifa == '011':
if nlectures == 6:
return '6'
else:
return '3'
elif tarifa in ('007', '008'):
return '8'
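# Illustrative examples derived from the tables above:
#   codi_dh('001') -> '1'
#   codi_dh('011', nlectures=2) -> '3'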
def codi_refact(producte):
"""Retorna el codi ocsum de refacturació
:param producte: nom del producte
"""
return CODIS_REFACT.get(producte, False)
def nom_refact(producte):
"""Retorna el nom del producte
:param producte: codi ocsum del producte
"""
ref = dict(((v, k) for k, v in CODIS_REFACT.items()))
return ref.get(producte, False)
def codi_reg_refact(producte):
"""Retorna el codi ocsum de refacturació
:param producte: nom del producte
"""
return CODIS_REG_REFACT.get(producte, False)
def nom_reg_refact(producte):
"""Retorna el nom del producte
:param producte: codi ocsum del producte
"""
ref = dict(((v, k) for k, v in CODIS_REG_REFACT.items()))
return ref.get(producte, False)
def parse_totals_refact(cadena):
"""Retorna els totals de les línies de refacturacio"""
totals = []
for i, x in enumerate(cadena.split(' ')):
if i in (4, 7):
totals.append(float(x))
return totals[0], totals[1]
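# Illustrative example (assumed record layout; fields 4 and 7 carry the
# totals):
#   parse_totals_refact('a b c d 1.5 e f 2.5') -> (1.5, 2.5)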
| Som-Energia/switching | switching/helpers/funcions.py | Python | gpl-3.0 | 2,866 |
"""
This module provides convenient functions to transform sympy expressions to
lambda functions which can be used to calculate numerical values very fast.
"""
from __future__ import print_function, division
import inspect
import textwrap
from sympy.external import import_module
from sympy.core.compatibility import exec_, is_sequence, iterable, string_types, range
from sympy.utilities.decorator import doctest_depends_on
# These are the namespaces the lambda functions will use.
MATH = {}
MPMATH = {}
NUMPY = {}
SYMPY = {}
NUMEXPR = {}
# Default namespaces, letting us define translations that can't be defined
# by simple variable maps, like I => 1j
# These are separate from the names above because the above names are modified
# throughout this file, whereas these should remain unmodified.
MATH_DEFAULT = {}
MPMATH_DEFAULT = {}
NUMPY_DEFAULT = {"I": 1j}
SYMPY_DEFAULT = {}
NUMEXPR_DEFAULT = {}
# Mappings between sympy and other modules function names.
MATH_TRANSLATIONS = {
"Abs": "fabs",
"ceiling": "ceil",
"E": "e",
"ln": "log",
}
MPMATH_TRANSLATIONS = {
"Abs": "fabs",
"elliptic_k": "ellipk",
"elliptic_f": "ellipf",
"elliptic_e": "ellipe",
"elliptic_pi": "ellippi",
"ceiling": "ceil",
"chebyshevt": "chebyt",
"chebyshevu": "chebyu",
"E": "e",
"I": "j",
"ln": "log",
#"lowergamma":"lower_gamma",
"oo": "inf",
#"uppergamma":"upper_gamma",
"LambertW": "lambertw",
"MutableDenseMatrix": "matrix",
"ImmutableMatrix": "matrix",
"conjugate": "conj",
"dirichlet_eta": "altzeta",
"Ei": "ei",
"Shi": "shi",
"Chi": "chi",
"Si": "si",
"Ci": "ci"
}
NUMPY_TRANSLATIONS = {
"Abs": "abs",
"acos": "arccos",
"acosh": "arccosh",
"arg": "angle",
"asin": "arcsin",
"asinh": "arcsinh",
"atan": "arctan",
"atan2": "arctan2",
"atanh": "arctanh",
"ceiling": "ceil",
"E": "e",
"im": "imag",
"ln": "log",
"Max": "amax",
"Min": "amin",
"oo": "inf",
"re": "real",
"SparseMatrix": "array",
"ImmutableSparseMatrix": "array",
"Matrix": "array",
"MutableDenseMatrix": "array",
"ImmutableMatrix": "array",
"ImmutableDenseMatrix": "array",
}
NUMEXPR_TRANSLATIONS = {}
# Available modules:
MODULES = {
"math": (MATH, MATH_DEFAULT, MATH_TRANSLATIONS, ("from math import *",)),
"mpmath": (MPMATH, MPMATH_DEFAULT, MPMATH_TRANSLATIONS, ("from mpmath import *",)),
"numpy": (NUMPY, NUMPY_DEFAULT, NUMPY_TRANSLATIONS, ("import_module('numpy')",)),
"sympy": (SYMPY, SYMPY_DEFAULT, {}, (
"from sympy.functions import *",
"from sympy.matrices import *",
"from sympy import Integral, pi, oo, nan, zoo, E, I",)),
"numexpr" : (NUMEXPR, NUMEXPR_DEFAULT, NUMEXPR_TRANSLATIONS,
("import_module('numexpr')", )),
}
def _import(module, reload="False"):
"""
Creates a global translation dictionary for module.
The argument module has to be one of the following strings: "math",
"mpmath", "numpy", "sympy".
These dictionaries map names of python functions to their equivalent in
other modules.
"""
try:
namespace, namespace_default, translations, import_commands = MODULES[
module]
except KeyError:
raise NameError(
"'%s' module can't be used for lambdification" % module)
# Clear namespace or exit
if namespace != namespace_default:
# The namespace was already generated, don't do it again if not forced.
if reload:
namespace.clear()
namespace.update(namespace_default)
else:
return
for import_command in import_commands:
if import_command.startswith('import_module'):
module = eval(import_command)
if module is not None:
namespace.update(module.__dict__)
continue
else:
try:
exec_(import_command, {}, namespace)
continue
except ImportError:
pass
raise ImportError(
"can't import '%s' with '%s' command" % (module, import_command))
# Add translated names to namespace
for sympyname, translation in translations.items():
namespace[sympyname] = namespace[translation]
@doctest_depends_on(modules=('numpy',))
def lambdify(args, expr, modules=None, printer=None, use_imps=True,
dummify=True):
"""
Returns a lambda function for fast calculation of numerical values.
If not specified differently by the user, SymPy functions are replaced as
far as possible by either python-math, numpy (if available) or mpmath
functions - exactly in this order. To change this behavior, the "modules"
argument can be used. It accepts:
- the strings "math", "mpmath", "numpy", "numexpr", "sympy"
- any modules (e.g. math)
- dictionaries that map names of sympy functions to arbitrary functions
- lists that contain a mix of the arguments above, with higher priority
given to entries appearing first.
The default behavior is to substitute all arguments in the provided
expression with dummy symbols. This allows for applied functions (e.g.
f(t)) to be supplied as arguments. Call the function with dummify=False if
dummy substitution is unwanted (and `args` is not a string). If you want
to view the lambdified function or provide "sympy" as the module, you
should probably set dummify=False.
For functions involving large array calculations, numexpr can provide a
significant speedup over numpy. Please note that the available functions
for numexpr are more limited than numpy but can be expanded with
implemented_function and user defined subclasses of Function. If specified,
numexpr may be the only option in modules. The official list of numexpr
functions can be found at:
https://github.com/pydata/numexpr#supported-functions
In previous releases ``lambdify`` replaced ``Matrix`` with ``numpy.matrix``
by default. As of release 0.7.7 ``numpy.array`` is the default.
To get the old default behavior you must pass in ``[{'ImmutableMatrix':
numpy.matrix}, 'numpy']`` to the ``modules`` kwarg.
>>> from sympy import lambdify, Matrix
>>> from sympy.abc import x, y
>>> import numpy
>>> array2mat = [{'ImmutableMatrix': numpy.matrix}, 'numpy']
>>> f = lambdify((x, y), Matrix([x, y]), modules=array2mat)
>>> f(1, 2)
matrix([[1],
[2]])
Usage
=====
(1) Use one of the provided modules:
>>> from sympy import sin, tan, gamma
>>> from sympy.utilities.lambdify import lambdastr
>>> from sympy.abc import x, y
>>> f = lambdify(x, sin(x), "math")
Attention: Functions that are not in the math module will throw a name
error when the lambda function is evaluated! So this would
be better:
>>> f = lambdify(x, sin(x)*gamma(x), ("math", "mpmath", "sympy"))
(2) Use some other module:
>>> import numpy
>>> f = lambdify((x,y), tan(x*y), numpy)
Attention: There are naming differences between numpy and sympy. So if
you simply take the numpy module, e.g. sympy.atan will not be
translated to numpy.arctan. Use the modified module instead
by passing the string "numpy":
>>> f = lambdify((x,y), tan(x*y), "numpy")
>>> f(1, 2)
-2.18503986326
>>> from numpy import array
>>> f(array([1, 2, 3]), array([2, 3, 5]))
[-2.18503986 -0.29100619 -0.8559934 ]
(3) Use a dictionary defining custom functions:
>>> def my_cool_function(x): return 'sin(%s) is cool' % x
>>> myfuncs = {"sin" : my_cool_function}
>>> f = lambdify(x, sin(x), myfuncs); f(1)
'sin(1) is cool'
Examples
========
>>> from sympy.utilities.lambdify import implemented_function
>>> from sympy import sqrt, sin, Matrix
>>> from sympy import Function
>>> from sympy.abc import w, x, y, z
>>> f = lambdify(x, x**2)
>>> f(2)
4
>>> f = lambdify((x, y, z), [z, y, x])
>>> f(1,2,3)
[3, 2, 1]
>>> f = lambdify(x, sqrt(x))
>>> f(4)
2.0
>>> f = lambdify((x, y), sin(x*y)**2)
>>> f(0, 5)
0.0
>>> row = lambdify((x, y), Matrix((x, x + y)).T, modules='sympy')
>>> row(1, 2)
Matrix([[1, 3]])
Tuple arguments are handled and the lambdified function should
be called with the same type of arguments as were used to create
the function.:
>>> f = lambdify((x, (y, z)), x + y)
>>> f(1, (2, 4))
3
A more robust way of handling this is to always work with flattened
arguments:
>>> from sympy.utilities.iterables import flatten
>>> args = w, (x, (y, z))
>>> vals = 1, (2, (3, 4))
>>> f = lambdify(flatten(args), w + x + y + z)
>>> f(*flatten(vals))
10
Functions present in `expr` can also carry their own numerical
implementations, in a callable attached to the ``_imp_``
attribute. Usually you attach this using the
``implemented_function`` factory:
>>> f = implemented_function(Function('f'), lambda x: x+1)
>>> func = lambdify(x, f(x))
>>> func(4)
5
``lambdify`` always prefers ``_imp_`` implementations to implementations
in other namespaces, unless the ``use_imps`` input parameter is False.
"""
from sympy.core.symbol import Symbol
from sympy.utilities.iterables import flatten
# If the user hasn't specified any modules, use what is available.
module_provided = True
if modules is None:
module_provided = False
# Use either numpy (if available) or python.math where possible.
# XXX: This leads to different behaviour on different systems and
# might be the reason for irreproducible errors.
modules = ["math", "mpmath", "sympy"]
#Attempt to import numpy
try:
_import("numpy")
except ImportError:
pass
else:
modules.insert(1, "numpy")
# Get the needed namespaces.
namespaces = []
# First find any function implementations
if use_imps:
namespaces.append(_imp_namespace(expr))
# Check for dict before iterating
if isinstance(modules, (dict, str)) or not hasattr(modules, '__iter__'):
namespaces.append(modules)
else:
# consistency check
if _module_present('numexpr', modules) and len(modules) > 1:
raise TypeError("numexpr must be the only item in 'modules'")
namespaces += list(modules)
# fill namespace with first having highest priority
namespace = {}
for m in namespaces[::-1]:
buf = _get_namespace(m)
namespace.update(buf)
if hasattr(expr, "atoms"):
#Try if you can extract symbols from the expression.
#Move on if expr.atoms in not implemented.
syms = expr.atoms(Symbol)
for term in syms:
namespace.update({str(term): term})
if _module_present('numpy',namespaces) and printer is None:
#XXX: This has to be done here because of circular imports
from sympy.printing.lambdarepr import NumPyPrinter as printer
if _module_present('numexpr',namespaces) and printer is None:
#XXX: This has to be done here because of circular imports
from sympy.printing.lambdarepr import NumExprPrinter as printer
# Get the names of the args, for creating a docstring
if not iterable(args):
args = (args,)
names = []
# Grab the callers frame, for getting the names by inspection (if needed)
callers_local_vars = inspect.currentframe().f_back.f_locals.items()
for n, var in enumerate(args):
if hasattr(var, 'name'):
names.append(var.name)
else:
# It's an iterable. Try to get name by inspection of calling frame.
name_list = [var_name for var_name, var_val in callers_local_vars
if var_val is var]
if len(name_list) == 1:
names.append(name_list[0])
else:
# Cannot infer name with certainty. arg_# will have to do.
names.append('arg_' + str(n))
# Create lambda function.
lstr = lambdastr(args, expr, printer=printer, dummify=dummify)
flat = '__flatten_args__'
if flat in lstr:
namespace.update({flat: flatten})
func = eval(lstr, namespace)
# Apply the docstring
sig = "func({0})".format(", ".join(str(i) for i in names))
sig = textwrap.fill(sig, subsequent_indent=' '*8)
expr_str = str(expr)
if len(expr_str) > 78:
expr_str = textwrap.wrap(expr_str, 75)[0] + '...'
func.__doc__ = ("Created with lambdify. Signature:\n\n{sig}\n\n"
"Expression:\n\n{expr}").format(sig=sig, expr=expr_str)
return func
def _module_present(modname, modlist):
if modname in modlist:
return True
for m in modlist:
if hasattr(m, '__name__') and m.__name__ == modname:
return True
return False
def _get_namespace(m):
"""
This is used by _lambdify to parse its arguments.
"""
if isinstance(m, str):
_import(m)
return MODULES[m][0]
elif isinstance(m, dict):
return m
elif hasattr(m, "__dict__"):
return m.__dict__
else:
raise TypeError("Argument must be either a string, dict or module but it is: %s" % m)
def lambdastr(args, expr, printer=None, dummify=False):
"""
Returns a string that can be evaluated to a lambda function.
Examples
========
>>> from sympy.abc import x, y, z
>>> from sympy.utilities.lambdify import lambdastr
>>> lambdastr(x, x**2)
'lambda x: (x**2)'
>>> lambdastr((x,y,z), [z,y,x])
'lambda x,y,z: ([z, y, x])'
Although tuples may not appear as arguments to lambda in Python 3,
lambdastr will create a lambda function that will unpack the original
arguments so that nested arguments can be handled:
>>> lambdastr((x, (y, z)), x + y)
'lambda _0,_1: (lambda x,y,z: (x + y))(*list(__flatten_args__([_0,_1])))'
"""
# Transforming everything to strings.
from sympy.matrices import DeferredVector
from sympy import Dummy, sympify, Symbol, Function, flatten
if printer is not None:
if inspect.isfunction(printer):
lambdarepr = printer
else:
if inspect.isclass(printer):
lambdarepr = lambda expr: printer().doprint(expr)
else:
lambdarepr = lambda expr: printer.doprint(expr)
else:
#XXX: This has to be done here because of circular imports
from sympy.printing.lambdarepr import lambdarepr
def sub_args(args, dummies_dict):
if isinstance(args, str):
return args
elif isinstance(args, DeferredVector):
return str(args)
elif iterable(args):
dummies = flatten([sub_args(a, dummies_dict) for a in args])
return ",".join(str(a) for a in dummies)
else:
#Sub in dummy variables for functions or symbols
if isinstance(args, (Function, Symbol)):
dummies = Dummy()
dummies_dict.update({args : dummies})
return str(dummies)
else:
return str(args)
def sub_expr(expr, dummies_dict):
try:
expr = sympify(expr).xreplace(dummies_dict)
except Exception:
if isinstance(expr, DeferredVector):
pass
elif isinstance(expr, dict):
k = [sub_expr(sympify(a), dummies_dict) for a in expr.keys()]
v = [sub_expr(sympify(a), dummies_dict) for a in expr.values()]
expr = dict(zip(k, v))
elif isinstance(expr, tuple):
expr = tuple(sub_expr(sympify(a), dummies_dict) for a in expr)
elif isinstance(expr, list):
expr = [sub_expr(sympify(a), dummies_dict) for a in expr]
return expr
# Transform args
def isiter(l):
return iterable(l, exclude=(str, DeferredVector))
if isiter(args) and any(isiter(i) for i in args):
from sympy.utilities.iterables import flatten
import re
dum_args = [str(Dummy(str(i))) for i in range(len(args))]
        iter_args = ','.join(dum_args)  # one dummy name per original arg
lstr = lambdastr(flatten(args), expr, printer=printer, dummify=dummify)
flat = '__flatten_args__'
rv = 'lambda %s: (%s)(*list(%s([%s])))' % (
','.join(dum_args), lstr, flat, iter_args)
if len(re.findall(r'\b%s\b' % flat, rv)) > 1:
raise ValueError('the name %s is reserved by lambdastr' % flat)
return rv
dummies_dict = {}
if dummify:
args = sub_args(args, dummies_dict)
else:
if isinstance(args, str):
pass
elif iterable(args, exclude=DeferredVector):
args = ",".join(str(a) for a in args)
# Transform expr
if dummify:
if isinstance(expr, str):
pass
else:
expr = sub_expr(expr, dummies_dict)
expr = lambdarepr(expr)
return "lambda %s: (%s)" % (args, expr)
def _imp_namespace(expr, namespace=None):
""" Return namespace dict with function implementations
We need to search for functions in anything that can be thrown at
us - that is - anything that could be passed as `expr`. Examples
include sympy expressions, as well as tuples, lists and dicts that may
contain sympy expressions.
Parameters
----------
expr : object
Something passed to lambdify, that will generate valid code from
``str(expr)``.
namespace : None or mapping
Namespace to fill. None results in new empty dict
Returns
-------
namespace : dict
dict with keys of implemented function names within `expr` and
corresponding values being the numerical implementation of
function
Examples
========
>>> from sympy.abc import x
>>> from sympy.utilities.lambdify import implemented_function, _imp_namespace
>>> from sympy import Function
>>> f = implemented_function(Function('f'), lambda x: x+1)
>>> g = implemented_function(Function('g'), lambda x: x*10)
>>> namespace = _imp_namespace(f(g(x)))
>>> sorted(namespace.keys())
['f', 'g']
"""
# Delayed import to avoid circular imports
from sympy.core.function import FunctionClass
if namespace is None:
namespace = {}
# tuples, lists, dicts are valid expressions
if is_sequence(expr):
for arg in expr:
_imp_namespace(arg, namespace)
return namespace
elif isinstance(expr, dict):
for key, val in expr.items():
# functions can be in dictionary keys
_imp_namespace(key, namespace)
_imp_namespace(val, namespace)
return namespace
# sympy expressions may be Functions themselves
func = getattr(expr, 'func', None)
if isinstance(func, FunctionClass):
imp = getattr(func, '_imp_', None)
if imp is not None:
name = expr.func.__name__
if name in namespace and namespace[name] != imp:
raise ValueError('We found more than one '
'implementation with name '
'"%s"' % name)
namespace[name] = imp
# and / or they may take Functions as arguments
if hasattr(expr, 'args'):
for arg in expr.args:
_imp_namespace(arg, namespace)
return namespace
def implemented_function(symfunc, implementation):
""" Add numerical ``implementation`` to function ``symfunc``.
``symfunc`` can be an ``UndefinedFunction`` instance, or a name string.
In the latter case we create an ``UndefinedFunction`` instance with that
name.
Be aware that this is a quick workaround, not a general method to create
special symbolic functions. If you want to create a symbolic function to be
used by all the machinery of SymPy you should subclass the ``Function``
class.
Parameters
----------
symfunc : ``str`` or ``UndefinedFunction`` instance
If ``str``, then create new ``UndefinedFunction`` with this as
name. If `symfunc` is a sympy function, attach implementation to it.
implementation : callable
numerical implementation to be called by ``evalf()`` or ``lambdify``
Returns
-------
afunc : sympy.FunctionClass instance
function with attached implementation
Examples
========
>>> from sympy.abc import x
>>> from sympy.utilities.lambdify import lambdify, implemented_function
>>> from sympy import Function
>>> f = implemented_function(Function('f'), lambda x: x+1)
>>> lam_f = lambdify(x, f(x))
>>> lam_f(4)
5
"""
# Delayed import to avoid circular imports
from sympy.core.function import UndefinedFunction
# if name, create function to hold implementation
if isinstance(symfunc, string_types):
symfunc = UndefinedFunction(symfunc)
elif not isinstance(symfunc, UndefinedFunction):
raise ValueError('symfunc should be either a string or'
' an UndefinedFunction instance.')
# We need to attach as a method because symfunc will be a class
symfunc._imp_ = staticmethod(implementation)
return symfunc
| maniteja123/sympy | sympy/utilities/lambdify.py | Python | bsd-3-clause | 21,677 |
import logging
import subprocess
from django.contrib import auth
from django.template import TemplateSyntaxError
from django.test import LiveServerTestCase, TestCase
from django.test.utils import override_settings
from allauth.account.models import EmailAddress
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions
logger = logging.getLogger(__name__)
UserModel = auth.get_user_model()
@override_settings(SSLIFY_DISABLE=True)
class SmokeTestCase(TestCase):
"""
A helper for testing lists of URLs.
"""
fixtures = ["open_humans/fixtures/test-data.json"]
anonymous_urls = []
authenticated_urls = []
authenticated_or_anonymous_urls = []
post_only_urls = []
redirect_urls = []
@property
def all_anonymous_urls(self):
return self.anonymous_urls + self.authenticated_or_anonymous_urls
def assert_status_code(self, url, status_code=None, method="get"):
if not status_code:
status_code = [200, 302]
elif isinstance(status_code, int):
status_code = [status_code]
try:
response = getattr(self.client, method)(url)
except TemplateSyntaxError as e:
raise Exception("{} had a TemplateSyntaxError: {}".format(url, e))
self.assertEqual(
response.status_code in status_code,
True,
msg="{} returned {} instead of {}".format(
url, response.status_code, status_code
),
)
def assert_login(self):
login = self.client.login(username="beau", password="asdfqwerty")
self.assertEqual(login, True)
def test_get_all_simple_urls(self):
for url in self.all_anonymous_urls:
self.assert_status_code(url)
def test_login_redirect(self):
for url in self.redirect_urls or self.authenticated_urls:
response = self.client.get(url)
self.assertRedirects(
response,
"/account/login/",
msg_prefix="{} did not redirect to login URL".format(url),
)
def test_all_urls_with_login(self):
self.assert_login()
for url in self.all_anonymous_urls:
self.assert_status_code(url)
for url in self.redirect_urls + self.authenticated_urls:
try:
self.assert_status_code(url)
except AttributeError:
# We do not actually use or even set all the fields in the model
# associated with AccountSettingsView; while this is not a problem
# with running the code in prod, the django test client is attempting
# to access them.
pass
def test_invalid_method(self):
self.assert_login()
for url in self.post_only_urls:
self.assert_status_code(url, status_code=405)
def test_post_only(self):
self.assert_login()
for url in self.post_only_urls:
self.assert_status_code(url, method="post")
def short_hash():
"""
Return the current git commit or `None`.
"""
try:
return subprocess.check_output(["git", "rev-parse", "--short", "HEAD"]).strip()
except: # pylint: disable=bare-except
return None
class BrowserTestCase(LiveServerTestCase):
"""
    A test case that runs in a local Chrome browser via Selenium WebDriver.
"""
fixtures = ["open_humans/fixtures/test-data.json"]
def setUp(self):
super(BrowserTestCase, self).setUp()
self.timeout = 10
self.driver = webdriver.Chrome()
self.driver.delete_all_cookies()
self.driver.maximize_window()
def tearDown(self):
self.driver.quit()
super(BrowserTestCase, self).tearDown()
def wait_for_element_id(self, element_id):
return WebDriverWait(self.driver, self.timeout).until(
expected_conditions.visibility_of_element_located((By.ID, element_id))
)
def login(self):
driver = self.driver
driver.get(self.live_server_url + "/account/login/")
try:
driver.find_element_by_link_text("Log out").click()
except NoSuchElementException:
pass
username = driver.find_element_by_id("login-username")
username.clear()
username.send_keys("beau")
password = driver.find_element_by_id("login-password")
password.clear()
password.send_keys("test")
driver.find_element_by_id("login").click()
self.assertEqual(
"Log out",
driver.find_element_by_css_selector(
".navbar-fixed-top .navbar-right .logout-link"
).text,
)
self.assertEqual(
"All activities",
driver.find_element_by_css_selector(
".body-main > .container > " ".row > .toolbar-column " "button.selected"
).text,
)
def get_or_create_user(name):
"""
Helper to create a Django user.
"""
try:
user = UserModel.objects.get(username=name)
except UserModel.DoesNotExist:
email = "{}@test.com".format(name)
user = UserModel.objects.create_user(name, email=email, password=name)
email = EmailAddress.objects.create(
user=user, email=email, verified=False, primary=True
)
return user
| OpenHumans/open-humans | common/testing.py | Python | mit | 5,545 |
disp_avlbl = True
import os
if 'DISPLAY' not in os.environ:
disp_avlbl = False
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
import scipy.io as sio
import scipy.sparse as sp
import scipy.sparse.linalg as lg
from time import time
import sys
sys.path.append('./')
sys.path.append(os.path.realpath(__file__))
from .static_graph_embedding import StaticGraphEmbedding
from gem.utils import graph_util, plot_util
from gem.evaluation import visualize_embedding as viz
class HOPE(StaticGraphEmbedding):
def __init__(self, *hyper_dict, **kwargs):
''' Initialize the HOPE class
Args:
d: dimension of the embedding
beta: higher order coefficient
'''
hyper_params = {
'method_name': 'hope_gsvd'
}
hyper_params.update(kwargs)
for key in hyper_params.keys():
self.__setattr__('_%s' % key, hyper_params[key])
for dictionary in hyper_dict:
for key in dictionary:
self.__setattr__('_%s' % key, dictionary[key])
def get_method_name(self):
return self._method_name
def get_method_summary(self):
return '%s_%d' % (self._method_name, self._d)
def learn_embedding(self, graph=None, edge_f=None,
is_weighted=False, no_python=False):
if not graph and not edge_f:
raise Exception('graph/edge_f needed')
if not graph:
graph = graph_util.loadGraphFromEdgeListTxt(edge_f)
t1 = time()
# A = nx.to_scipy_sparse_matrix(graph)
# I = sp.eye(graph.number_of_nodes())
# M_g = I - self._beta*A
# M_l = self._beta*A
A = nx.to_numpy_matrix(graph)
M_g = np.eye(len(graph.nodes)) - self._beta * A
M_l = self._beta * A
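        # Katz-style proximity: S = (I - beta*A)^-1 * (beta*A). The truncated
        # SVD of S below yields source/target embeddings X1 and X2 (d/2
        # dimensions each), which are concatenated into the final embedding.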
S = np.dot(np.linalg.inv(M_g), M_l)
u, s, vt = lg.svds(S, k=self._d // 2)
X1 = np.dot(u, np.diag(np.sqrt(s)))
X2 = np.dot(vt.T, np.diag(np.sqrt(s)))
t2 = time()
self._X = np.concatenate((X1, X2), axis=1)
p_d_p_t = np.dot(u, np.dot(np.diag(s), vt))
eig_err = np.linalg.norm(p_d_p_t - S)
print('SVD error (low rank): %f' % eig_err)
return self._X, (t2 - t1)
def get_embedding(self):
return self._X
def get_edge_weight(self, i, j):
return np.dot(self._X[i, :self._d // 2], self._X[j, self._d // 2:])
def get_reconstructed_adj(self, X=None, node_l=None):
if X is not None:
node_num = X.shape[0]
self._X = X
else:
node_num = self._node_num
adj_mtx_r = np.zeros((node_num, node_num))
for v_i in range(node_num):
for v_j in range(node_num):
if v_i == v_j:
continue
adj_mtx_r[v_i, v_j] = self.get_edge_weight(v_i, v_j)
return adj_mtx_r
if __name__ == '__main__':
# load Zachary's Karate graph
edge_f = 'data/karate.edgelist'
G = graph_util.loadGraphFromEdgeListTxt(edge_f, directed=False)
G = G.to_directed()
res_pre = 'results/testKarate'
graph_util.print_graph_stats(G)
t1 = time()
    embedding = HOPE(d=4, beta=0.01)  # keyword args; bare ints would break the hyper_dict loop
embedding.learn_embedding(graph=G, edge_f=None,
is_weighted=True, no_python=True)
print('HOPE:\n\tTraining time: %f' % (time() - t1))
viz.plot_embedding2D(embedding.get_embedding()[:, :2],
di_graph=G, node_colors=None)
plt.show()
| palash1992/GEM | gem/embedding/hope.py | Python | bsd-3-clause | 3,591 |
# Copyright 2020 Makani Technologies LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Motor INA219 hardware monitor configuration."""
from makani.avionics.firmware.drivers import ina219_types
from makani.avionics.firmware.serial import motor_serial_params as rev
ina219_default = {
'name': '',
'address': 0x0,
'shunt_resistor': 0.01,
'bus_voltage': ina219_types.kIna219BusVoltage16V,
'range': ina219_types.kIna219Range40mv,
'bus_adc': ina219_types.kIna219Adc128Samples,
'shunt_adc': ina219_types.kIna219Adc128Samples,
'mode': ina219_types.kIna219ModeShuntAndBusContinuous,
'current_max': -1,
'voltage_limits_percent': [95, 105],
}
ina219_16v_40mv = dict(ina219_default, **{
'bus_voltage': ina219_types.kIna219BusVoltage16V,
'range': ina219_types.kIna219Range40mv,
})
ina219_16v_80mv = dict(ina219_default, **{
'bus_voltage': ina219_types.kIna219BusVoltage16V,
'range': ina219_types.kIna219Range80mv,
})
ina219_32v_40mv = dict(ina219_default, **{
'bus_voltage': ina219_types.kIna219BusVoltage32V,
'range': ina219_types.kIna219Range40mv,
})
ina219_32v_160mv = dict(ina219_default, **{
'bus_voltage': ina219_types.kIna219BusVoltage32V,
'range': ina219_types.kIna219Range160mv,
})
gin_a1 = [
dict(ina219_32v_40mv, name='12v', address=0x40, shunt_resistor=0.012),
dict(ina219_16v_40mv, name='1v2', address=0x42, shunt_resistor=0.02),
dict(ina219_16v_40mv, name='3v3', address=0x45, shunt_resistor=0.02),
]
gin_a2 = gin_a1
gin_a3 = [
dict(ina219_32v_160mv, name='12v', address=0x41, shunt_resistor=0.05),
dict(ina219_16v_80mv, name='1v2', address=0x42, shunt_resistor=0.05),
dict(ina219_16v_80mv, name='3v3', address=0x45, shunt_resistor=0.05),
]
ina219_config = (rev.MotorHardware, {
rev.MotorHardware.GIN_A1: gin_a1,
rev.MotorHardware.GIN_A2: gin_a2,
rev.MotorHardware.GIN_A3: gin_a3,
rev.MotorHardware.GIN_A4_CLK16: gin_a3,
rev.MotorHardware.GIN_A4_CLK8: gin_a3,
rev.MotorHardware.OZONE_A1: gin_a3,
})
| google/makani | avionics/motor/monitors/motor_ina219.py | Python | apache-2.0 | 2,535 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Methods to allow pandas.DataFrame."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
try:
# pylint: disable=g-import-not-at-top
import pandas as pd
HAS_PANDAS = True
except ImportError:
HAS_PANDAS = False
PANDAS_DTYPES = {
'int8': 'int',
'int16': 'int',
'int32': 'int',
'int64': 'int',
'uint8': 'int',
'uint16': 'int',
'uint32': 'int',
'uint64': 'int',
'float16': 'float',
'float32': 'float',
'float64': 'float',
'bool': 'i'
}
def extract_pandas_data(data):
"""Extract data from pandas.DataFrame for predictors."""
if not isinstance(data, pd.DataFrame):
return data
if all(dtype.name in PANDAS_DTYPES for dtype in data.dtypes):
return data.values.astype('float')
else:
raise ValueError('Data types for data must be int, float, or bool.')
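# Illustrative usage (assumed DataFrame contents):
#   df = pd.DataFrame({'a': [1, 2], 'b': [0.5, 1.5]})
#   extract_pandas_data(df)  # -> float ndarray of shape (2, 2)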
def extract_pandas_matrix(data):
"""Extracts numpy matrix from pandas DataFrame."""
if not isinstance(data, pd.DataFrame):
return data
return data.as_matrix()
def extract_pandas_labels(labels):
"""Extract data from pandas.DataFrame for labels."""
if isinstance(labels,
pd.DataFrame): # pandas.Series also belongs to DataFrame
if len(labels.columns) > 1:
raise ValueError('Only one column for labels is allowed.')
if all(dtype.name in PANDAS_DTYPES for dtype in labels.dtypes):
return labels.values
else:
raise ValueError('Data types for labels must be int, float, or bool.')
else:
return labels
| TakayukiSakai/tensorflow | tensorflow/contrib/learn/python/learn/io/pandas_io.py | Python | apache-2.0 | 2,243 |
from nose.tools import * # noqa: F403
from tests.base import AdminTestCase
from admin.base.forms import GuidForm
class TestGuidForm(AdminTestCase):
def setUp(self):
super(TestGuidForm, self).setUp()
def test_valid_data(self):
guid = '12345'
form = GuidForm({
'guid': guid,
})
assert_true(form.is_valid())
assert_equal(form.cleaned_data.get('guid'), guid)
def test_blank_data(self):
form = GuidForm({})
assert_false(form.is_valid())
assert_equal(form.errors, {
'guid': [u'This field is required.'],
})
| saradbowman/osf.io | admin_tests/base/test_forms.py | Python | apache-2.0 | 624 |
# Copyright (c) 2013 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
from wsgiref import simple_server
import falcon
from oslo.config import cfg
import six
import poppy.openstack.common.log as logging
from poppy import transport
from poppy.transport.falcon import (
v1, services
)
_WSGI_OPTIONS = [
cfg.StrOpt('bind', default='127.0.0.1',
help='Address on which the self-hosting server will listen'),
cfg.IntOpt('port', default=8888,
help='Port on which the self-hosting server will listen'),
]
_WSGI_GROUP = 'drivers:transport:falcon'
LOG = logging.getLogger(__name__)
@six.add_metaclass(abc.ABCMeta)
class TransportDriver(transport.Driver):
def __init__(self, conf, manager):
super(TransportDriver, self).__init__(conf, manager)
self._conf.register_opts(_WSGI_OPTIONS, group=_WSGI_GROUP)
self._wsgi_conf = self._conf[_WSGI_GROUP]
self._setup_app()
def _setup_app(self):
"""Initialize hooks and URI routes to resources."""
self._app = falcon.API()
version_path = "/v1.0"
project_id = "/{project_id}"
prefix = version_path + project_id
# init the controllers
service_controller = self.manager.services_controller
# setup the routes
self._app.add_route(prefix,
v1.V1Resource())
self._app.add_route(prefix + '/services',
services.ServicesResource(service_controller))
self._app.add_route(prefix + '/services/{service_name}',
services.ServiceResource(service_controller))
def listen(self):
"""Self-host using 'bind' and 'port' from the WSGI config group."""
msgtmpl = (u'Serving on host %(bind)s:%(port)s')
LOG.info(msgtmpl,
{'bind': self._wsgi_conf.bind, 'port': self._wsgi_conf.port})
httpd = simple_server.make_server(self._wsgi_conf.bind,
self._wsgi_conf.port,
self.app)
httpd.serve_forever()
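# Illustrative oslo.config file snippet for the options registered above
# (values are assumptions; option names come from _WSGI_OPTIONS):
#
#   [drivers:transport:falcon]
#   bind = 0.0.0.0
#   port = 8080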
| amitgandhinz/cdn | poppy/transport/falcon/driver.py | Python | apache-2.0 | 2,641 |
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# This module copyright (C) 2013 Savoir-faire Linux
# (<http://www.savoirfairelinux.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Employee Phone Extension',
'version': '1.0',
'category': 'Human Resources',
'author': "Savoir-faire Linux,Odoo Community Association (OCA)",
'website': 'http://www.savoirfairelinux.com',
'license': 'AGPL-3',
'depends': ['hr', ],
'data': [
'views/hr_employee_view.xml',
],
'demo': [],
'test': [],
'installable': True,
'auto_install': False,
}
| abstract-open-solutions/hr | hr_employee_phone_extension/__openerp__.py | Python | agpl-3.0 | 1,432 |
# Natural Language Toolkit: Probability and Statistics
#
# Copyright (C) 2001 University of Pennsylvania
# Author: Edward Loper <[email protected]>
# Trevor Cohn <[email protected]> (additions)
# URL: <http://nltk.sf.net>
# For license information, see LICENSE.TXT
#
# $Id: probability.py,v 1.1.1.2 2004/09/29 21:57:49 adastra Exp $
"""
Classes for representing and processing probabilistic information.
The L{FreqDist} class is used to encode X{frequency distributions},
which count the number of times that each outcome of an experiment
occurs.
The L{ProbDistI} class defines a standard interface for X{probability
distributions}, which encode the probability of each outcome for an
experiment. There are two types of probability distribution:
- X{derived probability distributions} are created from frequency
distributions. They attempt to model the probability distribution
that generated the frequency distribution.
- X{analytic probability distributions} are created directly from
parameters (such as variance).
The L{ConditionalFreqDist} class and L{ConditionalProbDistI} interface
are used to encode conditional distributions. Conditional probability
distributions can be derived or analytic; but currently the only
implementation of the C{ConditionalProbDistI} interface is
L{ConditionalProbDist}, a derived distribution.
The L{ProbabilisticMixIn} class is a mix-in class that can be used to
associate probabilities with data classes (such as C{Token} or
C{Tree}).
@group Frequency Distributions: FreqDist
@group Derived Probability Distributions: ProbDistI, MLEProbDist,
LidstoneProbDist, LaplaceProbDist, ELEProbDist, HeldoutProbDist,
CrossValidationProbDist
@group Analytic Probability Distributions: UniformProbDist
@group Conditional Distributions: ConditionalFreqDist,
ConditionalProbDistI, ConditionalProbDist
@group Probabilistic Mix-In: ProbabilisticMixIn
@sort: FreqDist, ProbDistI, MLEProbDist, LidstoneProbDist, LaplaceProbDist,
ELEProbDist, HeldoutProbDist, CrossValidationProbDist, UniformProbDist,
ConditionalFreqDist, ConditionalProbDistI, ConditionalProbDist
@todo: Better handling of log probabilities.
"""
from nltk.chktype import chktype as _chktype
from sets import Set
from nltk.util import sum_logs
import types, math, numpy
##//////////////////////////////////////////////////////
## Frequency Distributions
##//////////////////////////////////////////////////////
class FreqDist:
"""
A frequency distribution for the outcomes of an experiment. A
frequency distribution records the number of times each outcome of
    an experiment has occurred. For example, a frequency distribution
could be used to record the frequency of each word type in a
document. Formally, a frequency distribution can be defined as a
function mapping from each sample to the number of times that
    sample occurred as an outcome.
Frequency distributions are generally constructed by running a
number of experiments, and incrementing the count for a sample
every time it is an outcome of an experiment. For example, the
following code will produce a frequency distribution that encodes
how often each word occurs in a text:
>>> fdist = FreqDist()
>>> for token in text_token['SUBTOKENS']:
... fdist.inc(token['TEXT'])
"""
def __init__(self):
"""
        Construct a new empty C{FreqDist}. In particular, the count
for every sample is zero.
"""
self._count = {}
self._N = 0
self._Nr_cache = None
self._max_cache = None
def inc(self, sample, count=1):
"""
Increment this C{FreqDist}'s count for the given
sample.
@param sample: The sample whose count should be incremented.
@type sample: any
@param count: The amount to increment the sample's count by.
@type count: C{int}
@rtype: None
@raise NotImplementedError: If C{sample} is not a
supported sample type.
"""
assert _chktype(2, count, types.IntType)
if count == 0: return
self._N += count
self._count[sample] = self._count.get(sample,0) + count
# Invalidate the Nr cache and max cache.
self._Nr_cache = None
self._max_cache = None
def N(self):
"""
@return: The total number of sample outcomes that have been
recorded by this C{FreqDist}. For the number of unique
sample values (or bins) with counts greater than zero, use
C{FreqDist.B()}.
@rtype: C{int}
"""
return self._N
def B(self):
"""
@return: The total number of sample values (or X{bins}) that
have counts greater than zero. For the total
number of sample outcomes recorded, use C{FreqDist.N()}.
@rtype: C{int}
"""
return len(self._count)
def samples(self):
"""
@return: A list of all samples that have been recorded as
outcomes by this frequency distribution. Use C{count()}
to determine the count for each sample.
@rtype: C{list}
"""
return self._count.keys()
def Nr(self, r, bins=None):
"""
@return: The number of samples with count r.
@rtype: C{int}
@type r: C{int}
@param r: A sample count.
@type bins: C{int}
@param bins: The number of possible sample outcomes. C{bins}
is used to calculate Nr(0). In particular, Nr(0) is
C{bins-self.B()}. If C{bins} is not specified, it
defaults to C{self.B()} (so Nr(0) will be 0).
"""
assert _chktype(1, r, types.IntType)
assert _chktype(2, bins, types.IntType, types.NoneType)
if r < 0: raise IndexError, 'FreqDist.Nr(): r must be non-negative'
# Special case for Nr(0):
if r == 0:
if bins is None: return 0
else: return bins-self.B()
# We have to search the entire distribution to find Nr. Since
# this is an expensive operation, and is likely to be used
# repeatedly, cache the results.
if self._Nr_cache is None:
self._cache_Nr_values()
if r >= len(self._Nr_cache): return 0
return self._Nr_cache[r]
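    # Illustrative example derived from the definition above: if exactly
    # three samples were each observed twice, then fdist.Nr(2) == 3; with
    # bins=10 and fdist.B() == 4, fdist.Nr(0) == 10 - 4 == 6.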
def _cache_Nr_values(self):
Nr = [0]
for sample in self.samples():
c = self._count.get(sample, 0)
if c >= len(Nr):
Nr += [0]*(c+1-len(Nr))
Nr[c] += 1
self._Nr_cache = Nr
def count(self, sample):
"""
Return the count of a given sample. The count of a sample is
defined as the number of times that sample outcome was
recorded by this C{FreqDist}. Counts are non-negative
integers.
@return: The count of a given sample.
@rtype: C{int}
@param sample: the sample whose count
should be returned.
@type sample: any.
"""
return self._count.get(sample, 0)
def freq(self, sample):
"""
Return the frequency of a given sample. The frequency of a
sample is defined as the count of that sample divided by the
total number of sample outcomes that have been recorded by
this C{FreqDist}. The count of a sample is defined as the
number of times that sample outcome was recorded by this
C{FreqDist}. Frequencies are always real numbers in the range
[0, 1].
@return: The frequency of a given sample.
@rtype: float
@param sample: the sample whose frequency
should be returned.
@type sample: any
"""
        if self._N == 0: return 0
return float(self._count.get(sample, 0)) / self._N
def max(self):
"""
Return the sample with the greatest number of outcomes in this
frequency distribution. If two or more samples have the same
number of outcomes, return one of them; which sample is
returned is undefined. If no outcomes have occured in this
frequency distribution, return C{None}.
@return: The sample with the maximum number of outcomes in this
frequency distribution.
@rtype: any or C{None}
"""
if self._max_cache is None:
best_sample = None
best_count = -1
for sample in self._count.keys():
if self._count[sample] > best_count:
best_sample = sample
best_count = self._count[sample]
self._max_cache = best_sample
return self._max_cache
def sorted_samples(self):
"""
Return the samples sorted in decreasing order of frequency. Instances
with the same count will be arbitrarily ordered. Instances with a
count of zero will be omitted. This method is C{O(N^2)}, where C{N} is
the number of samples, but will complete in a shorter time on average.
@return: The set of samples in sorted order.
@rtype: sequence of any
"""
items = [(-count,sample) for (sample,count) in self._count.items()]
items.sort()
return [sample for (neg_count,sample) in items]
def __repr__(self):
"""
@return: A string representation of this C{FreqDist}.
@rtype: string
"""
return '<FreqDist with %d samples>' % self.N()
def __str__(self):
"""
@return: A string representation of this C{FreqDist}.
@rtype: string
"""
samples = self.sorted_samples()
items = ['%r: %r' % (s, self._count[s]) for s in samples]
return '<FreqDist: %s>' % ', '.join(items)
def __contains__(self, sample):
"""
@return: True if the given sample occurs one or more times in
this frequency distribution.
@rtype: C{boolean}
@param sample: The sample to search for.
@type sample: any
"""
return self._count.has_key(sample)
##//////////////////////////////////////////////////////
## Probability Distributions
##//////////////////////////////////////////////////////
class ProbDistI:
"""
A probability distribution for the outcomes of an experiment. A
probability distribution specifies how likely it is that an
experiment will have any given outcome. For example, a
probability distribution could be used to predict the probability
that a token in a document will have a given type. Formally, a
probability distribution can be defined as a function mapping from
samples to nonnegative real numbers, such that the sum of every
number in the function's range is 1.0. C{ProbDist}s are often
used to model the probability distribution of the experiment used
to generate a frequency distribution.
"""
def __init__(self):
if self.__class__ == ProbDistI:
raise AssertionError, "Interfaces can't be instantiated"
def prob(self, sample):
"""
@return: the probability for a given sample. Probabilities
are always real numbers in the range [0, 1].
@rtype: float
@param sample: The sample whose probability
should be returned.
@type sample: any
"""
raise AssertionError()
def logprob(self, sample):
"""
@return: the natural logarithm of the probability for a given
        sample. Log probabilities range from negative infinity to
zero.
@rtype: float
@param sample: The sample whose probability
should be returned.
@type sample: any
"""
# Default definition, in terms of prob()
p = self.prob(sample)
if p == 0:
# Use some approximation to infinity. What this does
# depends on your system's float implementation.
return -1e1000
else:
return math.log(p)
def max(self):
"""
@return: the sample with the greatest probability. If two or
more samples have the same probability, return one of them;
which sample is returned is undefined.
@rtype: any
"""
raise AssertionError()
def samples(self):
"""
@return: A list of all samples that have nonzero
probabilities. Use C{prob} to find the probability of
each sample.
@rtype: C{list}
"""
raise AssertionError()
class UniformProbDist(ProbDistI):
"""
A probability distribution that assigns equal probability to each
sample in a given set; and a zero probability to all other
samples.
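    For example (an illustrative sketch):
        >>> pd = UniformProbDist(['a', 'b', 'c', 'd'])
        >>> pd.prob('a')
        0.25
        >>> pd.prob('z')
        0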
"""
def __init__(self, samples):
"""
Construct a new uniform probability distribution, that assigns
equal probability to each sample in C{samples}.
@param samples: The samples that should be given uniform
probability.
@type samples: C{list}
@raise ValueError: If C{samples} is empty.
"""
assert _chktype(1, samples, [], ())
if len(samples) == 0:
raise ValueError('A Uniform probability distribution must '+
'have at least one sample.')
self._sampleset = Set(samples)
self._prob = 1.0/len(self._sampleset)
self._samples = list(self._sampleset)
def prob(self, sample):
if sample in self._sampleset: return self._prob
else: return 0
def max(self): return self._samples[0]
def samples(self): return self._samples
def __repr__(self):
return '<UniformProbDist with %d samples>' % len(self._sampleset)
class DictionaryProbDist(ProbDistI):
"""
A probability distribution whose probabilities are directly
specified by a given dictionary. The given dictionary maps
samples to probabilities.
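    For example (an illustrative sketch):
        >>> pd = DictionaryProbDist({'win': 3.0, 'lose': 1.0}, normalize=True)
        >>> pd.prob('win')
        0.75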
"""
def __init__(self, prob_dict=None, log=False, normalize=False):
"""
Construct a new probability distribution from the given
dictionary, which maps values to probabilities (or to log
probabilities, if C{log} is true). If C{normalize} is
true, then the probability values are scaled by a constant
factor such that they sum to 1.
"""
assert _chktype(1, prob_dict, {})
self._prob_dict = prob_dict.copy()
self._log = log
# Normalize the distribution, if requested.
if normalize:
if log:
value_sum = sum_logs(self._prob_dict.values())
if value_sum <= -1e1000:
logp = math.log(1.0/len(prob_dict.keys()))
for x in prob_dict.keys():
self._prob_dict[x] = logp
else:
for (x, p) in self._prob_dict.items():
self._prob_dict[x] -= value_sum
else:
value_sum = sum(self._prob_dict.values())
if value_sum == 0:
p = 1.0/len(prob_dict.keys())
for x in prob_dict.keys():
self._prob_dict[x] = p
else:
norm_factor = 1.0/value_sum
for (x, p) in self._prob_dict.items():
self._prob_dict[x] *= norm_factor
def prob(self, sample):
if self._log:
if sample not in self._prob_dict: return 0
else: return math.exp(self._prob_dict[sample])
else:
return self._prob_dict.get(sample, 0)
def logprob(self, sample):
        if self._log:
            # Use an approximation of negative infinity for unseen samples.
            return self._prob_dict.get(sample, -1e1000)
        else:
            if sample not in self._prob_dict: return -1e1000
            else: return math.log(self._prob_dict[sample])
def max(self):
if not hasattr(self, '_max'):
self._max = max([(p,v) for (v,p) in self._prob_dict.items()])[1]
return self._max
def samples(self):
return self._prob_dict.keys()
def __repr__(self):
return '<ProbDist with %d samples>' % len(self._prob_dict)
class MLEProbDist(ProbDistI):
"""
The maximum likelihood estimate for the probability distribution
of the experiment used to generate a frequency distribution. The
X{maximum likelihood estimate} approximates the probability of
each sample as the frequency of that sample in the frequency
distribution.
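    For example (an illustrative sketch):
        >>> fd = FreqDist()
        >>> for s in ['a', 'a', 'b', 'b']: fd.inc(s)
        >>> MLEProbDist(fd).prob('a')
        0.5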
"""
def __init__(self, freqdist):
"""
Use the maximum likelihood estimate to create a probability
distribution for the experiment used to generate C{freqdist}.
@type freqdist: C{FreqDist}
@param freqdist: The frequency distribution that the
probability estimates should be based on.
"""
assert _chktype(1, freqdist, FreqDist)
if freqdist.N() == 0:
raise ValueError('An MLE probability distribution must '+
'have at least one sample.')
self._freqdist = freqdist
def freqdist(self):
"""
@return: The frequency distribution that this probability
distribution is based on.
@rtype: C{FreqDist}
"""
return self._freqdist
def prob(self, sample):
return self._freqdist.freq(sample)
def max(self):
return self._freqdist.max()
def samples(self):
return self._freqdist.samples()
def __repr__(self):
"""
@rtype: C{string}
@return: A string representation of this C{ProbDist}.
"""
return '<MLEProbDist based on %d samples>' % self._freqdist.N()
class LidstoneProbDist(ProbDistI):
"""
The Lidstone estimate for the probability distribution of the
experiment used to generate a frequency distribution. The
    C{Lidstone estimate} is parameterized by a real number M{gamma},
which typically ranges from 0 to 1. The X{Lidstone estimate}
approximates the probability of a sample with count M{c} from an
experiment with M{N} outcomes and M{B} bins as
    M{(c+gamma)/(N+B*gamma)}. This is equivalent to adding
M{gamma} to the count for each bin, and taking the maximum
likelihood estimate of the resulting frequency distribution.
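    For example (an illustrative sketch), with counts {'a': 2, 'b': 1}
    and M{gamma}=0.5, the estimate for 'a' is M{(2+0.5)/(3+2*0.5)}:
        >>> fd = FreqDist()
        >>> for s in ['a', 'a', 'b']: fd.inc(s)
        >>> LidstoneProbDist(fd, 0.5).prob('a')
        0.625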
"""
def __init__(self, freqdist, gamma, bins=None):
"""
Use the Lidstone estimate to create a probability distribution
for the experiment used to generate C{freqdist}.
@type freqdist: C{FreqDist}
@param freqdist: The frequency distribution that the
probability estimates should be based on.
@type gamma: C{float}
        @param gamma: A real number used to parameterize the
            estimate. The Lidstone estimate is equivalent to adding
M{gamma} to the count for each bin, and taking the
maximum likelihood estimate of the resulting frequency
distribution.
@type bins: C{int}
@param bins: The number of sample values that can be generated
by the experiment that is described by the probability
distribution. This value must be correctly set for the
probabilities of the sample values to sum to one. If
C{bins} is not specified, it defaults to C{freqdist.B()}.
"""
assert _chktype(1, freqdist, FreqDist)
assert _chktype(2, gamma, types.FloatType, types.IntType)
assert _chktype(3, bins, types.IntType, types.NoneType)
if (bins == 0) or (bins is None and freqdist.N() == 0):
name = self.__class__.__name__[:-8]
raise ValueError('A %s probability distribution ' % name +
'must have at least one bin.')
if (bins is not None) and (bins < freqdist.B()):
name = self.__class__.__name__[:-8]
raise ValueError('\nThe number of bins in a %s must be ' % name +
'greater than or equal to\nthe number of '+
'bins in the FreqDist used to create it.')
self._freqdist = freqdist
self._gamma = float(gamma)
self._N = self._freqdist.N()
if bins is None: bins = freqdist.B()
self._bins = bins
def freqdist(self):
"""
@return: The frequency distribution that this probability
distribution is based on.
@rtype: C{FreqDist}
"""
return self._freqdist
def prob(self, sample):
c = self._freqdist.count(sample)
return (c + self._gamma) / (self._N + self._bins * self._gamma)
def max(self):
# For Lidstone distributions, probability is monotonic with
# frequency, so the most probable sample is the one that
# occurs most frequently.
return self._freqdist.max()
def samples(self):
return self._freqdist.samples()
def __repr__(self):
"""
@rtype: C{string}
@return: A string representation of this C{ProbDist}.
"""
return '<LidstoneProbDist based on %d samples>' % self._freqdist.N()
class LaplaceProbDist(LidstoneProbDist):
"""
The Laplace estimate for the probability distribution of the
experiment used to generate a frequency distribution. The
    X{Laplace estimate} approximates the probability of a sample with
    count M{c} from an experiment with M{N} outcomes and M{B} bins as
    M{(c+1)/(N+B)}. This is equivalent to adding one to the count for
each bin, and taking the maximum likelihood estimate of the
resulting frequency distribution.
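    For example (an illustrative sketch), with counts {'a': 1, 'b': 1},
    the estimate for 'a' is M{(1+1)/(2+2)}:
        >>> fd = FreqDist()
        >>> for s in ['a', 'b']: fd.inc(s)
        >>> LaplaceProbDist(fd).prob('a')
        0.5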
"""
def __init__(self, freqdist, bins=None):
"""
Use the Laplace estimate to create a probability distribution
for the experiment used to generate C{freqdist}.
@type freqdist: C{FreqDist}
@param freqdist: The frequency distribution that the
probability estimates should be based on.
@type bins: C{int}
@param bins: The number of sample values that can be generated
by the experiment that is described by the probability
distribution. This value must be correctly set for the
probabilities of the sample values to sum to one. If
C{bins} is not specified, it defaults to C{freqdist.B()}.
"""
assert _chktype(1, freqdist, FreqDist)
assert _chktype(2, bins, types.IntType, types.NoneType)
LidstoneProbDist.__init__(self, freqdist, 1, bins)
def __repr__(self):
"""
@rtype: C{string}
@return: A string representation of this C{ProbDist}.
"""
return '<LaplaceProbDist based on %d samples>' % self._freqdist.N()
class ELEProbDist(LidstoneProbDist):
"""
The expected likelihood estimate for the probability distribution
of the experiment used to generate a frequency distribution. The
X{expected likelihood estimate} approximates the probability of a
sample with count M{c} from an experiment with M{N} outcomes and
    M{B} bins as M{(c+0.5)/(N+B/2)}. This is equivalent to adding 0.5
to the count for each bin, and taking the maximum likelihood
estimate of the resulting frequency distribution.
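    For example (an illustrative sketch), with counts {'a': 2, 'b': 1},
    the estimate for 'a' is M{(2+0.5)/(3+2/2)}:
        >>> fd = FreqDist()
        >>> for s in ['a', 'a', 'b']: fd.inc(s)
        >>> ELEProbDist(fd).prob('a')
        0.625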
"""
def __init__(self, freqdist, bins=None):
"""
Use the expected likelihood estimate to create a probability
distribution for the experiment used to generate C{freqdist}.
@type freqdist: C{FreqDist}
@param freqdist: The frequency distribution that the
probability estimates should be based on.
@type bins: C{int}
@param bins: The number of sample values that can be generated
by the experiment that is described by the probability
distribution. This value must be correctly set for the
probabilities of the sample values to sum to one. If
C{bins} is not specified, it defaults to C{freqdist.B()}.
"""
LidstoneProbDist.__init__(self, freqdist, 0.5, bins)
def __repr__(self):
"""
@rtype: C{string}
@return: A string representation of this C{ProbDist}.
"""
return '<ELEProbDist based on %d samples>' % self._freqdist.N()
class HeldoutProbDist(ProbDistI):
"""
The heldout estimate for the probability distribution of the
experiment used to generate two frequency distributions. These
two frequency distributions are called the "heldout frequency
distribution" and the "base frequency distribution." The
    X{heldout estimate} uses the X{heldout frequency
distribution} to predict the probability of each sample, given its
frequency in the X{base frequency distribution}.
In particular, the heldout estimate approximates the probability
for a sample that occurs M{r} times in the base distribution as
the average frequency in the heldout distribution of all samples
that occur M{r} times in the base distribution.
This average frequency is M{Tr[r]/(Nr[r]*N)}, where:
- M{Tr[r]} is the total count in the heldout distribution for
all samples that occur M{r} times in the base
distribution.
- M{Nr[r]} is the number of samples that occur M{r} times in
the base distribution.
- M{N} is the number of outcomes recorded by the heldout
frequency distribution.
In order to increase the efficiency of the C{prob} member
function, M{Tr[r]/(Nr[r]*N)} is precomputed for each value of M{r}
when the C{HeldoutProbDist} is created.
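    For example (an illustrative sketch): if sample 'b' occurs once in
    the base distribution, the samples occurring once there have a
    total count of 1 in the heldout distribution, and the heldout
    distribution records 4 outcomes, then the estimate for 'b' is
    M{Tr[1]/(Nr[1]*N)} = M{1/(1*4)} = 0.25.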
@type _estimate: C{list} of C{float}
@ivar _estimate: A list mapping from M{r}, the number of
times that a sample occurs in the base distribution, to the
probability estimate for that sample. C{_estimate[M{r}]} is
calculated by finding the average frequency in the heldout
distribution of all samples that occur M{r} times in the base
distribution. In particular, C{_estimate[M{r}]} =
M{Tr[r]/(Nr[r]*N)}.
@type _max_r: C{int}
@ivar _max_r: The maximum number of times that any sample occurs
in the base distribution. C{_max_r} is used to decide how
large C{_estimate} must be.
"""
def __init__(self, base_fdist, heldout_fdist, bins=None):
"""
Use the heldout estimate to create a probability distribution
for the experiment used to generate C{base_fdist} and
C{heldout_fdist}.
@type base_fdist: C{FreqDist}
@param base_fdist: The base frequency distribution.
@type heldout_fdist: C{FreqDist}
@param heldout_fdist: The heldout frequency distribution.
@type bins: C{int}
@param bins: The number of sample values that can be generated
by the experiment that is described by the probability
distribution. This value must be correctly set for the
probabilities of the sample values to sum to one. If
            C{bins} is not specified, it defaults to C{base_fdist.B()}.
"""
assert _chktype(1, base_fdist, FreqDist)
assert _chktype(2, heldout_fdist, FreqDist)
assert _chktype(3, bins, types.IntType, types.NoneType)
self._base_fdist = base_fdist
self._heldout_fdist = heldout_fdist
# The max number of times any sample occurs in base_fdist.
self._max_r = base_fdist.count(base_fdist.max())
# Calculate Tr, Nr, and N.
Tr = self._calculate_Tr()
Nr = [base_fdist.Nr(r, bins) for r in range(self._max_r+1)]
N = heldout_fdist.N()
# Use Tr, Nr, and N to compute the probability estimate for
# each value of r.
self._estimate = self._calculate_estimate(Tr, Nr, N)
def _calculate_Tr(self):
"""
@return: the list M{Tr}, where M{Tr[r]} is the total count in
C{heldout_fdist} for all samples that occur M{r}
times in C{base_fdist}.
@rtype: C{list} of C{float}
"""
Tr = [0.0] * (self._max_r+1)
for sample in self._heldout_fdist.samples():
r = self._base_fdist.count(sample)
Tr[r] += self._heldout_fdist.count(sample)
return Tr
def _calculate_estimate(self, Tr, Nr, N):
"""
@return: the list M{estimate}, where M{estimate[r]} is the
probability estimate for any sample that occurs M{r} times
in the base frequency distribution. In particular,
            M{estimate[r]} is M{Tr[r]/(Nr[r]*N)}. In the special case
            that M{Nr[r]=0}, M{estimate[r]} will never be used; so we
define M{estimate[r]=None} for those cases.
@rtype: C{list} of C{float}
@type Tr: C{list} of C{float}
@param Tr: the list M{Tr}, where M{Tr[r]} is the total count in
the heldout distribution for all samples that occur M{r}
times in base distribution.
@type Nr: C{list} of C{float}
@param Nr: The list M{Nr}, where M{Nr[r]} is the number of
samples that occur M{r} times in the base distribution.
@type N: C{int}
@param N: The total number of outcomes recorded by the heldout
frequency distribution.
"""
estimate = []
for r in range(self._max_r+1):
if Nr[r] == 0: estimate.append(None)
else: estimate.append(Tr[r]/(Nr[r]*N))
return estimate
def base_fdist(self):
"""
@return: The base frequency distribution that this probability
distribution is based on.
@rtype: C{FreqDist}
"""
return self._base_fdist
def heldout_fdist(self):
"""
@return: The heldout frequency distribution that this
probability distribution is based on.
@rtype: C{FreqDist}
"""
return self._heldout_fdist
def prob(self, sample):
# Use our precomputed probability estimate.
r = self._base_fdist.count(sample)
return self._estimate[r]
def max(self):
# Note: the Heldout estimation is *not* necessarily monotonic;
# so this implementation is currently broken. However, it
# should give the right answer *most* of the time. :)
return self._base_fdist.max()
def __repr__(self):
"""
@rtype: C{string}
@return: A string representation of this C{ProbDist}.
"""
s = '<HeldoutProbDist: %d base samples; %d heldout samples>'
return s % (self._base_fdist.N(), self._heldout_fdist.N())
class CrossValidationProbDist(ProbDistI):
"""
The cross-validation estimate for the probability distribution of
    the experiment used to generate a set of frequency distributions.
The X{cross-validation estimate} for the probability of a sample
is found by averaging the held-out estimates for the sample in
each pair of frequency distributions.
"""
def __init__(self, freqdists, bins):
"""
Use the cross-validation estimate to create a probability
distribution for the experiment used to generate
C{freqdists}.
@type freqdists: C{list} of C{FreqDist}
@param freqdists: A list of the frequency distributions
generated by the experiment.
@type bins: C{int}
@param bins: The number of sample values that can be generated
by the experiment that is described by the probability
distribution. This value must be correctly set for the
            probabilities of the sample values to sum to one.
"""
assert _chktype(1, freqdists, [FreqDist], (FreqDist,))
assert _chktype(2, bins, types.IntType, types.NoneType)
self._freqdists = freqdists
# Create a heldout probability distribution for each pair of
# frequency distributions in freqdists.
self._heldout_probdists = []
for fdist1 in freqdists:
for fdist2 in freqdists:
if fdist1 is not fdist2:
probdist = HeldoutProbDist(fdist1, fdist2, bins)
self._heldout_probdists.append(probdist)
def freqdists(self):
"""
@rtype: C{list} of C{FreqDist}
@return: The list of frequency distributions that this
C{ProbDist} is based on.
"""
return self._freqdists
def prob(self, sample):
# Find the average probability estimate returned by each
# heldout distribution.
prob = 0.0
for heldout_probdist in self._heldout_probdists:
prob += heldout_probdist.prob(sample)
return prob/len(self._heldout_probdists)
def __repr__(self):
"""
@rtype: C{string}
@return: A string representation of this C{ProbDist}.
"""
return '<CrossValidationProbDist: %d-way>' % len(self._freqdists)
class WittenBellProbDist(ProbDistI):
"""
The Witten-Bell estimate of a probability distribution. This distribution
allocates uniform probability mass to as yet unseen events by using the
number of events that have only been seen once. The probability mass
reserved for unseen events is equal to:
- M{T / (N + T)}
where M{T} is the number of observed event types and M{N} is the total
number of observed events. This equates to the maximum likelihood estimate
    of a new type event occurring. The remaining probability mass is discounted
such that all probability estimates sum to one, yielding:
- M{p = T / Z (N + T)}, if count = 0
- M{p = c / (N + T)}, otherwise
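    For example (an illustrative sketch), with counts {'a': 2, 'b': 1}
    and C{bins=4}: M{T}=2, M{N}=3 and M{Z}=2, so the seen sample 'a'
    is estimated at M{2/(3+2)} = 0.4 and each unseen sample at
    M{2/(2*(3+2))} = 0.2.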
"""
def __init__(self, freqdist, bins=None):
"""
Creates a distribution of Witten-Bell probability estimates. This
distribution allocates uniform probability mass to as yet unseen
events by using the number of events that have only been seen once.
The probability mass reserved for unseen events is equal to:
- M{T / (N + T)}
where M{T} is the number of observed event types and M{N} is the total
number of observed events. This equates to the maximum likelihood
        estimate of a new type event occurring. The remaining probability mass
is discounted such that all probability estimates sum to one,
yielding:
- M{p = T / Z (N + T)}, if count = 0
- M{p = c / (N + T)}, otherwise
The parameters M{T} and M{N} are taken from the C{freqdist} parameter
(the C{B()} and C{N()} values). The normalising factor M{Z} is
calculated using these values along with the C{bins} parameter.
@param freqdist: The frequency counts upon which to base the
estimation.
@type freqdist: C{FreqDist}
@param bins: The number of possible event types. This must be
at least as large as the number of bins in the
C{freqdist}. If C{None}, then it's assumed to be
equal to that of the C{freqdist}
@type bins: C{Int}
"""
assert _chktype(1, freqdist, FreqDist)
assert _chktype(2, bins, types.IntType, types.NoneType)
        assert bins is None or bins >= freqdist.B(),\
'Bins parameter must not be less than freqdist.B()'
        if bins is None:
bins = freqdist.B()
self._freqdist = freqdist
self._T = self._freqdist.B()
self._Z = bins - self._freqdist.B()
self._N = self._freqdist.N()
def prob(self, sample):
# inherit docs from ProbDistI
c = self._freqdist.count(sample)
if c == 0:
return self._T / float(self._Z * (self._N + self._T))
else:
return c / float(self._N + self._T)
def max(self):
return self._freqdist.max()
def samples(self):
return self._freqdist.samples()
def freqdist(self):
return self._freqdist
def __repr__(self):
"""
@rtype: C{string}
@return: A string representation of this C{ProbDist}.
"""
return '<WittenBellProbDist based on %d samples>' % self._freqdist.N()
class GoodTuringProbDist(ProbDistI):
"""
The Good-Turing estimate of a probability distribution. This method
calculates the probability mass to assign to events with zero or low
counts based on the number of events with higher counts. It does so by
using the smoothed count M{c*}:
- M{c* = (c + 1) N(c + 1) / N(c)}
where M{c} is the original count, M{N(i)} is the number of event types
observed with count M{i}. These smoothed counts are then normalised to
yield a probability distribution.
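    For example (an illustrative sketch), with counts {'a': 1, 'b': 1,
    'c': 2}: M{N(1)}=2, M{N(2)}=1 and the total count is 4, so the
    estimate for 'a' is M{(1+1)*N(2)/(N(1)*4)} = 0.25.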
"""
# TODO - add a cut-off parameter, above which the counts are unmodified
# (see J&M p216)
    def __init__(self, freqdist, bins=None):
"""
Creates a Good-Turing probability distribution estimate. This method
calculates the probability mass to assign to events with zero or low
counts based on the number of events with higher counts. It does so by
using the smoothed count M{c*}:
- M{c* = (c + 1) N(c + 1) / N(c)}
where M{c} is the original count, M{N(i)} is the number of event types
observed with count M{i}. These smoothed counts are then normalised to
yield a probability distribution.
The C{bins} parameter allows C{N(0)} to be estimated.
@param freqdist: The frequency counts upon which to base the
estimation.
@type freqdist: C{FreqDist}
@param bins: The number of possible event types. This must be
at least as large as the number of bins in the
C{freqdist}. If C{None}, then it's taken to be
equal to C{freqdist.B()}.
@type bins: C{Int}
"""
assert _chktype(1, freqdist, FreqDist)
assert _chktype(2, bins, types.IntType, types.NoneType)
        assert bins is None or bins >= freqdist.B(),\
'Bins parameter must not be less than freqdist.B()'
        if bins is None:
bins = freqdist.B()
self._freqdist = freqdist
self._bins = bins
def prob(self, sample):
# inherit docs from FreqDist
c = self._freqdist.count(sample)
nc = self._freqdist.Nr(c, self._bins)
ncn = self._freqdist.Nr(c + 1, self._bins)
return float(c + 1) * ncn / (nc * self._freqdist.N())
def max(self):
return self._freqdist.max()
def samples(self):
return self._freqdist.samples()
def freqdist(self):
return self._freqdist
def __repr__(self):
"""
@rtype: C{string}
@return: A string representation of this C{ProbDist}.
"""
return '<GoodTuringProbDist based on %d samples>' % self._freqdist.N()
class MutableProbDist(ProbDistI):
"""
    A mutable probdist where the probabilities may be easily modified. This
simply copies an existing probdist, storing the probability values in a
mutable dictionary and providing an update method.
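    For example (a minimal sketch; with C{store_logs=False} the plain
    probabilities are stored):
        >>> pd = DictionaryProbDist({'a': 0.75, 'b': 0.25})
        >>> mpd = MutableProbDist(pd, ['a', 'b'], store_logs=False)
        >>> mpd.update('a', 0.5, log=False)
        >>> mpd.prob('a')
        0.5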
"""
def __init__(self, prob_dist, samples, store_logs=True):
"""
Creates the mutable probdist based on the given prob_dist and using
the list of samples given. These values are stored as log
probabilities if the store_logs flag is set.
@param prob_dist: the distribution from which to garner the
probabilities
@type prob_dist: ProbDist
@param samples: the complete set of samples
@type samples: sequence of any
@param store_logs: whether to store the probabilities as logarithms
@type store_logs: bool
"""
self._samples = samples
self._sample_dict = dict([(samples[i], i) for i in range(len(samples))])
self._data = numpy.zeros(len(samples), numpy.float64)
for i in range(len(samples)):
if store_logs:
self._data[i] = prob_dist.logprob(samples[i])
else:
self._data[i] = prob_dist.prob(samples[i])
self._logs = store_logs
def samples(self):
# inherit doco
return self._samples
def prob(self, sample):
# inherit doco
i = self._sample_dict.get(sample)
        if i is not None:
if self._logs:
return exp(self._data[i])
else:
return self._data[i]
else:
return 0.0
def logprob(self, sample):
# inherit doco
i = self._sample_dict.get(sample)
        if i is not None:
if self._logs:
return self._data[i]
else:
return log(self._data[i])
else:
return NINF
def update(self, sample, prob, log=True):
"""
        Update the probability for the given sample. This may cause the
        object to stop being a valid probability distribution - the user
        must ensure that they update the sample probabilities such that
        all samples have probabilities between 0 and 1 and that all
        probabilities sum to one.
@param sample: the sample for which to update the probability
@type sample: any
        @param prob: the new probability
        @type prob: float
        @param log: is the probability already logged
        @type log: bool
"""
        i = self._sample_dict.get(sample)
        assert i is not None
        if self._logs:
            if log: self._data[i] = prob
            # use math.log explicitly: the `log` parameter shadows the
            # module-level log function inside this method
            else: self._data[i] = math.log(prob)
        else:
            if log: self._data[i] = exp(prob)
            else: self._data[i] = prob
##//////////////////////////////////////////////////////
## Probability Distribution Operations
##//////////////////////////////////////////////////////
def log_likelihood(test_pdist, actual_pdist):
    # The expected log likelihood of test_pdist under actual_pdist,
    # i.e. the negative cross-entropy (in nats).
    return sum([actual_pdist.prob(s) * math.log(test_pdist.prob(s))
                for s in actual_pdist.samples()])
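# For example (illustrative): for two identical uniform distributions
# over {'a', 'b'}, log_likelihood(p, p) = 2 * 0.5 * math.log(0.5),
# i.e. -log(2), the negative entropy of the distribution.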
##//////////////////////////////////////////////////////
## Conditional Distributions
##//////////////////////////////////////////////////////
class ConditionalFreqDist:
"""
A collection of frequency distributions for a single experiment
run under different conditions. Conditional frequency
distributions are used to record the number of times each sample
    occurred, given the condition under which the experiment was run.
For example, a conditional frequency distribution could be used to
record the frequency of each word type in a document, given the
length of the word type. Formally, a conditional frequency
distribution can be defined as a function that maps from each
condition to the C{FreqDist} for the experiment under that
condition.
The frequency distribution for each condition is accessed using
the indexing operator:
>>> cfdist[3]
<FreqDist with 73 outcomes>
>>> cfdist[3].freq('the')
0.4
>>> cfdist[3].count('dog')
2
When the indexing operator is used to access the frequency
distribution for a condition that has not been accessed before,
C{ConditionalFreqDist} creates a new empty C{FreqDist} for that
condition.
Conditional frequency distributions are typically constructed by
repeatedly running an experiment under a variety of conditions,
and incrementing the sample outcome counts for the appropriate
conditions. For example, the following code will produce a
conditional frequency distribution that encodes how often each
word type occurs, given the length of that word type:
>>> cfdist = ConditionalFreqDist()
>>> for token in text_token['SUBTOKENS']:
... condition = len(token['TEXT'])
... cfdist[condition].inc(token['TEXT'])
"""
def __init__(self):
"""
Construct a new empty conditional frequency distribution. In
particular, the count for every sample, under every condition,
is zero.
"""
self._fdists = {}
def __getitem__(self, condition):
"""
Return the frequency distribution that encodes the frequency
of each sample outcome, given that the experiment was run
under the given condition. If the frequency distribution for
the given condition has not been accessed before, then this
will create a new empty C{FreqDist} for that condition.
@return: The frequency distribution that encodes the frequency
of each sample outcome, given that the experiment was run
under the given condition.
@rtype: C{FreqDist}
@param condition: The condition under which the experiment was
run.
@type condition: any
"""
# Create the conditioned freq dist, if it doesn't exist
if not self._fdists.has_key(condition):
self._fdists[condition] = FreqDist()
return self._fdists[condition]
def conditions(self):
"""
@return: A list of the conditions that have been accessed for
this C{ConditionalFreqDist}. Use the indexing operator to
access the frequency distribution for a given condition.
Note that the frequency distributions for some conditions
may contain zero sample outcomes.
@rtype: C{list}
"""
return self._fdists.keys()
def __repr__(self):
"""
@return: A string representation of this
C{ConditionalFreqDist}.
@rtype: C{string}
"""
n = len(self._fdists)
return '<ConditionalFreqDist with %d conditions>' % n
class ConditionalProbDistI:
"""
A collection of probability distributions for a single experiment
run under different conditions. Conditional probability
distributions are used to estimate the likelihood of each sample,
given the condition under which the experiment was run. For
example, a conditional probability distribution could be used to
estimate the probability of each word type in a document, given
the length of the word type. Formally, a conditional probability
distribution can be defined as a function that maps from each
condition to the C{ProbDist} for the experiment under that
condition.
"""
def __init__(self):
raise AssertionError, 'ConditionalProbDistI is an interface'
def __getitem__(self, condition):
"""
@return: The probability distribution for the experiment run
under the given condition.
@rtype: C{ProbDistI}
@param condition: The condition whose probability distribution
should be returned.
@type condition: any
"""
raise AssertionError
def conditions(self):
"""
@return: A list of the conditions that are represented by
this C{ConditionalProbDist}. Use the indexing operator to
access the probability distribution for a given condition.
@rtype: C{list}
"""
raise AssertionError
# For now, this is the only implementation of ConditionalProbDistI;
# but we would want a different implementation if we wanted to build a
# conditional probability distribution analytically (e.g., a gaussian
# distribution), rather than basing it on an underlying frequency
# distribution.
class ConditionalProbDist(ConditionalProbDistI):
"""
A conditional probability distribution modelling the experiments
that were used to generate a conditional frequency distribution.
    A C{ConditionalProbDist} is constructed from a
C{ConditionalFreqDist} and a X{C{ProbDist} factory}:
- The B{C{ConditionalFreqDist}} specifies the frequency
distribution for each condition.
- The B{C{ProbDist} factory} is a function that takes a
condition's frequency distribution, and returns its
probability distribution. A C{ProbDist} class's name (such as
C{MLEProbDist} or C{HeldoutProbDist}) can be used to specify
that class's constructor.
The first argument to the C{ProbDist} factory is the frequency
distribution that it should model; and the remaining arguments are
specified by the C{factory_args} parameter to the
C{ConditionalProbDist} constructor. For example, the following
code constructs a C{ConditionalProbDist}, where the probability
distribution for each condition is an C{ELEProbDist} with 10 bins:
>>> cpdist = ConditionalProbDist(cfdist, ELEProbDist, 10)
>>> print cpdist['run'].max()
'NN'
>>> print cpdist['run'].prob('NN')
0.0813
"""
def __init__(self, cfdist, probdist_factory,
supply_condition=False, *factory_args):
"""
Construct a new conditional probability distribution, based on
the given conditional frequency distribution and C{ProbDist}
factory.
@type cfdist: L{ConditionalFreqDist}
@param cfdist: The C{ConditionalFreqDist} specifying the
frequency distribution for each condition.
@type probdist_factory: C{class} or C{function}
@param probdist_factory: The function or class that maps
a condition's frequency distribution to its probability
distribution. The function is called with the frequency
distribution as its first argument, the condition as its
second argument (only if C{supply_condition=True}), and
C{factory_args} as its remaining arguments.
@type supply_condition: C{bool}
@param supply_condition: If true, then pass the condition as
the second argument to C{probdist_factory}.
@type factory_args: (any)
@param factory_args: Extra arguments for C{probdist_factory}.
These arguments are usually used to specify extra
properties for the probability distributions of individual
conditions, such as the number of bins they contain.
"""
assert _chktype(1, cfdist, ConditionalFreqDist)
assert _chktype(2, probdist_factory, types.FunctionType,
types.BuiltinFunctionType, types.MethodType,
types.ClassType)
assert _chktype(3, supply_condition, bool)
self._probdist_factory = probdist_factory
self._cfdist = cfdist
self._supply_condition = supply_condition
self._factory_args = factory_args
self._pdists = {}
for c in cfdist.conditions():
if supply_condition:
pdist = probdist_factory(cfdist[c], c, *factory_args)
else:
pdist = probdist_factory(cfdist[c], *factory_args)
self._pdists[c] = pdist
def __getitem__(self, condition):
if not self._pdists.has_key(condition):
# If it's a condition we haven't seen, create a new prob
# dist from the empty freq dist. Typically, this will
# give a uniform prob dist.
pdist = self._probdist_factory(FreqDist(), *self._factory_args)
self._pdists[condition] = pdist
return self._pdists[condition]
def conditions(self):
return self._pdists.keys()
def __repr__(self):
"""
@return: A string representation of this
C{ConditionalProbDist}.
@rtype: C{string}
"""
n = len(self._pdists)
return '<ConditionalProbDist with %d conditions>' % n
class DictionaryConditionalProbDist(ConditionalProbDistI):
"""
An alternative ConditionalProbDist that simply wraps a dictionary of
ProbDists rather than creating these from FreqDists.
"""
def __init__(self, probdist_dict):
"""
@param probdist_dict: a dictionary containing the probdists indexed
by the conditions
@type probdist_dict: dict any -> probdist
"""
self._dict = probdist_dict
def __getitem__(self, condition):
# inherit doco
# this will cause an exception for unseen conditions
return self._dict[condition]
def conditions(self):
# inherit doco
return self._dict.keys()
##//////////////////////////////////////////////////////
## Probabilistic Mix-in
##//////////////////////////////////////////////////////
class ProbabilisticMixIn:
"""
A mix-in class to associate probabilities with other classes
(trees, rules, etc.). To use the C{ProbabilisticMixIn} class,
define a new class that derives from an existing class and from
ProbabilisticMixIn. You will need to define a new constructor for
the new class, which explicitly calls the constructors of both its
parent classes. For example:
>>> class A:
... def __init__(self, x, y): self.data = (x,y)
...
>>> class ProbabilisticA(A, ProbabilisticMixIn):
... def __init__(self, x, y, **prob_kwarg):
... A.__init__(self, x, y)
... ProbabilisticMixIn.__init__(self, **prob_kwarg)
See the documentation for the ProbabilisticMixIn
L{constructor<__init__>} for information about the arguments it
expects.
You should generally also redefine the string representation
methods, the comparison methods, and the hashing method.
"""
def __init__(self, **kwargs):
"""
Initialize this object's probability. This initializer should
be called by subclass constructors. C{prob} should generally be
the first argument for those constructors.
@kwparam prob: The probability associated with the object.
@type prob: C{float}
@kwparam logprob: The log of the probability associated with
the object.
        @type logprob: C{float}
"""
if 'prob' in kwargs:
if 'logprob' in kwargs:
raise TypeError('Must specify either prob or logprob '
'(not both)')
else:
ProbabilisticMixIn.set_prob(self, kwargs['prob'])
elif 'logprob' in kwargs:
ProbabilisticMixIn.set_logprob(self, kwargs['logprob'])
else:
self.__prob = self.__logprob = None
def set_prob(self, prob):
"""
Set the probability associated with this object to C{prob}.
@param prob: The new probability
@type prob: C{float}
"""
assert _chktype(1, prob, types.IntType, types.FloatType)
self.__prob = prob
self.__logprob = None
def set_logprob(self, logprob):
"""
Set the log probability associated with this object to
C{logprob}. I.e., set the probability associated with this
object to C{exp(logprob)}.
@param logprob: The new log probability
@type logprob: C{float}
"""
        assert _chktype(1, logprob, types.IntType, types.FloatType)
        self.__logprob = logprob
self.__prob = None
def prob(self):
"""
@return: The probability associated with this object.
@rtype: C{float}
"""
if self.__prob is None:
if self.__logprob is None: return None
self.__prob = math.exp(self.__logprob)
return self.__prob
def logprob(self):
"""
@return: C{log(p)}, where C{p} is the probability associated
with this object.
@rtype: C{float}
"""
if self.__logprob is None:
if self.__prob is None: return None
self.__logprob = math.log(self.__prob)
return self.__logprob
class ImmutableProbabilisticMixIn(ProbabilisticMixIn):
def set_prob(self, prob):
raise ValueError, '%s is immutable' % self.__class__.__name__
def set_logprob(self, prob):
raise ValueError, '%s is immutable' % self.__class__.__name__
##//////////////////////////////////////////////////////
## Demonstration
##//////////////////////////////////////////////////////
def _create_rand_fdist(numsamples, numoutcomes):
"""
Create a new frequency distribution, with random samples. The
samples are numbers from 1 to C{numsamples}, and are generated by
summing two numbers, each of which has a uniform distribution.
"""
import random
from math import sqrt
fdist = FreqDist()
for x in range(numoutcomes):
y = (random.randint(1, (1+numsamples)/2) +
random.randint(0, numsamples/2))
fdist.inc(y)
return fdist
def _create_sum_pdist(numsamples):
"""
Return the true probability distribution for the experiment
C{_create_rand_fdist(numsamples, x)}.
"""
fdist = FreqDist()
for x in range(1, (1+numsamples)/2+1):
for y in range(0, numsamples/2+1):
fdist.inc(x+y)
return MLEProbDist(fdist)
def demo(numsamples=6, numoutcomes=500):
"""
A demonstration of frequency distributions and probability
distributions. This demonstration creates three frequency
    distributions by sampling a random process with C{numsamples}
    samples. Each frequency distribution is sampled
C{numoutcomes} times. These three frequency distributions are
then used to build six probability distributions. Finally, the
probability estimates of these distributions are compared to the
actual probability of each sample.
@type numsamples: C{int}
@param numsamples: The number of samples to use in each demo
        frequency distribution.
@type numoutcomes: C{int}
@param numoutcomes: The total number of outcomes for each
demo frequency distribution. These outcomes are divided into
C{numsamples} bins.
@rtype: C{None}
"""
assert _chktype(1, numsamples, types.IntType)
assert _chktype(2, numoutcomes, types.IntType)
# Randomly sample a stochastic process three times.
fdist1 = _create_rand_fdist(numsamples, numoutcomes)
fdist2 = _create_rand_fdist(numsamples, numoutcomes)
fdist3 = _create_rand_fdist(numsamples, numoutcomes)
# Use our samples to create probability distributions.
pdists = [
MLEProbDist(fdist1),
LidstoneProbDist(fdist1, 0.5, numsamples),
HeldoutProbDist(fdist1, fdist2, numsamples),
HeldoutProbDist(fdist2, fdist1, numsamples),
CrossValidationProbDist([fdist1, fdist2, fdist3], numsamples),
_create_sum_pdist(numsamples),
]
# Find the probability of each sample.
vals = []
for n in range(1,numsamples+1):
vals.append(tuple([n, fdist1.freq(n)] +
[pdist.prob(n) for pdist in pdists]))
# Print the results in a formatted table.
print ('%d samples (1-%d); %d outcomes were sampled for each FreqDist' %
(numsamples, numsamples, numoutcomes))
print '='*9*(len(pdists)+2)
FORMATSTR = ' FreqDist '+ '%8s '*(len(pdists)-1) + '| Actual'
print FORMATSTR % tuple([`pdist`[1:9] for pdist in pdists[:-1]])
print '-'*9*(len(pdists)+2)
FORMATSTR = '%3d %8.6f ' + '%8.6f '*(len(pdists)-1) + '| %8.6f'
for val in vals:
print FORMATSTR % val
# Print the totals for each column (should all be 1.0)
zvals = zip(*vals)
def sum(lst): return reduce(lambda x,y:x+y, lst, 0)
sums = [sum(val) for val in zvals[1:]]
print '-'*9*(len(pdists)+2)
FORMATSTR = 'Total ' + '%8.6f '*(len(pdists)) + '| %8.6f'
print FORMATSTR % tuple(sums)
print '='*9*(len(pdists)+2)
# Display the distributions themselves, if they're short enough.
if len(`str(fdist1)`) < 70:
print ' fdist1:', str(fdist1)
print ' fdist2:', str(fdist2)
print ' fdist3:', str(fdist3)
print
if __name__ == '__main__':
demo(6, 10)
demo(5, 5000)
| ronaldahmed/robot-navigation | neural-navigation-with-lstm/MARCO/nltk/probability.py | Python | mit | 60,530 |
__author__ = 'Stefan'
| sDessens/coinotomy | coinotomy/watchers/tests/__init__.py | Python | mit | 22 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""deprecate_restricted_metrics
Revision ID: 11c737c17cc6
Revises: def97f26fdfb
Create Date: 2019-09-08 21:50:58.200229
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = "11c737c17cc6"
down_revision = "def97f26fdfb"
def upgrade():
with op.batch_alter_table("metrics") as batch_op:
batch_op.drop_column("is_restricted")
with op.batch_alter_table("sql_metrics") as batch_op:
batch_op.drop_column("is_restricted")
def downgrade():
op.add_column(
"sql_metrics", sa.Column("is_restricted", sa.BOOLEAN(), nullable=True)
)
op.add_column("metrics", sa.Column("is_restricted", sa.BOOLEAN(), nullable=True))
| apache/incubator-superset | superset/migrations/versions/11c737c17cc6_deprecate_restricted_metrics.py | Python | apache-2.0 | 1,487 |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from openerp import http
from openerp.addons.website_portal.controllers.main import website_account
from openerp.http import request
class WebsiteAccount(website_account):
@http.route()
def account(self):
response = super(WebsiteAccount, self).account()
user = request.env.user
        # TDE FIXME: shouldn't that be managed by the access rule itself?
# portal projects where you or someone from your company are a follower
project_issues = request.env['project.issue'].sudo().search([
'&',
('project_id.privacy_visibility', '=', 'portal'),
'|',
('message_partner_ids', 'child_of', [user.partner_id.commercial_partner_id.id]),
('message_partner_ids', 'child_of', [user.partner_id.id])
])
response.qcontext.update({'issues': project_issues})
return response
class WebsiteProjectIssue(http.Controller):
@http.route(['/my/issues/<int:issue_id>'], type='http', auth="user", website=True)
def issues_followup(self, issue_id=None):
issue = request.env['project.issue'].browse(issue_id)
return request.website.render("website_project_issue.issues_followup", {'issue': issue})
| be-cloud-be/horizon-addons | server/addons/website_project_issue/controllers/main.py | Python | agpl-3.0 | 1,320 |
# -*- coding: utf-8 -*-
#
# Django FileBrowser documentation build configuration file, created by
# sphinx-quickstart on Sun Dec 5 19:11:46 2010.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Django FileBrowser'
copyright = u'2015, Patrick Kranzlmueller'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '3.6.2'
# The full version, including alpha/beta/rc tags.
release = '3.6.2'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = ["_theme"]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
#html_style = "custom.css"
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'DjangoFileBrowserdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'DjangoFileBrowser.tex', u'Django FileBrowser Documentation',
u'Patrick Kranzlmueller', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'djangofilebrowser', u'Django FileBrowser Documentation',
[u'Patrick Kranzlmueller'], 1)
]
if not on_rtd: # only import and set the theme if we're building docs locally
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
| michalwerner/django-filebrowser | docs/conf.py | Python | bsd-3-clause | 7,396 |
from typing import List
import pyasice
from esteid.exceptions import InvalidParameter
from esteid.signing import Container, DataFile, Signer
class MySigner(Signer):
def prepare(self, container: pyasice.Container = None, files: List[DataFile] = None) -> dict:
container = self.open_container(container, files)
xml_sig = pyasice.XmlSignature.create()
self.save_session_data(digest=b"test", container=container, xml_sig=xml_sig)
return {"verification_code": "1234"}
def finalize(self, data=None) -> Container:
return Container.open(self.session_data.temp_container_file)
class MyPostSigner(MySigner):
"""
Requires POST method parameters to init and finalize
"""
def setup(self, initial_data: dict = None):
try:
initial_data["certificate"]
except (TypeError, KeyError):
raise InvalidParameter(param="certificate")
def finalize(self, data=None) -> Container:
try:
data["signature_value"]
except (TypeError, KeyError):
raise InvalidParameter(param="signature_value")
return super().finalize()
| thorgate/django-esteid | esteid/flowtest/signer.py | Python | bsd-3-clause | 1,155 |
from Screens.Screen import Screen
from Screens.ChannelSelection import *
from Screens.ChoiceBox import ChoiceBox
from Screens.Standby import TryQuitMainloop
from Screens.MessageBox import MessageBox
from Components.ActionMap import ActionMap
from Components.Sources.List import List
from Components.Sources.StaticText import StaticText
from Components.config import ConfigNothing
from Components.ConfigList import ConfigList
from Components.Label import Label
from Components.SelectionList import SelectionList
from Components.MenuList import MenuList
from Components.SystemInfo import SystemInfo
from ServiceReference import ServiceReference
from Plugins.Plugin import PluginDescriptor
from xml.etree.cElementTree import parse
from enigma import eDVBCI_UI, eDVBCIInterfaces, eEnv, eServiceCenter
from Tools.BoundFunction import boundFunction
from Tools.CIHelper import cihelper
from Tools.XMLTools import stringToXML
import os
class CIselectMainMenu(Screen):
skin = """
<screen name="CIselectMainMenu" position="center,center" size="500,250" title="CI assignment" >
<ePixmap pixmap="buttons/red.png" position="0,0" size="140,40" alphatest="on" />
<ePixmap pixmap="buttons/green.png" position="140,0" size="140,40" alphatest="on" />
<widget source="key_red" render="Label" position="0,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#9f1313" transparent="1" />
<widget source="key_green" render="Label" position="140,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#1f771f" transparent="1" />
<widget name="CiList" position="5,50" size="490,200" scrollbarMode="showOnDemand" />
</screen>"""
def __init__(self, session, args = 0):
Screen.__init__(self, session)
self["key_red"] = StaticText(_("Close"))
self["key_green"] = StaticText(_("Edit"))
self["actions"] = ActionMap(["ColorActions","SetupActions"],
{
"green": self.greenPressed,
"red": self.close,
"ok": self.greenPressed,
"cancel": self.close
}, -1)
NUM_CI = SystemInfo["CommonInterface"]
print "[CI_Wizzard] FOUND %d CI Slots " % NUM_CI
self.dlg = None
self.state = { }
self.list = [ ]
if NUM_CI and NUM_CI > 0:
for slot in range(NUM_CI):
state = eDVBCI_UI.getInstance().getState(slot)
if state != -1:
appname = _("Slot %d") %(slot+1) + " - " + _("unknown error")
if state == 0:
appname = _("Slot %d") %(slot+1) + " - " + _("no module found")
elif state == 1:
appname = _("Slot %d") %(slot+1) + " - " + _("init modules")
elif state == 2:
appname = _("Slot %d") %(slot+1) + " - " + eDVBCI_UI.getInstance().getAppName(slot)
self.list.append( (appname, ConfigNothing(), 0, slot) )
else:
self.list.append( (_("no CI slots found") , ConfigNothing(), 1, -1) )
menuList = ConfigList(self.list)
menuList.list = self.list
menuList.l.setList(self.list)
self["CiList"] = menuList
self.onShown.append(self.setWindowTitle)
def setWindowTitle(self):
self.setTitle(_("CI assignment"))
def greenPressed(self):
cur = self["CiList"].getCurrent()
if cur and len(cur) > 2:
action = cur[2]
slot = cur[3]
if action == 1:
print "[CI_Wizzard] there is no CI Slot in your receiver"
else:
print "[CI_Wizzard] selected CI Slot : %d" % slot
if config.usage.setup_level.index > 1: # advanced
self.session.open(CIconfigMenu, slot)
else:
self.session.open(easyCIconfigMenu, slot)
class CIconfigMenu(Screen):
skin = """
<screen name="CIconfigMenu" position="center,center" size="560,440" title="CI assignment" >
<ePixmap pixmap="buttons/red.png" position="0,0" size="140,40" alphatest="on" />
<ePixmap pixmap="buttons/green.png" position="140,0" size="140,40" alphatest="on" />
<ePixmap pixmap="buttons/yellow.png" position="280,0" size="140,40" alphatest="on" />
<ePixmap pixmap="buttons/blue.png" position="420,0" size="140,40" alphatest="on" />
<widget source="key_red" render="Label" position="0,0" zPosition="1" size="140,40" font="Regular;18" halign="center" valign="center" backgroundColor="#9f1313" transparent="1" />
<widget source="key_green" render="Label" position="140,0" zPosition="1" size="140,40" font="Regular;18" halign="center" valign="center" backgroundColor="#1f771f" transparent="1" />
<widget source="key_yellow" render="Label" position="280,0" zPosition="1" size="140,40" font="Regular;18" halign="center" valign="center" backgroundColor="#a08500" transparent="1" />
<widget source="key_blue" render="Label" position="420,0" zPosition="1" size="140,40" font="Regular;18" halign="center" valign="center" backgroundColor="#18188b" transparent="1" />
<widget source="CAidList_desc" render="Label" position="5,50" size="550,22" font="Regular;20" backgroundColor="#25062748" transparent="1" />
<widget source="CAidList" render="Label" position="5,80" size="550,45" font="Regular;20" backgroundColor="#25062748" transparent="1" />
<ePixmap pixmap="div-h.png" position="0,125" zPosition="1" size="560,2" />
<widget source="ServiceList_desc" render="Label" position="5,130" size="550,22" font="Regular;20" backgroundColor="#25062748" transparent="1" />
<widget name="ServiceList" position="5,160" size="550,250" zPosition="1" scrollbarMode="showOnDemand" />
<widget source="ServiceList_info" render="Label" position="5,160" size="550,250" zPosition="2" font="Regular;20" backgroundColor="#25062748" transparent="1" />
</screen>"""
def __init__(self, session, ci_slot="9"):
Screen.__init__(self, session)
self.ci_slot = ci_slot
self.filename = eEnv.resolve("${sysconfdir}/enigma2/ci") + str(self.ci_slot) + ".xml"
self["key_red"] = StaticText(_("Delete"))
self["key_green"] = StaticText(_("Add service"))
self["key_yellow"] = StaticText(_("Add provider"))
self["key_blue"] = StaticText(_("Select CAId"))
self["CAidList_desc"] = StaticText(_("Assigned CAIds:"))
self["CAidList"] = StaticText()
self["ServiceList_desc"] = StaticText(_("Assigned services/provider:"))
self["ServiceList_info"] = StaticText()
self["actions"] = ActionMap(["ColorActions","SetupActions", "MenuActions"],
{
"green": self.greenPressed,
"red": self.redPressed,
"yellow": self.yellowPressed,
"blue": self.bluePressed,
"menu": self.menuPressed,
"cancel": self.cancel
}, -1)
print "[CI_Wizzard_Config] Configuring CI Slots : %d " % self.ci_slot
i = 0
self.caidlist = []
for caid in eDVBCIInterfaces.getInstance().readCICaIds(self.ci_slot):
i += 1
self.caidlist.append((str(hex(int(caid))),str(caid),i))
print "[CI_Wizzard_Config_CI%d] read following CAIds from CI: %s" %(self.ci_slot, self.caidlist)
self.selectedcaid = []
self.servicelist = []
self.caids = ""
serviceList = ConfigList(self.servicelist)
serviceList.list = self.servicelist
serviceList.l.setList(self.servicelist)
self["ServiceList"] = serviceList
self.loadXML()
		# if the setup level is not advanced, auto-select every CAId
		if config.usage.setup_level.index <= 1:  # not advanced
self.selectedcaid = self.caidlist
self.finishedCAidSelection(self.selectedcaid)
self.onShown.append(self.setWindowTitle)
def setWindowTitle(self):
self.setTitle(_("CI assignment"))
def redPressed(self):
self.delete()
def greenPressed(self):
self.session.openWithCallback( self.finishedChannelSelection, myChannelSelection, None)
def yellowPressed(self):
self.session.openWithCallback( self.finishedProviderSelection, myProviderSelection, None)
def bluePressed(self):
self.session.openWithCallback(self.finishedCAidSelection, CAidSelect, self.caidlist, self.selectedcaid)
def menuPressed(self):
if os.path.exists(self.filename):
self.session.openWithCallback(self.deleteXMLfile, MessageBox, _("Delete file") + " " + self.filename + "?", MessageBox.TYPE_YESNO)
def deleteXMLfile(self, answer):
if answer:
try:
os.remove(self.filename)
except:
print "[CI_Config_CI%d] error remove xml..." % self.ci_slot
else:
self.session.openWithCallback(self.restartGui, MessageBox, _("Restart GUI now?"), MessageBox.TYPE_YESNO)
def restartGui(self, answer):
if answer:
self.session.open(TryQuitMainloop, 3)
def cancel(self):
self.saveXML()
cihelper.load_ci_assignment(force=True)
self.close()
def setServiceListInfo(self):
if len(self.servicelist):
self["ServiceList_info"].setText("")
else:
self["ServiceList_info"].setText(_("No services/providers selected"))
def delete(self):
cur = self["ServiceList"].getCurrent()
if cur and len(cur) > 2:
self.servicelist.remove(cur)
self["ServiceList"].l.setList(self.servicelist)
self.setServiceListInfo()
def finishedChannelSelection(self, *args):
if len(args):
ref = args[0]
service_ref = ServiceReference(ref)
service_name = service_ref.getServiceName()
			if not find_in_list(self.servicelist, service_name, 0):
str_service = service_ref.ref.toString()
split_ref = str_service.split(":")
if split_ref[0] == "1" and not str_service.startswith("1:134:") and "%3a//" not in str_service:
self.servicelist.append((service_name, ConfigNothing(), 0, str_service))
self["ServiceList"].l.setList(self.servicelist)
self.setServiceListInfo()
def finishedProviderSelection(self, *args):
item = len(args)
if item > 1:
if item > 2 and args[2] is True:
for ref in args[0]:
service_ref = ServiceReference(ref)
service_name = service_ref.getServiceName()
					if len(service_name) and not find_in_list(self.servicelist, service_name, 0):
split_ref = service_ref.ref.toString().split(":")
if split_ref[0] == "1":
self.servicelist.append((service_name, ConfigNothing(), 0, service_ref.ref.toString()))
self["ServiceList"].l.setList(self.servicelist)
self.setServiceListInfo()
else:
name = args[0]
dvbnamespace = args[1]
				if not find_in_list(self.servicelist, name, 0):
self.servicelist.append((name, ConfigNothing(), 1, dvbnamespace))
self["ServiceList"].l.setList(self.servicelist)
self.setServiceListInfo()
def finishedCAidSelection(self, *args):
if len(args):
self.selectedcaid = args[0]
self.caids=""
if len(self.selectedcaid):
for item in self.selectedcaid:
if len(self.caids):
self.caids += ", " + item[0]
else:
self.caids = item[0]
else:
self.selectedcaid = []
self.caids = _("no CAId selected")
else:
self.selectedcaid = []
self.caids = _("no CAId selected")
self["CAidList"].setText(self.caids)
def saveXML(self):
try:
			fp = open(self.filename, 'w')
fp.write("<?xml version=\"1.0\" encoding=\"utf-8\" ?>\n")
fp.write("<ci>\n")
fp.write("\t<slot>\n")
fp.write("\t\t<id>%s</id>\n" % self.ci_slot)
for item in self.selectedcaid:
if len(self.selectedcaid):
fp.write("\t\t<caid id=\"%s\" />\n" % item[0])
for item in self.servicelist:
if len(self.servicelist):
					name = item[0].replace('&', '&amp;')  # escape '&' first so the entities below aren't double-escaped
					name = name.replace('<', '&lt;')
					name = name.replace('>', '&gt;')
					name = name.replace('"', '&quot;')
					name = name.replace("'", '&apos;')
if item[2] == 1:
fp.write("\t\t<provider name=\"%s\" dvbnamespace=\"%s\" />\n" % (stringToXML(name), item[3]))
else:
fp.write("\t\t<service name=\"%s\" ref=\"%s\" />\n" % (stringToXML(name), item[3]))
fp.write("\t</slot>\n")
fp.write("</ci>\n")
fp.close()
except:
print "[CI_Config_CI%d] xml not written" % self.ci_slot
os.unlink(self.filename)
def loadXML(self):
if not os.path.exists(self.filename):
self.setServiceListInfo()
return
def getValue(definitions, default):
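			# py2-style and/or conditional: return the text of the last matching element, else the default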
Len = len(definitions)
return Len > 0 and definitions[Len-1].text or default
self.read_services = []
self.read_providers = []
self.usingcaid = []
self.ci_config = []
try:
tree = parse(self.filename).getroot()
for slot in tree.findall("slot"):
read_slot = getValue(slot.findall("id"), False).encode("UTF-8")
i = 0
for caid in slot.findall("caid"):
read_caid = caid.get("id").encode("UTF-8")
self.selectedcaid.append((str(read_caid),str(read_caid),i))
self.usingcaid.append(long(read_caid,16))
i += 1
for service in slot.findall("service"):
read_service_name = service.get("name").encode("UTF-8")
read_service_ref = service.get("ref").encode("UTF-8")
self.read_services.append (read_service_ref)
for provider in slot.findall("provider"):
read_provider_name = provider.get("name").encode("UTF-8")
read_provider_dvbname = provider.get("dvbnamespace").encode("UTF-8")
self.read_providers.append((read_provider_name,read_provider_dvbname))
self.ci_config.append((int(read_slot), (self.read_services, self.read_providers, self.usingcaid)))
except:
print "[CI_Config_CI%d] error parsing xml..." % self.ci_slot
try:
os.remove(self.filename)
except:
print "[CI_Activate_Config_CI%d] error remove damaged xml..." % self.ci_slot
for item in self.read_services:
if len(item):
self.finishedChannelSelection(item)
for item in self.read_providers:
if len(item):
self.finishedProviderSelection(item[0],item[1])
self.finishedCAidSelection(self.selectedcaid)
self["ServiceList"].l.setList(self.servicelist)
self.setServiceListInfo()
class easyCIconfigMenu(CIconfigMenu):
skin = """
<screen name="easyCIconfigMenu" position="center,center" size="560,440" title="CI assignment" >
<ePixmap pixmap="buttons/red.png" position="0,0" size="140,40" alphatest="on" />
<ePixmap pixmap="buttons/green.png" position="140,0" size="140,40" alphatest="on" />
<ePixmap pixmap="buttons/yellow.png" position="280,0" size="140,40" alphatest="on" />
<widget source="key_red" render="Label" position="0,0" zPosition="1" size="140,40" font="Regular;19" halign="center" valign="center" backgroundColor="#9f1313" transparent="1" />
<widget source="key_green" render="Label" position="140,0" zPosition="1" size="140,40" font="Regular;19" halign="center" valign="center" backgroundColor="#1f771f" transparent="1" />
<widget source="key_yellow" render="Label" position="280,0" zPosition="1" size="140,40" font="Regular;19" halign="center" valign="center" backgroundColor="#a08500" transparent="1" />
<widget source="ServiceList_desc" render="Label" position="5,50" size="550,22" font="Regular;20" backgroundColor="#25062748" transparent="1" />
<widget name="ServiceList" position="5,80" size="550,300" zPosition="1" scrollbarMode="showOnDemand" />
<widget source="ServiceList_info" render="Label" position="5,80" size="550,300" zPosition="2" font="Regular;20" backgroundColor="#25062748" transparent="1" />
</screen>"""
def __init__(self, session, ci_slot="9"):
CIconfigMenu.__init__(self, session, ci_slot)
self["actions"] = ActionMap(["ColorActions","SetupActions", "MenuActions"],
{
"green": self.greenPressed,
"red": self.redPressed,
"yellow": self.yellowPressed,
"menu": self.menuPressed,
"cancel": self.cancel
}, -1)
class CAidSelect(Screen):
skin = """
<screen name="CAidSelect" position="center,center" size="450,440" title="select CAId's" >
<ePixmap pixmap="buttons/red.png" position="0,0" size="140,40" alphatest="on" />
<ePixmap pixmap="buttons/green.png" position="140,0" size="140,40" alphatest="on" />
<widget source="key_red" render="Label" position="0,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#9f1313" transparent="1" />
<widget source="key_green" render="Label" position="140,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#1f771f" transparent="1" />
<widget name="list" position="5,50" size="440,330" scrollbarMode="showOnDemand" />
<ePixmap pixmap="div-h.png" position="0,390" zPosition="1" size="450,2" />
<widget source="introduction" render="Label" position="0,400" size="450,40" zPosition="10" font="Regular;21" halign="center" valign="center" backgroundColor="#25062748" transparent="1" />
</screen>"""
def __init__(self, session, list, selected_caids):
Screen.__init__(self, session)
self.list = SelectionList()
self["list"] = self.list
for listindex in range(len(list)):
if find_in_list(selected_caids,list[listindex][0],0):
self.list.addSelection(list[listindex][0], list[listindex][1], listindex, True)
else:
self.list.addSelection(list[listindex][0], list[listindex][1], listindex, False)
self["key_red"] = StaticText(_("Cancel"))
self["key_green"] = StaticText(_("Save"))
self["introduction"] = StaticText(_("Press OK to select/deselect a CAId."))
self["actions"] = ActionMap(["ColorActions","SetupActions"],
{
"ok": self.list.toggleSelection,
"cancel": self.cancel,
"green": self.greenPressed,
"red": self.cancel
}, -1)
self.onShown.append(self.setWindowTitle)
def setWindowTitle(self):
self.setTitle(_("select CAId's"))
def greenPressed(self):
list = self.list.getSelectionsList()
self.close(list)
def cancel(self):
self.close()
class myProviderSelection(ChannelSelectionBase):
skin = """
<screen name="myProviderSelection" position="center,center" size="560,440" title="Select provider to add...">
<ePixmap pixmap="buttons/red.png" position="0,0" size="140,40" alphatest="on" />
<widget source="key_red" render="Label" position="0,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#9f1313" transparent="1" />
<widget source="key_green" render="Label" position="140,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#1f771f" transparent="1" />
<widget source="key_yellow" render="Label" position="280,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#a08500" transparent="1" />
<widget source="key_blue" render="Label" position="420,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#18188b" transparent="1" />
<widget name="list" position="5,50" size="550,330" scrollbarMode="showOnDemand" />
<ePixmap pixmap="div-h.png" position="0,390" zPosition="1" size="560,2" />
<widget source="introduction" render="Label" position="0,400" size="560,40" zPosition="10" font="Regular;21" halign="center" valign="center" backgroundColor="#25062748" transparent="1" />
</screen>"""
def __init__(self, session, title):
ChannelSelectionBase.__init__(self, session)
self.onShown.append(self.__onExecCallback)
self.bouquet_mark_edit = EDIT_BOUQUET
self["actions"] = ActionMap(["OkCancelActions", "ChannelSelectBaseActions"],
{
"showFavourites": self.showFavourites,
"showAllServices": self.showAllServices,
"showProviders": self.showProviders,
"showSatellites": boundFunction(self.showSatellites, changeMode=True),
"cancel": self.cancel,
"ok": self.channelSelected
})
self["key_red"] = StaticText(_("Close"))
self["key_green"] = StaticText()
self["key_yellow"] = StaticText()
self["key_blue"] = StaticText()
self["introduction"] = StaticText(_("Press OK to select a provider."))
def showProviders(self):
pass
def showAllServices(self):
self.close(None)
def showFavourites(self):
pass
def __onExecCallback(self):
self.showSatellites()
self.setTitle(_("Select provider to add..."))
def channelSelected(self): # just return selected service
ref = self.getCurrentSelection()
if ref is None: return
if not (ref.flags & 64):
splited_ref = ref.toString().split(":")
if ref.flags == 7 and splited_ref[6] != "0":
self.dvbnamespace = splited_ref[6]
self.enterPath(ref)
elif (ref.flags & 7) == 7 and 'provider' in ref.toString():
menu = [(_("Provider"), "provider"),(_("All services provider"), "providerlist")]
def addAction(choice):
if choice is not None:
if choice[1] == "provider":
self.close(ref.getName(), self.dvbnamespace)
elif choice[1] == "providerlist":
serviceHandler = eServiceCenter.getInstance()
servicelist = serviceHandler.list(ref)
						if servicelist is not None:
providerlist = []
while True:
service = servicelist.getNext()
if not service.valid():
break
providerlist.append((service))
if providerlist:
self.close(providerlist, self.dvbnamespace, True)
else:
self.close(None)
self.session.openWithCallback(addAction, ChoiceBox, title = _("Select action"), list=menu)
def showSatellites(self, changeMode=False):
if changeMode:
return
if not self.pathChangeDisabled:
refstr = '%s FROM SATELLITES ORDER BY satellitePosition'%(self.service_types)
if not self.preEnterPath(refstr):
ref = eServiceReference(refstr)
justSet=False
prev = None
if self.isBasePathEqual(ref):
if self.isPrevPathEqual(ref):
justSet=True
prev = self.pathUp(justSet)
else:
currentRoot = self.getRoot()
if currentRoot is None or currentRoot != ref:
justSet=True
self.clearPath()
self.enterPath(ref, True)
if justSet:
serviceHandler = eServiceCenter.getInstance()
servicelist = serviceHandler.list(ref)
					if servicelist is not None:
while True:
service = servicelist.getNext()
if not service.valid(): #check if end of list
break
unsigned_orbpos = service.getUnsignedData(4) >> 16
orbpos = service.getData(4) >> 16
if orbpos < 0:
orbpos += 3600
if service.getPath().find("FROM PROVIDER") != -1:
service_type = _("Providers")
try:
								# why do we need this cast?
service_name = str(nimmanager.getSatDescription(orbpos))
except:
if unsigned_orbpos == 0xFFFF: #Cable
service_name = _("Cable")
elif unsigned_orbpos == 0xEEEE: #Terrestrial
service_name = _("Terrestrial")
else:
if orbpos > 1800: # west
orbpos = 3600 - orbpos
h = _("W")
else:
h = _("E")
service_name = ("%d.%d" + h) % (orbpos / 10, orbpos % 10)
service.setName("%s - %s" % (service_name, service_type))
self.servicelist.addService(service)
self.servicelist.finishFill()
if prev is not None:
self.setCurrentSelection(prev)
def cancel(self):
self.close(None)
class myChannelSelection(ChannelSelectionBase):
skin = """
<screen name="myChannelSelection" position="center,center" size="560,440" title="Select service to add...">
<ePixmap pixmap="buttons/red.png" position="0,0" size="140,40" alphatest="on" />
<ePixmap pixmap="buttons/green.png" position="140,0" size="140,40" alphatest="on" />
<ePixmap pixmap="buttons/blue.png" position="420,0" size="140,40" alphatest="on" />
<widget source="key_red" render="Label" position="0,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#9f1313" transparent="1" />
<widget source="key_green" render="Label" position="140,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#1f771f" transparent="1" />
<widget source="key_yellow" render="Label" position="280,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#a08500" transparent="1" />
<widget source="key_blue" render="Label" position="420,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#18188b" transparent="1" />
<widget name="list" position="5,50" size="550,330" scrollbarMode="showOnDemand" />
<ePixmap pixmap="div-h.png" position="0,390" zPosition="1" size="560,2" />
<widget source="introduction" render="Label" position="0,400" size="560,40" zPosition="10" font="Regular;21" halign="center" valign="center" backgroundColor="#25062748" transparent="1" />
</screen>"""
def __init__(self, session, title):
ChannelSelectionBase.__init__(self, session)
self.onShown.append(self.__onExecCallback)
self.bouquet_mark_edit = OFF
self["actions"] = ActionMap(["OkCancelActions", "TvRadioActions", "ChannelSelectBaseActions"],
{
"showProviders": self.showProviders,
"showSatellites": boundFunction(self.showSatellites, changeMode=True),
"showAllServices": self.showAllServices,
"cancel": self.cancel,
"ok": self.channelSelected,
"keyRadio": self.setModeRadio,
"keyTV": self.setModeTv
})
self["key_red"] = StaticText(_("All"))
self["key_green"] = StaticText(_("Close"))
self["key_yellow"] = StaticText()
self["key_blue"] = StaticText(_("Favourites"))
self["introduction"] = StaticText(_("Press OK to select a service."))
def __onExecCallback(self):
self.setModeTv()
self.setTitle(_("Select service to add..."))
def showProviders(self):
pass
def showSatellites(self, changeMode=False):
if changeMode:
self.close(None)
def channelSelected(self): # just return selected service
ref = self.getCurrentSelection()
if (ref.flags & 7) == 7:
self.enterPath(ref)
elif not (ref.flags & eServiceReference.isMarker):
ref = self.getCurrentSelection()
self.close(ref)
def setModeTv(self):
self.setTvMode()
self.showFavourites()
def setModeRadio(self):
self.setRadioMode()
self.showFavourites()
def cancel(self):
self.close(None)
def activate_all(session):
cihelper.load_ci_assignment()
def find_in_list(items, search, listpos=0):
	for item in items:
		if item[listpos] == search:
			return True
	return False
def isModule():
NUM_CI = SystemInfo["CommonInterface"]
if NUM_CI and NUM_CI > 0:
for slot in range(NUM_CI):
state = eDVBCI_UI.getInstance().getState(slot)
if state > 0:
return True
return False
global_session = None
def sessionstart(reason, session):
global global_session
global_session = session
def autostart(reason, **kwargs):
global global_session
if reason == 0:
print "[CI_Assignment] activating ci configs:"
activate_all(global_session)
elif reason == 1:
global_session = None
def main(session, **kwargs):
session.open(CIselectMainMenu)
def menu(menuid, **kwargs):
if menuid == "cam" and isModule():
return [(_("Common Interface assignment"), main, "ci_assign", 11)]
return []
def Plugins(**kwargs):
description = _("a gui to assign services/providers to common interface modules")
if config.usage.setup_level.index > 1:
description = _("a gui to assign services/providers/caids to common interface modules")
return [PluginDescriptor(where = PluginDescriptor.WHERE_SESSIONSTART, needsRestart = False, fnc = sessionstart),
PluginDescriptor(where = PluginDescriptor.WHERE_AUTOSTART, needsRestart = False, fnc = autostart),
PluginDescriptor(name = _("Common Interface assignment"), description = description, where = PluginDescriptor.WHERE_MENU, needsRestart = False, fnc = menu)]
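
# For reference, an illustrative sketch (values hypothetical, structure taken
# directly from the fp.write() calls in CIconfigMenu.saveXML above) of the
# per-slot assignment file that saveXML() emits and loadXML() parses back:
#
#   <?xml version="1.0" encoding="utf-8" ?>
#   <ci>
#       <slot>
#           <id>0</id>
#           <caid id="0x100" />
#           <service name="Example TV" ref="1:0:19:132F:3EF:1:C00000:0:0:0:" />
#           <provider name="Example Provider" dvbnamespace="c00000" />
#       </slot>
#   </ci>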
| Antonio-Team/enigma2 | lib/python/Plugins/SystemPlugins/CommonInterfaceAssignment/plugin.py | Python | gpl-2.0 | 26,986 |
#!/usr/bin/python
# -*- coding: UTF-8 -*-
'''
# What's a Perfect Power anyway? Problem URL: http://www.codewars.com/kata/whats-a-perfect-power-anyway
'''
import time
import unittest
class TestCases(unittest.TestCase):
def test1(self):self.assertEqual(isPP(4), [2,2], "4 = 2^2")
def test2(self):self.assertEqual(isPP(9), [3,2], "9 = 3^2")
def test3(self):self.assertEqual(isPP(5), None, "5 isn't a perfect power")
def test4(self):self.assertEqual(isPP(8), [2,3])
def isPP(n):
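    # Only a base with the same parity as n can be a root (an odd base to any
    # power is odd, an even base is even), hence the start at n % 2 and the
    # step of 2; the inner break keeps the generous sqrt(n)+2 exponent bound
    # cheap in practice.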
for i in range(n % 2, int(n ** 0.5)+1, 2):
for j in range(n % 2, int(n ** 0.5)+2):
if i ** j == n:
return [i, j]
elif i ** j > n:
break
return None
if __name__ == '__main__':
unittest.main()
'''
Reference solution:
def isPP(n):
for i in range(2, int(n**.5) + 1):
number = n
times = 0
while number % i == 0:
number /= i
times += 1
if number == 1:
return [i, times]
return None
Solution 2:
from math import log
def isPP(n):
for m in range(2, int(n**0.5) + 1):
k = int(round(log(n, m)))
if m ** k == n:
return [m, k]
return None
''' | karchi/codewars_kata | 已完成/What's a Perfect Power anyway.py | Python | mit | 1,233 |
# -*- coding: utf-8 -*-
from model.group import Group
import pytest
from data.groups import constant as testdata
def test_add_group(app, data_groups):
group = data_groups
old_groups = app.group.get_group_list()
app.group.create(group)
assert len(old_groups)+1 == app.group.count()
new_groups = app.group.get_group_list()
old_groups.append(group)
assert sorted(old_groups, key=Group.id_or_max) == sorted(new_groups, key=Group.id_or_max)
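# id_or_max is presumably a sort key that substitutes a maximal value when a
# group has no id yet, so freshly created groups compare last (see model.group;
# this is inferred from usage, the model itself is not shown here).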
'''
@pytest.mark.parametrize("group",testdata, ids=[repr(x) for x in testdata])
def test_add_group(app, group):
old_groups = app.group.get_group_list()
app.group.create(group)
assert len(old_groups)+1 == app.group.count()
new_groups = app.group.get_group_list()
old_groups.append(group)
assert sorted(old_groups, key=Group.id_or_max) == sorted(new_groups, key=Group.id_or_max)
def test_add_group(app, json_groups):
group = json_groups
old_groups = app.group.get_group_list()
app.group.create(group)
assert len(old_groups)+1 == app.group.count()
new_groups = app.group.get_group_list()
old_groups.append(group)
assert sorted(old_groups, key=Group.id_or_max) == sorted(new_groups, key=Group.id_or_max)
''' | TerekhovaKate/Python_training | test/add_new_group.py | Python | apache-2.0 | 1,229 |
# ------------------------------------------------------------------
# Copyright (c) 2020 PyInstaller Development Team.
#
# This file is distributed under the terms of the GNU General Public
# License (version 2.0 or later).
#
# The full license is available in LICENSE.GPL.txt, distributed with
# this software.
#
# SPDX-License-Identifier: GPL-2.0-or-later
# ------------------------------------------------------------------
from PyInstaller.utils.hooks import copy_metadata
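# copy_metadata() returns (source, dest) pairs for the package's dist-info so
# it gets bundled into the frozen app; umap-learn reads its own installed
# metadata at runtime (e.g. via pkg_resources), which would fail without it.
# (Rationale inferred -- this is the standard pattern for metadata-only hooks.)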
datas = copy_metadata('umap-learn')
| etherkit/OpenBeacon2 | macos/venv/lib/python3.8/site-packages/_pyinstaller_hooks_contrib/hooks/stdhooks/hook-umap.py | Python | gpl-3.0 | 517 |
"""Redundancy."""
from proselint.tools import memoize, preferred_forms_check
@memoize
def check(text):
"""Suggest the preferred forms."""
err = "redundancy.wallace"
msg = "Redundancy. Use '{}' instead of '{}'."
redundancies = [
["rectangular", ["rectangular in shape"]],
["audible", ["audible to the ear"]],
]
return preferred_forms_check(text, redundancies, err, msg)
@memoize
def check_garner(text):
"""Suggest the preferred forms.
source: Garner's Modern American Usage
source_url: http://bit.ly/1T4alrY
"""
err = "redundancy.garner"
msg = "Redundancy. Use '{}' instead of '{}'."
redundancies = [
["adequate", ["adequate enough"]],
["admitted", ["self-admitted"]],
["affidavit", ["sworn affidavit"]],
["agreement", ["mutual agreement"]],
["alumnus", ["former alumnus"]],
["antithetical", ["directly antithetical"]],
["approximately", ["approximately about"]],
["associate", ["associate together(?: in groups)?"]],
["bivouac", ["temporary bivouac", "bivouac camp"]],
["blend", ["blend together"]],
["but", ["but nevertheless"]],
["charged with...", ["accused of a charge"]],
["circumstances of", ["circumstances surrounding"]],
["circumstances", ["surrounding circumstances"]],
["close", ["close proximity"]],
["collaborate", ["collaborate together"]],
["collaborator", ["fellow collaborator"]],
["collaborators", ["fellow collaborators"]],
["collocated", ["collocated together"]],
["colleagues", ["fellow colleagues"]],
["combine", ["combine together"]],
["complacent", ["self-complacent"]],
["confessed", ["self-confessed"]],
["connect", ["connect together"]],
["consensus", ["(?:general )?consensus of opinion"]],
["consolidate", ["consolidate together"]],
["continues to", ["still continues to"]],
["contradictory", ["mutually contradictory"]],
["cooperation", ["mutual cooperation"]],
["couple", ["couple together"]],
["crisis", ["serious crisis"]],
["eliminate", ["entirely eliminate"]],
["especially", ["most especially"]],
["fact", ["actual fact"]],
["facts", ["true facts"]],
["forecast", ["future forecast"]],
["founding fathers", ["founding forefathers"]],
["free", ["free and gratis"]],
["free", ["free gratis"]],
["full", ["completely full"]],
["fundamentals", ["basic fundamentals"]],
["gift", ["free gift"]],
["innovation", ["new innovation"]],
["interact", ["interact with each other"]],
["large", ["large-size"]],
["meld", ["meld together"]],
["merge", ["merge together"]],
["mingle", ["mingle together"]],
["mix", ["mix together"]],
["mutual feelings", ["mutual feelings for eachother"]],
["mutual respect", ["mutual respect for each other"]],
["native citizen", ["native-born citizen"]],
["necessity", ["absolute necessity"]],
["obvious", ["blatantly obvious"]],
["pause", ["pause for a moment"]],
["planning", ["advance planning"]],
["plans", ["future plans"]],
["pooled", ["pooled together"]],
["potable water", ["potable drinking water"]],
["potable water", ["potable drinking water"]],
["recruit", ["new recruit"]],
["reelected", ["reelected for another term"]],
["refer", ["refer back"]],
["regress", ["regress back"]],
["repay them", ["repay them back"]],
["repay", ["repay back"]],
["repeat", ["repeat again"]],
["repeat", ["repeat back"]],
["repeat", ["repeat the same"]],
["repeated", ["repeated the same"]],
["reprieve", ["temporary reprieve"]],
["respite", ["brief respite"]],
["retirement", ["retiral", "retiracy"]],
["retreat", ["retreat back"]],
["return", ["return back"]],
["scrutinize", ["closely scrutinize"]],
["software", ["software program"]],
["surrounded", ["surrounded on all sides"]],
["the nation", ["the whole entire nation"]],
["throughout the", ["throughout the entire"]],
["timpani", ["timpani drum"]],
["twins", ["pair of twins"]],
["vacancy", ["unfilled vacancy"]],
["various", ["various different"]],
["veteran", ["former veteran"]],
["visible", ["visible to the eye"]],
["vocation", ["professional vocation"]],
["while", ["while at the same time"]],
]
return preferred_forms_check(text, redundancies, err, msg)
@memoize
def check_nordquist(text):
"""Suggest the preferred forms.
source: Richard Nordquist
source_url: http://grammar.about.com/bio/Richard-Nordquist-22176.htm
"""
err = "redundancy.nordquist"
msg = "Redundancy. Use '{}' instead of '{}'."
redundancies = [
["essential", ["absolutely essential"]],
["necessary", ["absolutely necessary"]],
["a.m.", ["a.m. in the morning"]],
["p.m.", ["p.m. at night"]],
]
return preferred_forms_check(text, redundancies, err, msg)
@memoize
def check_atd(text):
"""Check for redundancies from After the Deadline."""
err = "after_the_deadline.redundancy"
msg = "Redundancy. Use '{}' instead of '{}'."
redundancies = [
["Bō", ["Bo Staff"]],
["Challah", ["Challah bread"]],
["Hallah", ["Hallah bread"]],
["Challah", ["Challah bread"]],
["I", ["I myself", "I personally"]],
["Mount Fuji", ["Mount Fujiyama"]],
["Milky Way", ["Milky Way galaxy"]],
["Rio Grande", ["Rio Grande river"]],
["adage", ["old adage"]],
["add", ["add a further", "add an additional"]],
["advance", ["advance forward"]],
["alternative", ["alternative choice"]],
["amaretto", ["amaretto almond"]],
["annihilate", ["completely annihilate"]],
["anniversary", ["annual anniversary"]],
["anonymous", ["unnamed anonymous"]],
["as", ["equally as"]],
["ascend", ["ascend up"]],
["ask", ["ask the question"]],
["assemble", ["assemble together"]],
["at present the", ["at the present time the"]],
["at this point", ["at this point in time"]],
["attach", ["attach together"]],
["autumn", ["autumn season"]],
["bald", ["bald-headed"]],
["balsa", ["balsa wood"]],
["belongings", ["personal belongings"]],
["benefits", ["desirable benefits"]],
["bento", ["bento box"]],
["best", ["best ever"]],
["bit", ["tiny bit"]],
["blend", ["blend together"]],
["bond", ["common bond"]],
["bonus", ["added bonus", "extra bonus"]],
["bouquet", ["bouquet of flowers"]],
["breakthrough", ["major breakthrough"]],
["bride", ["new bride"]],
["brief", ["brief in duration"]],
["bruin", ["bruin bear"]],
["hot", ["burning hot"]],
["cacophony", ["cacophony of sound"]],
["cameo", ["brief cameo", "cameo appearance"]],
["cancel", ["cancel out"]],
["cash", ["cash money"]],
["chai", ["chai tea"]],
["chance", ["random chance"]],
["charm", ["personal charm"]],
["circle", ["circle around", "round circle"]],
["circulate", ["circulate around"]],
["classify", ["classify into groups"]],
["classmates", ["fellow classmates"]],
["cliche", ["old cliche", "overused cliche"]],
["climb", ["climb up"]],
["clock", ["time clock"]],
["collaborate", ["collaborate together"]],
["collaboration", ["joint collaboration"]],
["colleague", ["fellow colleague"]],
["combine", ["combine together"]],
["commute", ["commute back and forth"]],
["compete", ["compete with each other"]],
["comprise", ["comprise of"]],
["comprises", ["comprises of"]],
["conceived", ["first conceived"]],
["conclusion", ["final conclusion"]],
["confer", ["confer together"]],
["confrontation", ["direct confrontation"]],
# ["confused", ["confused state"]],
["connect", ["connect together", "connect up"]],
["consensus", ["consensus of opinion", "general consensus"]],
["consult", ["consult with"]],
["conversation", ["oral conversation"]],
["cool", ["cool down"]],
["cooperate", ["cooperate together"]],
["cooperation", ["mutual cooperation"]],
["copy", ["duplicate copy"]],
["core", ["inner core"]],
["cost", ["cost the sum of"]],
["could", ["could possibly"]],
["coupon", ["money-saving coupon"]],
["created", ["originally created"]],
["crisis", ["crisis situation"]],
["crouch", ["crouch down"]],
["currently", ["now currently"]],
["custom", ["old custom", "usual custom"]],
["danger", ["serious danger"]],
["dates", ["dates back"]],
["decision", ["definite decision"]],
["depreciate", ["depreciate in value"]],
["descend", ["descend down"]],
["destroy", ["totally destroy"]],
["destroyed", ["completely destroyed"]],
["destruction", ["total destruction"]],
["details", ["specific details"]],
["dilemma", ["difficult dilemma"]],
["disappear", ["disappear from sight"]],
["discovered", ["originally discovered"]],
["dive", ["dive down"]],
["done", ["over and done with"]],
["drawing", ["illustrated drawing"]],
["drop", ["drop down"]],
["dune", ["sand dune"]],
["during", ["during the course of"]],
["dwindle", ["dwindle down"]],
["dwindled", ["dwindled down"]],
["every", ["each and every"]],
["earlier", ["earlier in time"]],
["eliminate", ["completely eliminate", "eliminate altogether",
"entirely eliminate"]],
["ember", ["glowing ember"]],
["embers", ["burning embers"]],
["emergency", ["emergency situation", "unexpected emergency"]],
["empty", ["empty out"]],
["enclosed", ["enclosed herein"]],
["end", ["final end"]],
["engulfed", ["completely engulfed"]],
["enter", ["enter in", "enter into"]],
["equal", ["equal to one another"]],
["eradicate", ["eradicate completely"]],
["essential", ["absolutely essential"]],
["estimated at", ["estimated at about",
"estimated at approximately",
"estimated at around"]],
["etc.", ["and etc."]],
["evolve", ["evolve over time"]],
["exaggerate", ["over exaggerate"]],
["exited", ["exited from"]],
["experience", ["actual experience", "past experience"]],
["experts", ["knowledgeable experts"]],
["extradite", ["extradite back"]],
["face the consequences", ["face up to the consequences"]],
["face the fact", ["face up to the fact"]],
["face the challenge", ["face up to the challenge"]],
["face the problem", ["face up to the problem"]],
["facilitate", ["facilitate easier"]],
["fact", ["established fact"]],
["facts", ["actual facts", "hard facts", "true facts"]],
["fad", ["passing fad"]],
["fall", ["fall down"]],
["fall", ["fall season"]],
["feat", ["major feat"]],
["feel", ["feel inside"]],
["feelings", ["inner feelings"]],
["few", ["few in number"]],
["filled", ["completely filled", "filled to capacity"]],
["first", ["first of all"]],
["first time", ["first time ever"]],
["fist", ["closed fist"]],
["fly", ["fly through the air"]],
["focus", ["focus in", "main focus"]],
["follow", ["follow after"]],
["for example", ["as for example"]],
# ["foremost", ["first and foremost"]],
["forever", ["forever and ever"]],
["free", ["for free"]],
["friend", ["personal friend"]],
["friendship", ["personal friendship"]],
["full", ["full to capacity"]],
["fundamentals", ["basic fundamentals"]],
["fuse", ["fuse together"]],
["gather", ["gather together", "gather up"]],
["get up", ["get up on his feet", "get up on your feet"]],
["gift", ["free gift"]],
["gifts", ["free gifts"]],
["goal", ["ultimate goal"]],
# ["graduate", ["former graduate"]],
["grow", ["grow in size"]],
["guarantee", ["absolute guarantee"]],
["gunman", ["armed gunman"]],
["gunmen", ["armed gunmen"]],
["habitat", ["native habitat"]],
["had done", ["had done previously"]],
["halves", ["two equal halves"]],
# ["has", ["has got"]],
# ["have", ["have got"]],
["haven", ["safe haven"]],
# ["he", ["he himself"]],
["heat", ["heat up"]],
["history", ["past history"]],
["hoist", ["hoist up"]],
["hole", ["empty hole"]],
["honcho", ["head honcho"]],
["ice", ["frozen ice"]],
["ideal", ["perfect ideal"]],
["identical", ["same identical"]],
["identification", ["positive identification"]],
["imports", ["foreign imports"]],
["impulse", ["sudden impulse"]],
["in fact", ["in actual fact"]],
["in the yard", ["outside in the yard"]],
["inclusive", ["all inclusive"]],
["incredible", ["incredible to believe"]],
["incumbent", ["present incumbent"]],
# ["indicted", ["indicted on a charge"]],
["industry", ["private industry"]],
["injuries", ["harmful injuries"]],
["innovation", ["new innovation"]],
["innovative", ["innovative new", "new innovative"]],
# ["input", ["input into"]],
["instinct", ["natural instinct", "naturally instinct"]],
["integrate", ["integrate together",
"integrate with each other"]],
["interdependent", ["interdependent on each other",
"mutually interdependent"]],
["introduced", ["introduced for the first time"]],
["invention", ["new invention"]],
["kneel", ["kneel down"]],
["knots", ["knots per hour"]],
# ["last", ["last of all"]],
# ["later", ["later time"]],
["lift", ["lift up"]],
["lingers", ["still lingers"]],
["look to the future", ["look ahead to the future"]],
["love triangle", ["three-way love triangle"]],
["maintained", ["constantly maintained"]],
["manually", ["manually by hand"]],
["marina", ["boat marina"]],
["may", ["may possibly"]],
["meet", ["meet together", "meet with each other"]],
["memories", ["past memories"]],
["merge", ["merge together"]],
["merged", ["merged together"]],
["meshed", ["meshed together"]],
["midnight", ["twelve midnight"]],
["migraine", ["migraine headache"]],
["minestrone", ["minestrone soup"]],
["mix", ["mix together"]],
["moment", ["brief moment", "moment in time"]],
["monopoly", ["complete monopoly"]],
["mural", ["wall mural"]],
["mutual respect", ["mutual respect for each other"]],
["mutually dependent", ["mutually dependent on each other"]],
["mystery", ["unsolved mystery"]],
# ["naked", ["bare naked"]],
["nape", ["nape of her neck"]],
["necessary", ["absolutely necessary"]],
["never", ["never at any time"]],
["noon", ["12 noon", "12 o'clock noon", "high noon",
"twelve noon"]],
["nostalgia", ["nostalgia for the past"]],
["number of", ["number of different"]],
["opening", ["exposed opening"]],
["my opinion", ["my personal opinion"]],
["opposites", ["exact opposites", "polar opposites"]],
["opposite", ["exact opposite", "polar opposite"]],
["orbits", ["orbits around"]],
["outcome", ["final outcome"]],
["panacea", ["universal panacea"]],
["pending", ["now pending"]],
["penetrate", ["penetrate through"]],
["persists", ["still persists"]],
["pioneer", ["old pioneer"]],
["plan", ["plan ahead", "plan in advance",
"proposed plan"]],
["planning", ["advance planning", "forward planning"]],
["plans", ["future plans"]],
["plan", ["future plan"]],
["point", ["point in time"]],
["point", ["sharp point"]],
["postpone", ["postpone until later"]],
["pouring rain", ["pouring down rain"]],
["preview", ["advance preview"]],
["previously listed", ["previously listed above"]],
["probed", ["probed into"]],
["proceed", ["proceed ahead"]],
["prosthesis", ["artificial prosthesis"]],
# ["protrude", ["protrude out"]],
["proverb", ["old proverb"]],
# ["proximity", ["close proximity"]],
["put off", ["put off until later"]],
# ["raise", ["raise up"]],
["re-elect", ["re-elect for another term"]],
["reason is", ["reason is because"]],
["recur", ["recur again"]],
["recurrence", ["future recurrence"]],
["refer", ["refer back"]],
["reflect", ["reflect back"]],
# ["relevant", ["highly relevant"]],
["remain", ["continue to remain"]],
["remains", ["still remains"]],
["replica", ["exact replica"]],
["reply", ["reply back"]],
# ["requirements", ["necessary requirements"]],
["reservations", ["advance reservations"]],
["retreat", ["retreat back"]],
["revert", ["revert back"]],
["round", ["round in shape"]],
["rule of thumb", ["rough rule of thumb"]],
["rumor", ["unconfirmed rumor"]],
["rustic", ["rustic country"]],
["same", ["exact same", "precise same", "same exact"]],
["sanctuary", ["safe sanctuary"]],
["satisfaction", ["full satisfaction"]],
["scrutinize", ["scrutinize in detail"]],
["scrutiny", ["careful scrutiny", "close scrutiny"]],
["secret", ["secret that cannot be told"]],
["seek", ["seek to find"]],
["separated", ["separated apart from each other"]],
["share", ["share together"]],
["shiny", ["shiny in appearance"]],
["sincere", ["truly sincere"]],
["sink", ["sink down"]],
["skipped", ["skipped over"]],
# ["slow", ["slow speed"]],
# ["small", ["small size"]],
["soft", ["soft in texture", "soft to the touch"]],
["sole", ["sole of the foot"]],
["some time", ["some time to come"]],
["speck", ["small speck"]],
["speed", ["rate of speed"]],
["spell out", ["spell out in detail"]],
["spiked", ["spiked upward", "spiked upwards"]],
["spring", ["spring season"]],
["stranger", ["anonymous stranger"]],
["studio audience", ["live studio audience"]],
["subway", ["underground subway"]],
["sufficient", ["sufficient enough"]],
["summer", ["summer season"]],
["sure", ["absolutely sure"]],
["surprise", ["unexpected surprise"]],
["surround", ["completely surround"]],
["surrounded", ["surrounded on all sides"]],
["tall", ["tall in height", "tall in stature"]],
["telepathy", ["mental telepathy"]],
["ten", ["ten in number"]],
["these", ["these ones"]],
# ["they", ["they themselves"]],
["those", ["those ones"]],
["trench", ["open trench"]],
["truth", ["honest truth"]],
["tundra", ["frozen tundra"]],
["ultimatum", ["final ultimatum"]],
# ["undeniable", ["undeniable truth"]],
["undergraduate", ["undergraduate student"]],
# ["unintentional", ["unintentional mistake"]],
["vacillate", ["vacillate back and forth"]],
["veteran", ["former veteran"]],
["visible", ["visible to the eye"]],
["warn", ["warn in advance"]],
["warning", ["advance warning"]],
["water heater", ["hot water heater"]],
["in which we live", ["in which we live in"]],
["winter", ["winter season"]],
["witness", ["live witness"]],
["yakitori", ["yakitori chicken"]],
["yerba mate", ["yerba mate tea"]],
["yes", ["affirmative yes"]],
]
return preferred_forms_check(text, redundancies, err, msg)
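

# Minimal usage sketch (illustrative, not part of proselint proper): run two of
# the checks above on a sample sentence. Per the import at the top, each check
# returns whatever preferred_forms_check yields -- a list of error records --
# so we simply print it here.
if __name__ == "__main__":
    sample = "We met at 8 a.m. in the morning in close proximity to the park."
    print(check_nordquist(sample))
    print(check_garner(sample))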
| amperser/proselint | proselint/checks/redundancy/misc.py | Python | bsd-3-clause | 25,051 |
from insights.tests import context_wrap
from insights.parsers.qemu_conf import QemuConf
qemu_conf_content = """
vnc_listen = "0.0.0.0"
vnc_auto_unix_socket = 1
vnc_tls = 1
# comment line
vnc_tls_x509_cert_dir = "/etc/pki/libvirt-vnc"
security_driver = "selinux" #inline comment
cgroup_device_acl = [
"/dev/null", "/dev/full", "/dev/zero",
"/dev/random", "/dev/urandom",
"/dev/ptmx", "/dev/kvm", "/dev/kqemu",
"/dev/rtc","/dev/hpet", "/dev/vfio/vfio"
]
"""
qemu_conf_comment = """
# comment line
# comment line
"""
def test_qemu_conf():
result = QemuConf(context_wrap(qemu_conf_content))
assert result.get("vnc_listen") == '0.0.0.0'
assert result.get("vnc_tls") == '1'
assert "/dev/zero" in result.get('cgroup_device_acl')
assert result.get('security_driver') == 'selinux'
assert isinstance(result.get('cgroup_device_acl'), list)
result = QemuConf(context_wrap(qemu_conf_comment))
assert result.data == {}
| RedHatInsights/insights-core | insights/parsers/tests/test_qemu_conf.py | Python | apache-2.0 | 995 |
import unittest
from datetime import date, timedelta
from prescription import Prescription
class PrescriptionTest(unittest.TestCase):
def test_completion_date(self):
prescription = Prescription(dispense_date = date.today() - timedelta(days=15), days_supply = 30)
        self.assertEqual(date.today() + timedelta(days=15), prescription.completion_date())
def test_days_supply(self):
prescription = Prescription(dispense_date = date.today(), days_supply = 3)
        self.assertEqual([date.today(), date.today()+timedelta(days=1), date.today()+timedelta(days=2)], prescription.days_taken())
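
# For orientation, a minimal Prescription sketch that would satisfy the two
# tests above (the real class lives in prescription.py; the shape below is
# inferred purely from the calls made in the tests):
#
# class Prescription(object):
#     def __init__(self, dispense_date=None, days_supply=30):
#         self.dispense_date = dispense_date or date.today()
#         self.days_supply = days_supply
#
#     def completion_date(self):
#         return self.dispense_date + timedelta(days=self.days_supply)
#
#     def days_taken(self):
#         return [self.dispense_date + timedelta(days=i)
#                 for i in range(self.days_supply)]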
if __name__ == "__main__":
unittest.main() | emilybache/KataMedicineClash | Refactoring/Python/prescription_test.py | Python | mit | 695 |
from direct.directnotify.DirectNotifyGlobal import directNotify
from direct.gui.DirectGui import *
from direct.showbase import PythonUtil
from direct.task import Task
from panda3d.core import *
import DisplaySettingsDialog
import ShtikerPage
from otp.speedchat import SCColorScheme
from otp.speedchat import SCStaticTextTerminal
from otp.speedchat import SpeedChat
from toontown.toonbase import TTLocalizer, ToontownGlobals
from toontown.toon import Toon
from toontown.toontowngui import TTDialog
from toontown.shtiker import ControlRemapDialog
from toontown.toontowngui import FeatureComingSoonDialog
speedChatStyles = (
(
2000,
(200 / 255.0, 60 / 255.0, 229 / 255.0),
(200 / 255.0, 135 / 255.0, 255 / 255.0),
(220 / 255.0, 195 / 255.0, 229 / 255.0)
),
(
2012,
(142 / 255.0, 151 / 255.0, 230 / 255.0),
(173 / 255.0, 180 / 255.0, 237 / 255.0),
(220 / 255.0, 195 / 255.0, 229 / 255.0)
),
(
2001,
(0 / 255.0, 0 / 255.0, 255 / 255.0),
(140 / 255.0, 150 / 255.0, 235 / 255.0),
(201 / 255.0, 215 / 255.0, 255 / 255.0)
),
(
2010,
(0 / 255.0, 119 / 255.0, 190 / 255.0),
(53 / 255.0, 180 / 255.0, 255 / 255.0),
(201 / 255.0, 215 / 255.0, 255 / 255.0)
),
(
2014,
(0 / 255.0, 64 / 255.0, 128 / 255.0),
(0 / 255.0, 64 / 255.0, 128 / 255.0),
(201 / 255.0, 215 / 255.0, 255 / 255.0)
),
(
2002,
(90 / 255.0, 175 / 255.0, 225 / 255.0),
(120 / 255.0, 215 / 255.0, 255 / 255.0),
(208 / 255.0, 230 / 255.0, 250 / 255.0)
),
(
2003,
(130 / 255.0, 235 / 255.0, 235 / 255.0),
(120 / 255.0, 225 / 255.0, 225 / 255.0),
(234 / 255.0, 255 / 255.0, 255 / 255.0)
),
(
2004,
(0 / 255.0, 200 / 255.0, 70 / 255.0),
(0 / 255.0, 200 / 255.0, 80 / 255.0),
(204 / 255.0, 255 / 255.0, 204 / 255.0)
),
(
2015,
(13 / 255.0, 255 / 255.0, 100 / 255.0),
(64 / 255.0, 255 / 255.0, 131 / 255.0),
(204 / 255.0, 255 / 255.0, 204 / 255.0)
),
(
2005,
(235 / 255.0, 230 / 255.0, 0 / 255.0),
(255 / 255.0, 250 / 255.0, 100 / 255.0),
(255 / 255.0, 250 / 255.0, 204 / 255.0)
),
(
2006,
(255 / 255.0, 153 / 255.0, 0 / 255.0),
(229 / 255.0, 147 / 255.0, 0 / 255.0),
(255 / 255.0, 234 / 255.0, 204 / 255.0)
),
(
2011,
(255 / 255.0, 177 / 255.0, 62 / 255.0),
(255 / 255.0, 200 / 255.0, 117 / 255.0),
(255 / 255.0, 234 / 255.0, 204 / 255.0)
),
(
2007,
(255 / 255.0, 0 / 255.0, 50 / 255.0),
(229 / 255.0, 0 / 255.0, 50 / 255.0),
(255 / 255.0, 204 / 255.0, 204 / 255.0)
),
(
2013,
(130 / 255.0, 0 / 255.0, 26 / 255.0),
(179 / 255.0, 0 / 255.0, 50 / 255.0),
(255 / 255.0, 204 / 255.0, 204 / 255.0)
),
(
2016,
(176 / 255.0, 35 / 255.0, 0 / 255.0),
(240 / 255.0, 48 / 255.0, 0 / 255.0),
(255 / 255.0, 204 / 255.0, 204 / 255.0)
),
(
2008,
(255 / 255.0, 153 / 255.0, 193 / 255.0),
(240 / 255.0, 157 / 255.0, 192 / 255.0),
(255 / 255.0, 215 / 255.0, 238 / 255.0)
),
(
2009,
(170 / 255.0, 120 / 255.0, 20 / 255.0),
(165 / 255.0, 120 / 255.0, 50 / 255.0),
(210 / 255.0, 200 / 255.0, 180 / 255.0)
)
)
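# Each speedChatStyles entry appears to be (speedchat name key, arrow color,
# rollover color, frame color), with colors as normalized RGB tuples; the
# unpacking presumably happens in updateSpeedChatStyle(), which is called from
# enter() below but defined later in the file (inference from usage, not
# documented here).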
PageMode = PythonUtil.Enum('Options, Codes, Special')
class OptionsPage(ShtikerPage.ShtikerPage):
notify = directNotify.newCategory('OptionsPage')
def __init__(self):
ShtikerPage.ShtikerPage.__init__(self)
self.optionsTabPage = None
self.codesTabPage = None
self.specialOptionsTabPage = None
self.title = None
self.optionsTab = None
self.codesTab = None
self.specialOptionsTab = None
def load(self):
ShtikerPage.ShtikerPage.load(self)
self.optionsTabPage = OptionsTabPage(self)
self.optionsTabPage.hide()
self.codesTabPage = CodesTabPage(self)
self.codesTabPage.hide()
self.specialOptionsTabPage = SpecialOptionsTabPage(self)
self.specialOptionsTabPage.hide()
self.title = DirectLabel(
parent=self, relief=None, text=TTLocalizer.OptionsPageTitle,
text_scale=0.12, pos=(0, 0, 0.61))
gui = loader.loadModel('phase_3.5/models/gui/fishingBook.bam')
normalColor = (1, 1, 1, 1)
clickColor = (0.8, 0.8, 0, 1)
rolloverColor = (0.15, 0.82, 1.0, 1)
        disabledColor = (1.0, 0.98, 0.15, 1)
self.optionsTab = DirectButton(
parent=self, relief=None, text=TTLocalizer.OptionsPageTitle,
text_scale=TTLocalizer.OPoptionsTab, text_align=TextNode.ALeft,
text_pos=(0.01, 0.0, 0.0), image=gui.find('**/tabs/polySurface1'),
image_pos=(0.55, 1, -0.91), image_hpr=(0, 0, -90),
image_scale=(0.033, 0.033, 0.035), image_color=normalColor,
image1_color=clickColor, image2_color=rolloverColor,
            image3_color=disabledColor, text_fg=Vec4(0.2, 0.1, 0, 1),
command=self.setMode, extraArgs=[PageMode.Options],
pos=(-0.64, 0, 0.77))
self.codesTab = DirectButton(
parent=self, relief=None, text=TTLocalizer.OptionsPageCodesTab,
text_scale=TTLocalizer.OPoptionsTab, text_align=TextNode.ALeft,
text_pos=(-0.035, 0.0, 0.0),
image=gui.find('**/tabs/polySurface2'), image_pos=(0.12, 1, -0.91),
image_hpr=(0, 0, -90), image_scale=(0.033, 0.033, 0.035),
image_color=normalColor, image1_color=clickColor,
            image2_color=rolloverColor, image3_color=disabledColor,
text_fg=Vec4(0.2, 0.1, 0, 1), command=self.setMode,
extraArgs=[PageMode.Codes], pos=(-0.12, 0, 0.77))
self.specialOptionsTab = DirectButton(
parent=self, relief=None, text=TTLocalizer.OptionsPageSpecial,
text_scale=TTLocalizer.OPoptionsTab, text_align=TextNode.ALeft,
text_pos=(0.027, 0.0, 0.0),
image=gui.find('**/tabs/polySurface2'), image_pos=(0.12, 1, -0.91),
image_hpr=(0, 0, -90), image_scale=(0.033, 0.033, 0.035),
image_color=normalColor, image1_color=clickColor,
            image2_color=rolloverColor, image3_color=disabledColor,
text_fg=Vec4(0.2, 0.1, 0, 1), command=self.setMode,
extraArgs=[PageMode.Special], pos=(0.42, 0, 0.77))
gui.removeNode()
def enter(self):
self.setMode(PageMode.Options, updateAnyways=1)
ShtikerPage.ShtikerPage.enter(self)
def exit(self):
self.optionsTabPage.exit()
self.codesTabPage.exit()
self.specialOptionsTabPage.exit()
ShtikerPage.ShtikerPage.exit(self)
def unload(self):
if self.optionsTabPage is not None:
self.optionsTabPage.unload()
self.optionsTabPage = None
if self.codesTabPage is not None:
self.codesTabPage.unload()
self.codesTabPage = None
if self.title is not None:
self.title.destroy()
self.title = None
if self.optionsTab is not None:
self.optionsTab.destroy()
self.optionsTab = None
if self.codesTab is not None:
self.codesTab.destroy()
self.codesTab = None
if self.specialOptionsTab is not None:
self.specialOptionsTab.destroy()
self.specialOptionsTab = None
ShtikerPage.ShtikerPage.unload(self)
def setMode(self, mode, updateAnyways=0):
messenger.send('wakeup')
if not updateAnyways:
if self.mode == mode:
return
self.mode = mode
if mode == PageMode.Options:
self.title['text'] = TTLocalizer.OptionsPageTitle
self.optionsTab['state'] = DGG.DISABLED
self.optionsTabPage.enter()
self.codesTab['state'] = DGG.NORMAL
self.codesTabPage.exit()
self.specialOptionsTab['state'] = DGG.NORMAL
self.specialOptionsTabPage.exit()
elif mode == PageMode.Codes:
self.title['text'] = TTLocalizer.CdrPageTitle
self.optionsTab['state'] = DGG.NORMAL
self.optionsTabPage.exit()
self.specialOptionsTab['state'] = DGG.NORMAL
self.specialOptionsTabPage.exit()
self.codesTab['state'] = DGG.DISABLED
self.codesTabPage.enter()
elif mode == PageMode.Special:
self.title['text'] = TTLocalizer.OptionsPageSpecial
self.optionsTab['state'] = DGG.NORMAL
self.optionsTabPage.exit()
self.codesTab['state'] = DGG.NORMAL
self.codesTabPage.exit()
self.specialOptionsTab['state'] = DGG.DISABLED
self.specialOptionsTabPage.enter()
class OptionsTabPage(DirectFrame):
notify = directNotify.newCategory('OptionsTabPage')
DisplaySettingsTaskName = 'save-display-settings'
DisplaySettingsDelay = 60
ChangeDisplaySettings = base.config.GetBool('change-display-settings', 1)
ChangeDisplayAPI = base.config.GetBool('change-display-api', 0)
def __init__(self, parent = aspect2d):
self._parent = parent
self.currentSizeIndex = None
DirectFrame.__init__(self, parent=self._parent, relief=None, pos=(0.0, 0.0, 0.0), scale=(1.0, 1.0, 1.0))
self.load()
def destroy(self):
self._parent = None
DirectFrame.destroy(self)
def load(self):
self.displaySettings = None
self.displaySettingsChanged = 0
self.displaySettingsSize = (None, None)
self.displaySettingsFullscreen = None
self.displaySettingsBorderless = None
self.displaySettingsApi = None
self.displaySettingsApiChanged = 0
guiButton = loader.loadModel('phase_3/models/gui/quit_button')
gui = loader.loadModel('phase_3.5/models/gui/friendslist_gui')
circleModel = loader.loadModel('phase_3/models/gui/tt_m_gui_mat_nameShop')
titleHeight = 0.61
textStartHeight = 0.45
textRowHeight = 0.145
leftMargin = -0.72
buttonbase_xcoord = 0.35
buttonbase_ycoord = 0.45
button_image_scale = (0.7, 1, 1)
button_textpos = (0, -0.02)
options_text_scale = 0.052
disabled_arrow_color = Vec4(0.6, 0.6, 0.6, 1.0)
self.speed_chat_scale = 0.055
self.Music_Label = DirectLabel(parent=self, relief=None, text=TTLocalizer.OptionsPageMusic, text_align=TextNode.ALeft, text_scale=options_text_scale, pos=(leftMargin, 0, textStartHeight))
self.SoundFX_Label = DirectLabel(parent=self, relief=None, text=TTLocalizer.OptionsPageSFX, text_align=TextNode.ALeft, text_scale=options_text_scale, text_wordwrap=16, pos=(leftMargin, 0, textStartHeight - textRowHeight))
self.Friends_Label = DirectLabel(parent=self, relief=None, text='', text_align=TextNode.ALeft, text_scale=options_text_scale, text_wordwrap=16, pos=(leftMargin, 0, textStartHeight - 3 * textRowHeight))
self.Whispers_Label = DirectLabel(parent=self, relief=None, text='', text_align=TextNode.ALeft, text_scale=options_text_scale, text_wordwrap=16, pos=(leftMargin, 0, textStartHeight - 4 * textRowHeight))
self.DisplaySettings_Label = DirectLabel(parent=self, relief=None, text='', text_align=TextNode.ALeft, text_scale=options_text_scale, text_wordwrap=10, pos=(leftMargin, 0, textStartHeight - 5 * textRowHeight))
self.SpeedChatStyle_Label = DirectLabel(parent=self, relief=None, text=TTLocalizer.OptionsPageSpeedChatStyleLabel, text_align=TextNode.ALeft, text_scale=options_text_scale, text_wordwrap=10, pos=(leftMargin, 0, textStartHeight - 6 * textRowHeight))
self.ToonChatSounds_Label = DirectLabel(parent=self, relief=None, text='', text_align=TextNode.ALeft, text_scale=options_text_scale, text_wordwrap=15, pos=(leftMargin, 0, textStartHeight - 2 * textRowHeight + 0.025))
self.ToonChatSounds_Label.setScale(0.9)
self.Music_toggleSlider = DirectSlider(parent=self, pos=(buttonbase_xcoord, 0.0, buttonbase_ycoord),
value=settings['musicVol']*100, pageSize=5, range=(0, 100), command=self.__doMusicLevel,
thumb_geom=(circleModel.find('**/tt_t_gui_mat_namePanelCircle')), thumb_relief=None, thumb_geom_scale=2)
self.Music_toggleSlider.setScale(0.25)
self.SoundFX_toggleSlider = DirectSlider(parent=self, pos=(buttonbase_xcoord, 0.0, buttonbase_ycoord - textRowHeight),
value=settings['sfxVol']*100, pageSize=5, range=(0, 100), command=self.__doSfxLevel,
thumb_geom=(circleModel.find('**/tt_t_gui_mat_namePanelCircle')), thumb_relief=None, thumb_geom_scale=2)
self.SoundFX_toggleSlider.setScale(0.25)
self.Friends_toggleButton = DirectButton(parent=self, relief=None, image=(guiButton.find('**/QuitBtn_UP'), guiButton.find('**/QuitBtn_DN'), guiButton.find('**/QuitBtn_RLVR')), image_scale=button_image_scale, text='', text_scale=options_text_scale, text_pos=button_textpos, pos=(buttonbase_xcoord, 0.0, buttonbase_ycoord - textRowHeight * 3), command=self.__doToggleAcceptFriends)
self.Whispers_toggleButton = DirectButton(parent=self, relief=None, image=(guiButton.find('**/QuitBtn_UP'), guiButton.find('**/QuitBtn_DN'), guiButton.find('**/QuitBtn_RLVR')), image_scale=button_image_scale, text='', text_scale=options_text_scale, text_pos=button_textpos, pos=(buttonbase_xcoord, 0.0, buttonbase_ycoord - textRowHeight * 4), command=self.__doToggleAcceptWhispers)
self.DisplaySettingsButton = DirectButton(parent=self, relief=None, image=(guiButton.find('**/QuitBtn_UP'), guiButton.find('**/QuitBtn_DN'), guiButton.find('**/QuitBtn_RLVR')), image3_color=Vec4(0.5, 0.5, 0.5, 0.5), image_scale=button_image_scale, text=TTLocalizer.OptionsPageChange, text3_fg=(0.5, 0.5, 0.5, 0.75), text_scale=options_text_scale, text_pos=button_textpos, pos=(buttonbase_xcoord, 0.0, buttonbase_ycoord - textRowHeight * 5), command=self.__doDisplaySettings)
self.speedChatStyleLeftArrow = DirectButton(parent=self, relief=None, image=(gui.find('**/Horiz_Arrow_UP'),
gui.find('**/Horiz_Arrow_DN'),
gui.find('**/Horiz_Arrow_Rllvr'),
gui.find('**/Horiz_Arrow_UP')), image3_color=Vec4(1, 1, 1, 0.5), scale=(-1.0, 1.0, 1.0), pos=(0.25, 0, buttonbase_ycoord - textRowHeight * 6), command=self.__doSpeedChatStyleLeft)
self.speedChatStyleRightArrow = DirectButton(parent=self, relief=None, image=(gui.find('**/Horiz_Arrow_UP'),
gui.find('**/Horiz_Arrow_DN'),
gui.find('**/Horiz_Arrow_Rllvr'),
gui.find('**/Horiz_Arrow_UP')), image3_color=Vec4(1, 1, 1, 0.5), pos=(0.65, 0, buttonbase_ycoord - textRowHeight * 6), command=self.__doSpeedChatStyleRight)
self.ToonChatSounds_toggleButton = DirectButton(parent=self, relief=None, image=(guiButton.find('**/QuitBtn_UP'),
guiButton.find('**/QuitBtn_DN'),
guiButton.find('**/QuitBtn_RLVR'),
guiButton.find('**/QuitBtn_UP')), image3_color=Vec4(0.5, 0.5, 0.5, 0.5), image_scale=button_image_scale, text='', text3_fg=(0.5, 0.5, 0.5, 0.75), text_scale=options_text_scale, text_pos=button_textpos, pos=(buttonbase_xcoord, 0.0, buttonbase_ycoord - textRowHeight * 2 + 0.025), command=self.__doToggleToonChatSounds)
self.ToonChatSounds_toggleButton.setScale(0.8)
self.speedChatStyleText = SpeedChat.SpeedChat(name='OptionsPageStyleText', structure=[2000], backgroundModelName='phase_3/models/gui/ChatPanel', guiModelName='phase_3.5/models/gui/speedChatGui')
self.speedChatStyleText.setScale(self.speed_chat_scale)
self.speedChatStyleText.setPos(0.37, 0, buttonbase_ycoord - textRowHeight * 6 + 0.03)
self.speedChatStyleText.reparentTo(self, DGG.FOREGROUND_SORT_INDEX)
self.exitButton = DirectButton(parent=self, relief=None, image=(guiButton.find('**/QuitBtn_UP'), guiButton.find('**/QuitBtn_DN'), guiButton.find('**/QuitBtn_RLVR')), image_scale=1.15, text=TTLocalizer.OptionsPageExitToontown, text_scale=options_text_scale, text_pos=button_textpos, textMayChange=0, pos=(0.45, 0, -0.6), command=self.__handleExitShowWithConfirm)
guiButton.removeNode()
gui.removeNode()
def enter(self):
self.show()
taskMgr.remove(self.DisplaySettingsTaskName)
self.settingsChanged = 0
self.__setAcceptFriendsButton()
self.__setAcceptWhispersButton()
self.__setDisplaySettings()
self.__setToonChatSoundsButton()
self.speedChatStyleText.enter()
self.speedChatStyleIndex = base.localAvatar.getSpeedChatStyleIndex()
self.updateSpeedChatStyle()
if self._parent.book.safeMode:
self.exitButton.hide()
else:
self.exitButton.show()
def exit(self):
self.ignore('confirmDone')
self.hide()
self.speedChatStyleText.exit()
if self.displaySettingsChanged:
taskMgr.doMethodLater(self.DisplaySettingsDelay, self.writeDisplaySettings, self.DisplaySettingsTaskName)
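            # Debounced save: the actual disk write happens DisplaySettingsDelay
            # seconds later; enter() removes the pending task if the page is
            # reopened first, so rapid changes collapse into one write.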
def unload(self):
self.writeDisplaySettings()
taskMgr.remove(self.DisplaySettingsTaskName)
        if self.displaySettings is not None:
self.ignore(self.displaySettings.doneEvent)
self.displaySettings.unload()
self.displaySettings = None
self.exitButton.destroy()
self.Music_toggleSlider.destroy()
self.SoundFX_toggleSlider.destroy()
self.Friends_toggleButton.destroy()
self.Whispers_toggleButton.destroy()
self.DisplaySettingsButton.destroy()
self.speedChatStyleLeftArrow.destroy()
self.speedChatStyleRightArrow.destroy()
del self.exitButton
del self.SoundFX_Label
del self.Music_Label
del self.Friends_Label
del self.Whispers_Label
del self.SpeedChatStyle_Label
del self.SoundFX_toggleSlider
del self.Music_toggleSlider
del self.Friends_toggleButton
del self.Whispers_toggleButton
del self.speedChatStyleLeftArrow
del self.speedChatStyleRightArrow
self.speedChatStyleText.exit()
self.speedChatStyleText.destroy()
del self.speedChatStyleText
self.currentSizeIndex = None
def __doMusicLevel(self):
vol = self.Music_toggleSlider['value']
vol = float(vol) / 100
settings['musicVol'] = vol
base.musicManager.setVolume(vol)
base.musicActive = vol > 0.0
def __doSfxLevel(self):
vol = self.SoundFX_toggleSlider['value']
vol = float(vol) / 100
settings['sfxVol'] = vol
for sfm in base.sfxManagerList:
sfm.setVolume(vol)
base.sfxActive = vol > 0.0
self.__setToonChatSoundsButton()
def __doToggleToonChatSounds(self):
messenger.send('wakeup')
if base.toonChatSounds:
base.toonChatSounds = 0
settings['toonChatSounds'] = False
else:
base.toonChatSounds = 1
settings['toonChatSounds'] = True
self.settingsChanged = 1
self.__setToonChatSoundsButton()
def __setToonChatSoundsButton(self):
if base.toonChatSounds:
self.ToonChatSounds_Label['text'] = TTLocalizer.OptionsPageToonChatSoundsOnLabel
self.ToonChatSounds_toggleButton['text'] = TTLocalizer.OptionsPageToggleOff
else:
self.ToonChatSounds_Label['text'] = TTLocalizer.OptionsPageToonChatSoundsOffLabel
self.ToonChatSounds_toggleButton['text'] = TTLocalizer.OptionsPageToggleOn
if base.sfxActive:
self.ToonChatSounds_Label.setColorScale(1.0, 1.0, 1.0, 1.0)
self.ToonChatSounds_toggleButton['state'] = DGG.NORMAL
else:
self.ToonChatSounds_Label.setColorScale(0.5, 0.5, 0.5, 0.5)
self.ToonChatSounds_toggleButton['state'] = DGG.DISABLED
def __doToggleAcceptFriends(self):
messenger.send('wakeup')
acceptingNewFriends = settings.get('acceptingNewFriends', {})
if base.localAvatar.acceptingNewFriends:
base.localAvatar.acceptingNewFriends = 0
acceptingNewFriends[str(base.localAvatar.doId)] = False
else:
base.localAvatar.acceptingNewFriends = 1
acceptingNewFriends[str(base.localAvatar.doId)] = True
settings['acceptingNewFriends'] = acceptingNewFriends
self.settingsChanged = 1
self.__setAcceptFriendsButton()
def __doToggleAcceptWhispers(self):
messenger.send('wakeup')
acceptingNonFriendWhispers = settings.get('acceptingNonFriendWhispers', {})
if base.localAvatar.acceptingNonFriendWhispers:
base.localAvatar.acceptingNonFriendWhispers = 0
acceptingNonFriendWhispers[str(base.localAvatar.doId)] = False
else:
base.localAvatar.acceptingNonFriendWhispers = 1
acceptingNonFriendWhispers[str(base.localAvatar.doId)] = True
settings['acceptingNonFriendWhispers'] = acceptingNonFriendWhispers
self.settingsChanged = 1
self.__setAcceptWhispersButton()
def __setAcceptFriendsButton(self):
if base.localAvatar.acceptingNewFriends:
self.Friends_Label['text'] = TTLocalizer.OptionsPageFriendsEnabledLabel
self.Friends_toggleButton['text'] = TTLocalizer.OptionsPageToggleOff
else:
self.Friends_Label['text'] = TTLocalizer.OptionsPageFriendsDisabledLabel
self.Friends_toggleButton['text'] = TTLocalizer.OptionsPageToggleOn
def __setAcceptWhispersButton(self):
if base.localAvatar.acceptingNonFriendWhispers:
self.Whispers_Label['text'] = TTLocalizer.OptionsPageWhisperEnabledLabel
self.Whispers_toggleButton['text'] = TTLocalizer.OptionsPageToggleOff
else:
self.Whispers_Label['text'] = TTLocalizer.OptionsPageWhisperDisabledLabel
self.Whispers_toggleButton['text'] = TTLocalizer.OptionsPageToggleOn
def __doDisplaySettings(self):
if self.displaySettings == None:
self.displaySettings = DisplaySettingsDialog.DisplaySettingsDialog()
self.displaySettings.load()
self.accept(self.displaySettings.doneEvent, self.__doneDisplaySettings)
self.displaySettings.enter(self.ChangeDisplaySettings, self.ChangeDisplayAPI)
def __doneDisplaySettings(self, anyChanged, apiChanged):
if anyChanged:
self.__setDisplaySettings()
properties = base.win.getProperties()
self.displaySettingsChanged = 1
self.displaySettingsSize = (properties.getXSize(), properties.getYSize())
self.displaySettingsFullscreen = properties.getFullscreen()
self.displaySettingsBorderless = properties.getUndecorated()
self.displaySettingsApi = base.pipe.getInterfaceName()
self.displaySettingsApiChanged = apiChanged
def __setDisplaySettings(self):
properties = base.win.getProperties()
if properties.getFullscreen():
screensize = 'Fullscreen | %s x %s' % (properties.getXSize(), properties.getYSize())
elif properties.getUndecorated():
screensize = 'Borderless Windowed | %s x %s' % (properties.getXSize(), properties.getYSize())
else:
screensize = 'Windowed'
api = base.pipe.getInterfaceName()
settings = {'screensize': screensize, 'api': api}
text = TTLocalizer.OptionsPageDisplaySettings % settings
self.DisplaySettings_Label['text'] = text
def __doSpeedChatStyleLeft(self):
if self.speedChatStyleIndex > 0:
self.speedChatStyleIndex = self.speedChatStyleIndex - 1
self.updateSpeedChatStyle()
def __doSpeedChatStyleRight(self):
if self.speedChatStyleIndex < len(speedChatStyles) - 1:
self.speedChatStyleIndex = self.speedChatStyleIndex + 1
self.updateSpeedChatStyle()
def updateSpeedChatStyle(self):
nameKey, arrowColor, rolloverColor, frameColor = speedChatStyles[self.speedChatStyleIndex]
newSCColorScheme = SCColorScheme.SCColorScheme(arrowColor=arrowColor, rolloverColor=rolloverColor, frameColor=frameColor)
self.speedChatStyleText.setColorScheme(newSCColorScheme)
self.speedChatStyleText.clearMenu()
colorName = SCStaticTextTerminal.SCStaticTextTerminal(nameKey)
self.speedChatStyleText.append(colorName)
self.speedChatStyleText.finalize()
self.speedChatStyleText.setPos(0.445 - self.speedChatStyleText.getWidth() * self.speed_chat_scale / 2, 0, self.speedChatStyleText.getPos()[2])
if self.speedChatStyleIndex > 0:
self.speedChatStyleLeftArrow['state'] = DGG.NORMAL
else:
self.speedChatStyleLeftArrow['state'] = DGG.DISABLED
if self.speedChatStyleIndex < len(speedChatStyles) - 1:
self.speedChatStyleRightArrow['state'] = DGG.NORMAL
else:
self.speedChatStyleRightArrow['state'] = DGG.DISABLED
base.localAvatar.b_setSpeedChatStyleIndex(self.speedChatStyleIndex)
def writeDisplaySettings(self, task=None):
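        # Runs either directly (e.g. from unload) or as the delayed task
        # scheduled in exit(); returning Task.done tells the task manager
        # the task is finished and should not be rescheduled.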
if not self.displaySettingsChanged:
return
taskMgr.remove(self.DisplaySettingsTaskName)
settings['res'] = (self.displaySettingsSize[0], self.displaySettingsSize[1])
settings['fullscreen'] = self.displaySettingsFullscreen
return Task.done
def __handleExitShowWithConfirm(self):
self.confirm = TTDialog.TTGlobalDialog(doneEvent='confirmDone', message=TTLocalizer.OptionsPageExitConfirm, style=TTDialog.TwoChoice)
self.confirm.show()
self._parent.doneStatus = {'mode': 'exit',
'exitTo': 'closeShard'}
self.accept('confirmDone', self.__handleConfirm)
def __handleConfirm(self):
status = self.confirm.doneStatus
self.ignore('confirmDone')
self.confirm.cleanup()
del self.confirm
if status == 'ok':
base.cr._userLoggingOut = True
messenger.send(self._parent.doneEvent)
class CodesTabPage(DirectFrame):
notify = directNotify.newCategory('CodesTabPage')
def __init__(self, parent = aspect2d):
self._parent = parent
DirectFrame.__init__(self, parent=self._parent, relief=None, pos=(0.0, 0.0, 0.0), scale=(1.0, 1.0, 1.0))
self.load()
return
def destroy(self):
self._parent = None
DirectFrame.destroy(self)
return
def load(self):
self.notice = DirectLabel(parent=self, relief=None, text='NOTICE: All codes can only be entered once!', text_scale=0.06, pos=(0.0, 0, 0.53), text_fg=Vec4(1.0, 0, 0, 1))
cdrGui = loader.loadModel('phase_3.5/models/gui/tt_m_gui_sbk_codeRedemptionGui')
instructionGui = cdrGui.find('**/tt_t_gui_sbk_cdrPresent')
flippyGui = cdrGui.find('**/tt_t_gui_sbk_cdrFlippy')
codeBoxGui = cdrGui.find('**/tt_t_gui_sbk_cdrCodeBox')
self.resultPanelSuccessGui = cdrGui.find('**/tt_t_gui_sbk_cdrResultPanel_success')
self.resultPanelFailureGui = cdrGui.find('**/tt_t_gui_sbk_cdrResultPanel_failure')
self.resultPanelErrorGui = cdrGui.find('**/tt_t_gui_sbk_cdrResultPanel_error')
self.successSfx = base.loadSfx('phase_3.5/audio/sfx/tt_s_gui_sbk_cdrSuccess.ogg')
self.failureSfx = base.loadSfx('phase_3.5/audio/sfx/tt_s_gui_sbk_cdrFailure.ogg')
self.instructionPanel = DirectFrame(parent=self, relief=None, image=instructionGui, image_scale=0.8, text=TTLocalizer.CdrInstructions, text_pos=TTLocalizer.OPCodesInstructionPanelTextPos, text_align=TextNode.ACenter, text_scale=TTLocalizer.OPCodesResultPanelTextScale, text_wordwrap=TTLocalizer.OPCodesInstructionPanelTextWordWrap, pos=(-0.429, 0, -0.05))
self.codeBox = DirectFrame(parent=self, relief=None, image=codeBoxGui, pos=(0.433, 0, 0.35))
self.flippyFrame = DirectFrame(parent=self, relief=None, image=flippyGui, pos=(0.44, 0, -0.353))
self.codeInput = DirectEntry(parent=self.codeBox, relief=DGG.GROOVE, scale=0.08, pos=(-0.33, 0, -0.006), borderWidth=(0.05, 0.05), frameColor=((1, 1, 1, 1), (1, 1, 1, 1), (0.5, 0.5, 0.5, 0.5)), state=DGG.NORMAL, text_align=TextNode.ALeft, text_scale=TTLocalizer.OPCodesInputTextScale, width=10.5, numLines=1, focus=1, backgroundFocus=0, cursorKeys=1, text_fg=(0, 0, 0, 1), suppressMouse=1, autoCapitalize=0, command=self.__submitCode)
submitButtonGui = loader.loadModel('phase_3/models/gui/quit_button')
self.submitButton = DirectButton(parent=self, relief=None, image=(submitButtonGui.find('**/QuitBtn_UP'),
submitButtonGui.find('**/QuitBtn_DN'),
submitButtonGui.find('**/QuitBtn_RLVR'),
submitButtonGui.find('**/QuitBtn_UP')), image3_color=Vec4(0.5, 0.5, 0.5, 0.5), image_scale=1.15, state=DGG.NORMAL, text=TTLocalizer.NameShopSubmitButton, text_scale=TTLocalizer.OPCodesSubmitTextScale, text_align=TextNode.ACenter, text_pos=TTLocalizer.OPCodesSubmitTextPos, text3_fg=(0.5, 0.5, 0.5, 0.75), textMayChange=0, pos=(0.45, 0.0, 0.0896), command=self.__submitCode)
self.resultPanel = DirectFrame(parent=self, relief=None, image=self.resultPanelSuccessGui, text='', text_pos=TTLocalizer.OPCodesResultPanelTextPos, text_align=TextNode.ACenter, text_scale=TTLocalizer.OPCodesResultPanelTextScale, text_wordwrap=TTLocalizer.OPCodesResultPanelTextWordWrap, pos=(-0.42, 0, -0.0567))
self.resultPanel.hide()
closeButtonGui = loader.loadModel('phase_3/models/gui/dialog_box_buttons_gui')
self.closeButton = DirectButton(parent=self.resultPanel, pos=(0.296, 0, -0.466), relief=None, state=DGG.NORMAL, image=(closeButtonGui.find('**/CloseBtn_UP'), closeButtonGui.find('**/CloseBtn_DN'), closeButtonGui.find('**/CloseBtn_Rllvr')), image_scale=(1, 1, 1), command=self.__hideResultPanel)
closeButtonGui.removeNode()
cdrGui.removeNode()
submitButtonGui.removeNode()
return
def enter(self):
self.show()
localAvatar.chatMgr.fsm.request('otherDialog')
self.codeInput['focus'] = 1
self.codeInput.enterText('')
self.__enableCodeEntry()
def exit(self):
self.resultPanel.hide()
self.hide()
localAvatar.chatMgr.fsm.request('mainMenu')
def unload(self):
self.instructionPanel.destroy()
self.instructionPanel = None
self.codeBox.destroy()
self.codeBox = None
self.flippyFrame.destroy()
self.flippyFrame = None
self.codeInput.destroy()
self.codeInput = None
self.submitButton.destroy()
self.submitButton = None
self.resultPanel.destroy()
self.resultPanel = None
self.closeButton.destroy()
self.closeButton = None
del self.successSfx
del self.failureSfx
return
def __submitCode(self, input = None):
if input == None:
input = self.codeInput.get()
self.codeInput['focus'] = 1
if input == '':
return
messenger.send('wakeup')
if hasattr(base.cr, 'codeRedemptionMgr'):
base.cr.codeRedemptionMgr.redeemCode(input, self.__getCodeResult)
self.codeInput.enterText('')
self.__disableCodeEntry()
return
def __getCodeResult(self, result):
self.notify.debug('result = %s' % result)
self.__enableCodeEntry()
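        # Result codes, per the localizer strings below: 0 = success,
        # 1 = invalid code, 2 = expired code, 4 = already redeemed,
        # 5 = not ready, 6 = not eligible; 3 shows the error panel image
        # without setting any text.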
if result == 0:
self.resultPanel['image'] = self.resultPanelSuccessGui
self.resultPanel['text'] = TTLocalizer.CdrResultSuccess
elif result == 1:
self.resultPanel['image'] = self.resultPanelFailureGui
self.resultPanel['text'] = TTLocalizer.CdrResultInvalidCode
elif result == 2:
self.resultPanel['image'] = self.resultPanelFailureGui
self.resultPanel['text'] = TTLocalizer.CdrResultExpiredCode
elif result == 3:
self.resultPanel['image'] = self.resultPanelErrorGui
elif result == 4:
self.resultPanel['image'] = self.resultPanelErrorGui
self.resultPanel['text'] = TTLocalizer.CdrResultAlreadyRedeemed
elif result == 5:
self.resultPanel['image'] = self.resultPanelErrorGui
self.resultPanel['text'] = TTLocalizer.CdrResultNotReady
elif result == 6:
self.resultPanel['image'] = self.resultPanelErrorGui
self.resultPanel['text'] = TTLocalizer.CdrResultNotEligible
if result == 0:
self.successSfx.play()
else:
self.failureSfx.play()
self.resultPanel.show()
def __hideResultPanel(self):
self.resultPanel.hide()
def __disableCodeEntry(self):
self.codeInput['state'] = DGG.DISABLED
self.submitButton['state'] = DGG.DISABLED
def __enableCodeEntry(self):
self.codeInput['state'] = DGG.NORMAL
self.codeInput['focus'] = 1
self.submitButton['state'] = DGG.NORMAL
class SpecialOptionsTabPage(DirectFrame):
notify = directNotify.newCategory('SpecialOptionsTabPage')
def __init__(self, parent = aspect2d):
self._parent = parent
self.currentSizeIndex = None
DirectFrame.__init__(self, parent=self._parent, relief=None, pos=(0.0, 0.0, 0.0), scale=(1.0, 1.0, 1.0))
self.load()
def destroy(self):
self._parent = None
DirectFrame.destroy(self)
def load(self):
guiButton = loader.loadModel('phase_3/models/gui/quit_button')
circleModel = loader.loadModel('phase_3/models/gui/tt_m_gui_mat_nameShop')
gui = loader.loadModel('phase_3.5/models/gui/friendslist_gui')
titleHeight = 0.61
textStartHeight = 0.45
textRowHeight = 0.145
leftMargin = -0.72
buttonbase_xcoord = 0.35
buttonbase_ycoord = 0.45
button_image_scale = (0.7, 1, 1)
button_textpos = (0, -0.02)
options_text_scale = 0.052
disabled_arrow_color = Vec4(0.6, 0.6, 0.6, 1.0)
self.speed_chat_scale = 0.055
self.WASD_Label = DirectLabel(parent=self, relief=None, text='', text_align=TextNode.ALeft, text_scale=options_text_scale, text_wordwrap=16, pos=(leftMargin, 0, textStartHeight - textRowHeight))
self.WASD_toggleButton = DirectButton(parent=self, relief=None, image=(guiButton.find('**/QuitBtn_UP'), guiButton.find('**/QuitBtn_DN'), guiButton.find('**/QuitBtn_RLVR')), image_scale=button_image_scale, text='', text_scale=options_text_scale, text_pos=button_textpos, pos=(buttonbase_xcoord, 0.0, buttonbase_ycoord - textRowHeight), command=self.__doToggleWASD)
self.keymapDialogButton = DirectButton(parent=self, relief=None, image=(guiButton.find('**/QuitBtn_UP'), guiButton.find('**/QuitBtn_DN'), guiButton.find('**/QuitBtn_RLVR')), image_scale=button_image_scale, text='Change Keybinds', text_scale=(0.03, 0.05, 1), text_pos=button_textpos, pos=(buttonbase_xcoord + 0.0, 0.0, buttonbase_ycoord), command=self.__openKeyRemapDialog)
self.keymapDialogButton.setScale(1.55, 1.0, 1.0)
self.newGui_Label = DirectLabel(parent=self, relief=None, text='', text_align=TextNode.ALeft,
text_scale=options_text_scale, text_wordwrap=16,
pos=(leftMargin, 0, textStartHeight - textRowHeight - 0.1))
self.newGui_toggleButton = DirectButton(parent=self, relief=None, image=(
guiButton.find('**/QuitBtn_UP'), guiButton.find('**/QuitBtn_DN'), guiButton.find('**/QuitBtn_RLVR')),
image_scale=button_image_scale, text='', text_scale=options_text_scale,
text_pos=button_textpos,
pos=(buttonbase_xcoord, 0.0, buttonbase_ycoord - textRowHeight - 0.1),
command=self.__doToggleNewGui)
guiButton.removeNode()
circleModel.removeNode()
def enter(self):
self.show()
self.settingsChanged = 0
self.__setWASDButton()
self.__setNewGuiButton()
def exit(self):
self.ignoreAll()
self.hide()
    def unload(self):
        self.WASD_Label.destroy()
        del self.WASD_Label
        self.WASD_toggleButton.destroy()
        del self.WASD_toggleButton
        self.keymapDialogButton.destroy()
        del self.keymapDialogButton
        self.newGui_Label.destroy()
        del self.newGui_Label
        self.newGui_toggleButton.destroy()
        del self.newGui_toggleButton
def __doToggleNewGui(self):
FeatureComingSoonDialog.FeatureComingSoonDialog()
#if settings['newGui'] == True:
# settings['newGui'] = False
# base.localAvatar.setSystemMessage(0, 'Old Battle GUI is toggled for activation, log back in to see effects.')
#else:
# settings['newGui'] = True
# base.localAvatar.setSystemMessage(0, 'New Battle GUI is toggled for activation, log back in to see effects.')
#self.settingsChanged = 1
#self.__setNewGuiButton()
def __setNewGuiButton(self):
if settings['newGui'] == True:
self.newGui_Label['text'] = 'Using the New Battle GUI.'
self.newGui_toggleButton['text'] = 'Toggle'
else:
self.newGui_Label['text'] = 'Using the Classic Battle GUI.'
self.newGui_toggleButton['text'] = 'Toggle'
def __doToggleWASD(self):
messenger.send('wakeup')
if base.wantCustomControls:
base.wantCustomControls = False
settings['want-Custom-Controls'] = False
else:
base.wantCustomControls = True
settings['want-Custom-Controls'] = True
base.reloadControls()
base.localAvatar.controlManager.reload()
base.localAvatar.chatMgr.reloadWASD()
base.localAvatar.controlManager.disable()
self.settingsChanged = 1
self.__setWASDButton()
def __setWASDButton(self):
if base.wantCustomControls:
self.WASD_Label['text'] = 'Custom Keymapping is enabled.'
self.WASD_toggleButton['text'] = TTLocalizer.OptionsPageToggleOff
self.keymapDialogButton.show()
else:
self.WASD_Label['text'] = 'Custom Keymapping is disabled.'
self.WASD_toggleButton['text'] = TTLocalizer.OptionsPageToggleOn
self.keymapDialogButton.hide()
def __openKeyRemapDialog(self):
if base.wantCustomControls:
self.controlDialog = ControlRemapDialog.ControlRemap()
| silly-wacky-3-town-toon/SOURCE-COD | toontown/shtiker/OptionsPage.py | Python | apache-2.0 | 38,922 |
import tensorflow as tf
m1 = tf.constant([[1., 2.]])
m2 = tf.constant([[1],
[2]])
m3 = tf.constant([ [[1,2],
[3,4],
[5,6]],
[[7,8],
[9,10],
[11,12]] ])
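# m1 is a 1 x 2 matrix, m2 a 2 x 1 matrix, and m3 a rank-3 tensor of shape (2, 3, 2).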
print(m1)
print(m2)
print(m3)
# 500 x 500 tensor
print(tf.ones([500, 500]))
# 500 x 500 tensor with every element equal to 0.5
print(tf.ones([500, 500]) * 0.5)
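# The same constant tensor can likely also be built in one step with tf.fill,
# which takes an explicit shape and a scalar fill value (assumes TF 2.x eager
# execution, as the bare print() calls above do):
print(tf.fill([500, 500], 0.5))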
| saramic/learning | data/tensorflow/src/2_4_creating_tensors.py | Python | unlicense | 405 |
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: t -*-
# vi: set ft=python sts=4 ts=4 sw=4 noet :
# This file is part of Fail2Ban.
#
# Fail2Ban is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Fail2Ban is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Fail2Ban; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
__author__ = "Yaroslav Halchenko"
__copyright__ = "Copyright (c) 2013 Yaroslav Halchenko"
__license__ = "GPL"
import logging
import os
import re
import sys
import time
import unittest
from StringIO import StringIO
from ..server.mytime import MyTime
from ..helpers import getLogger
logSys = getLogger(__name__)
CONFIG_DIR = os.environ.get('FAIL2BAN_CONFIG_DIR', None)
if not CONFIG_DIR:
# Use heuristic to figure out where configuration files are
if os.path.exists(os.path.join('config','fail2ban.conf')):
CONFIG_DIR = 'config'
else:
CONFIG_DIR = '/etc/fail2ban'
def mtimesleep():
# no sleep now should be necessary since polling tracks now not only
# mtime but also ino and size
pass
old_TZ = os.environ.get('TZ', None)
def setUpMyTime():
# Set the time to a fixed, known value
# Sun Aug 14 12:00:00 CEST 2005
# yoh: we need to adjust TZ to match the one used by Cyril so all the timestamps match
os.environ['TZ'] = 'Europe/Zurich'
time.tzset()
MyTime.setTime(1124013600)
def tearDownMyTime():
os.environ.pop('TZ')
if old_TZ:
os.environ['TZ'] = old_TZ
time.tzset()
MyTime.myTime = None
def gatherTests(regexps=None, no_network=False):
# Import all the test cases here instead of a module level to
# avoid circular imports
from . import banmanagertestcase
from . import clientreadertestcase
from . import failmanagertestcase
from . import filtertestcase
from . import servertestcase
from . import datedetectortestcase
from . import actiontestcase
from . import actionstestcase
from . import sockettestcase
from . import misctestcase
from . import databasetestcase
from . import samplestestcase
from . import fail2banregextestcase
if not regexps: # pragma: no cover
tests = unittest.TestSuite()
else: # pragma: no cover
class FilteredTestSuite(unittest.TestSuite):
_regexps = [re.compile(r) for r in regexps]
def addTest(self, suite):
suite_str = str(suite)
for r in self._regexps:
if r.search(suite_str):
super(FilteredTestSuite, self).addTest(suite)
return
tests = FilteredTestSuite()
# Server
#tests.addTest(unittest.makeSuite(servertestcase.StartStop))
tests.addTest(unittest.makeSuite(servertestcase.Transmitter))
tests.addTest(unittest.makeSuite(servertestcase.JailTests))
tests.addTest(unittest.makeSuite(servertestcase.RegexTests))
tests.addTest(unittest.makeSuite(servertestcase.LoggingTests))
tests.addTest(unittest.makeSuite(actiontestcase.CommandActionTest))
tests.addTest(unittest.makeSuite(actionstestcase.ExecuteActions))
# FailManager
tests.addTest(unittest.makeSuite(failmanagertestcase.AddFailure))
# BanManager
tests.addTest(unittest.makeSuite(banmanagertestcase.AddFailure))
try:
import dns
tests.addTest(unittest.makeSuite(banmanagertestcase.StatusExtendedCymruInfo))
except ImportError:
pass
# ClientReaders
tests.addTest(unittest.makeSuite(clientreadertestcase.ConfigReaderTest))
tests.addTest(unittest.makeSuite(clientreadertestcase.JailReaderTest))
tests.addTest(unittest.makeSuite(clientreadertestcase.FilterReaderTest))
tests.addTest(unittest.makeSuite(clientreadertestcase.JailsReaderTest))
tests.addTest(unittest.makeSuite(clientreadertestcase.JailsReaderTestCache))
# CSocket and AsyncServer
tests.addTest(unittest.makeSuite(sockettestcase.Socket))
tests.addTest(unittest.makeSuite(sockettestcase.ClientMisc))
# Misc helpers
tests.addTest(unittest.makeSuite(misctestcase.HelpersTest))
tests.addTest(unittest.makeSuite(misctestcase.SetupTest))
tests.addTest(unittest.makeSuite(misctestcase.TestsUtilsTest))
tests.addTest(unittest.makeSuite(misctestcase.CustomDateFormatsTest))
# Database
tests.addTest(unittest.makeSuite(databasetestcase.DatabaseTest))
# Filter
tests.addTest(unittest.makeSuite(filtertestcase.IgnoreIP))
tests.addTest(unittest.makeSuite(filtertestcase.BasicFilter))
tests.addTest(unittest.makeSuite(filtertestcase.LogFile))
tests.addTest(unittest.makeSuite(filtertestcase.LogFileMonitor))
tests.addTest(unittest.makeSuite(filtertestcase.LogFileFilterPoll))
if not no_network:
tests.addTest(unittest.makeSuite(filtertestcase.IgnoreIPDNS))
tests.addTest(unittest.makeSuite(filtertestcase.GetFailures))
tests.addTest(unittest.makeSuite(filtertestcase.DNSUtilsTests))
tests.addTest(unittest.makeSuite(filtertestcase.JailTests))
# DateDetector
tests.addTest(unittest.makeSuite(datedetectortestcase.DateDetectorTest))
# Filter Regex tests with sample logs
tests.addTest(unittest.makeSuite(samplestestcase.FilterSamplesRegex))
# bin/fail2ban-regex
tests.addTest(unittest.makeSuite(fail2banregextestcase.Fail2banRegexTest))
#
# Python action testcases
#
testloader = unittest.TestLoader()
from . import action_d
for file_ in os.listdir(
os.path.abspath(os.path.dirname(action_d.__file__))):
if file_.startswith("test_") and file_.endswith(".py"):
if no_network and file_ in ['test_badips.py','test_smtp.py']: #pragma: no cover
# Test required network
continue
tests.addTest(testloader.loadTestsFromName(
"%s.%s" % (action_d.__name__, os.path.splitext(file_)[0])))
#
# Extensive use-tests of different available filters backends
#
from ..server.filterpoll import FilterPoll
filters = [FilterPoll] # always available
# Additional filters available only if external modules are available
# yoh: Since I do not know better way for parametric tests
# with good old unittest
try:
from ..server.filtergamin import FilterGamin
filters.append(FilterGamin)
except Exception, e: # pragma: no cover
logSys.warning("Skipping gamin backend testing. Got exception '%s'" % e)
try:
from ..server.filterpyinotify import FilterPyinotify
filters.append(FilterPyinotify)
except Exception, e: # pragma: no cover
logSys.warning("I: Skipping pyinotify backend testing. Got exception '%s'" % e)
for Filter_ in filters:
tests.addTest(unittest.makeSuite(
filtertestcase.get_monitor_failures_testcase(Filter_)))
try: # pragma: systemd no cover
from ..server.filtersystemd import FilterSystemd
tests.addTest(unittest.makeSuite(filtertestcase.get_monitor_failures_journal_testcase(FilterSystemd)))
except Exception, e: # pragma: no cover
logSys.warning("I: Skipping systemd backend testing. Got exception '%s'" % e)
# Server test for logging elements which break logging used to support
# testcases analysis
tests.addTest(unittest.makeSuite(servertestcase.TransmitterLogging))
return tests
# forwards compatibility of unittest.TestCase for some early python versions
if not hasattr(unittest.TestCase, 'assertIn'):
def __assertIn(self, a, b, msg=None):
if a not in b: # pragma: no cover
self.fail(msg or "%r was not found in %r" % (a, b))
unittest.TestCase.assertIn = __assertIn
def __assertNotIn(self, a, b, msg=None):
if a in b: # pragma: no cover
self.fail(msg or "%r was found in %r" % (a, b))
unittest.TestCase.assertNotIn = __assertNotIn
class LogCaptureTestCase(unittest.TestCase):
def setUp(self):
# For extended testing of what gets output into logging
# system, we will redirect it to a string
logSys = getLogger("fail2ban")
# Keep old settings
self._old_level = logSys.level
self._old_handlers = logSys.handlers
# Let's log everything into a string
self._log = StringIO()
logSys.handlers = [logging.StreamHandler(self._log)]
if self._old_level < logging.DEBUG: # so if HEAVYDEBUG etc -- show them!
logSys.handlers += self._old_handlers
logSys.setLevel(getattr(logging, 'DEBUG'))
def tearDown(self):
"""Call after every test case."""
# print "O: >>%s<<" % self._log.getvalue()
logSys = getLogger("fail2ban")
logSys.handlers = self._old_handlers
logSys.level = self._old_level
def _is_logged(self, s):
return s in self._log.getvalue()
def assertLogged(self, *s):
"""Assert that one of the strings was logged
Preferable to assertTrue(self._is_logged(..)))
since provides message with the actual log.
Parameters
----------
s : string or list/set/tuple of strings
Test should succeed if string (or any of the listed) is present in the log
"""
logged = self._log.getvalue()
for s_ in s:
if s_ in logged:
return
raise AssertionError("None among %r was found in the log: %r" % (s, logged))
def assertNotLogged(self, *s):
"""Assert that strings were not logged
Parameters
----------
s : string or list/set/tuple of strings
Test should succeed if the string (or at least one of the listed) is not
present in the log
"""
logged = self._log.getvalue()
for s_ in s:
if s_ not in logged:
return
raise AssertionError("All of the %r were found present in the log: %r" % (s, logged))
def getLog(self):
return self._log.getvalue()
def printLog(self):
print(self._log.getvalue())
# Solution from http://stackoverflow.com/questions/568271/how-to-check-if-there-exists-a-process-with-a-given-pid
# under cc by-sa 3.0
if os.name == 'posix':
def pid_exists(pid):
"""Check whether pid exists in the current process table."""
import errno
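		# Signal 0 performs error checking without delivering a signal:
		# ESRCH (falling through to False here) means no such pid, while
		# EPERM means the pid exists but belongs to another user.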
if pid < 0:
return False
try:
os.kill(pid, 0)
except OSError as e:
return e.errno == errno.EPERM
else:
return True
else:
def pid_exists(pid):
import ctypes
kernel32 = ctypes.windll.kernel32
SYNCHRONIZE = 0x100000
process = kernel32.OpenProcess(SYNCHRONIZE, 0, pid)
if process != 0:
kernel32.CloseHandle(process)
return True
else:
return False
# Python 2.6 compatibility. in 2.7 assertDictEqual
def assert_dict_equal(a, b):
assert isinstance(a, dict), "Object is not dictionary: %r" % a
assert isinstance(b, dict), "Object is not dictionary: %r" % b
assert a==b, "Dictionaries differ:\n%r !=\n%r" % (a, b)
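# Example: assert_dict_equal({'a': 1}, {'a': 1}) passes silently, while
# differing dictionaries raise AssertionError with both reprs in the message.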
| Eyepea/fail2ban | fail2ban/tests/utils.py | Python | gpl-2.0 | 10,517 |
# -*- coding: UTF-8 -*-
# Copyright 2018-2020 Rumma & Ko Ltd
# License: GNU Affero General Public License v3 (see file COPYING for details)
from lino.projects.std.settings import *
from lino.api import _
class Site(Site):
verbose_name = "Lino Presto"
url = "http://presto.lino-framework.org"
# demo_fixtures = 'std demo minimal_ledger euvatrates demo_bookings payments demo2'.split()
# demo_fixtures = 'std demo minimal_ledger demo_bookings payments demo2'.split()
# demo_fixtures = 'std minimal_ledger demo demo2'.split()
demo_fixtures = 'std minimal_ledger demo demo_bookings demo2 checkdata'
languages = 'en de fr'
textfield_format = 'html'
obj2text_template = "**{0}**"
project_model = 'presto.Client'
workflows_module = 'lino_presto.lib.presto.workflows'
custom_layouts_module = 'lino_presto.lib.presto.layouts'
user_types_module = 'lino_presto.lib.presto.user_types'
auto_configure_logger_names = "atelier django lino lino_xl lino_presto"
default_build_method = 'weasy2pdf'
textfield_bleached = True
def get_installed_apps(self):
yield super(Site, self).get_installed_apps()
# yield 'lino.modlib.gfks'
yield 'lino_presto.lib.users'
yield 'lino_presto.lib.contacts'
yield 'lino_xl.lib.uploads'
yield 'lino_presto.lib.cal'
yield 'lino_presto.lib.ledger'
yield 'lino_presto.lib.orders'
yield 'lino.modlib.dashboard'
yield 'lino_xl.lib.calview'
yield 'lino_xl.lib.countries'
# yield 'lino_xl.lib.properties'
yield 'lino_xl.lib.clients'
yield 'lino_xl.lib.households'
# yield 'lino_xl.lib.lists'
yield 'lino_xl.lib.addresses'
yield 'lino_xl.lib.phones'
        yield 'lino_xl.lib.humanlinks'
yield 'lino_xl.lib.topics'
# yield 'lino_xl.lib.extensible'
yield 'lino_xl.lib.healthcare'
yield 'lino_presto.lib.products'
yield 'lino_presto.lib.sales'
# yield 'lino_xl.lib.vat'
yield 'lino_presto.lib.invoicing'
yield 'lino_xl.lib.sepa'
# yield 'lino_xl.lib.finan'
# yield 'lino_xl.lib.bevats'
# yield 'lino_xl.lib.ana'
# yield 'lino_xl.lib.sheets'
yield 'lino_xl.lib.notes'
# yield 'lino_xl.lib.skills'
# yield 'lino.modlib.uploads'
yield 'lino_xl.lib.excerpts'
yield 'lino_xl.lib.appypod'
yield 'lino.modlib.export_excel'
yield 'lino.modlib.checkdata'
yield 'lino.modlib.tinymce'
yield 'lino.modlib.weasyprint'
yield 'lino_presto.lib.presto'
def get_plugin_configs(self):
yield super(Site, self).get_plugin_configs()
yield ('healthcare', 'client_model', 'presto.Client')
yield ('topics', 'menu_group', 'contacts')
yield ('countries', 'country_code', 'BE')
yield ('clients', 'client_model', 'presto.Client')
yield ('clients', 'menu_group', 'contacts')
yield ('orders', 'worker_model', 'contacts.Worker')
# yield ('ledger', 'purchase_stories', False)
yield ('ledger', 'sales_stories', False)
# yield ('cal', 'default_guest_state', 'invited')
yield ('calview', 'params_layout', "state project project__municipality event_type room")
yield ('clients', 'demo_coach', 'martha')
def setup_quicklinks(self, user, tb):
super(Site, self).setup_quicklinks(user, tb)
tb.add_action(self.models.contacts.Workers)
tb.add_action(self.models.presto.Clients)
# for a in (self.models.calview.WeeklyView, self.models.contacts.WeeklyView):
# tb.add_instance_action(
# a.get_row_by_pk(None, "0"), action=a.default_action, label=a.label)
for p in self.models.calview.Planners.get_list_items():
a = p.weekly_view
tb.add_instance_action(
a.get_row_by_pk(None, "0"), action=a.default_action, label=p.text)
| lsaffre/presto | lino_presto/lib/presto/settings.py | Python | agpl-3.0 | 3,982 |
from app import cache, celery
from app.factory import create_app
app = create_app(celery, cache)
if __name__ == '__main__':
app.run(debug=True)
| Djaler/VkGraph | run_app.py | Python | mit | 150 |
# -*- coding: utf-8 -*-
# @mtrpires
# Scraper for Bolsa Família data from the Brazilian federal government's
# Transparency Portal (Portal da Transparência)
# http://www.portaltransparencia.gov.br
#
import os
from raspafamiliaFunctions import getEstado
from raspafamiliaFunctions import setParamsEstados
from raspafamiliaFunctions import salvaMunicipios
from raspafamiliaFunctions import salvaMunCSV
from raspafamiliaFunctions import changePage
from raspafamiliaFunctions import salvaSopa
from raspafamiliaFunctions import numeroPaginas
from raspafamiliaFunctions import criaPastaEstado
from raspafamiliaFunctions import setParamsMunicipios
from raspafamiliaFunctions import listaMunicipios
from raspafamiliaFunctions import salvaFavorecidos
from raspafamiliaFunctions import salvaFavCSV
from random import uniform
from time import sleep
urlEstados = 'http://www.portaltransparencia.gov.br/PortalTransparenciaPesquisaAcaoMunicipio.asp?'
urlMunicipios = 'http://www.portaltransparencia.gov.br/PortalTransparenciaPesquisaAcaoFavorecido.asp?'
ano = 2014
estados = [
'BA', 'DF', 'PR', 'RS', 'TO', 'PA', 'PE', 'RN', 'RO',
'RJ', 'AC', 'AM', 'AL', 'CE', 'AP', 'GO', 'ES', 'MG',
    'PI', 'MA', 'SP', 'MT', 'MS', 'SC', 'SE', 'RR', 'PB'
]
#estados = ['PA']
for siglaEstado in estados:
    print "Processing state", siglaEstado
    paramsEstados = setParamsEstados(ano, siglaEstado)
    sopaEstados = salvaSopa(urlEstados, paramsEstados)
    print "State-level soup ready!", type(sopaEstados)
    resultsEstados = numeroPaginas(sopaEstados)
    print "Found", resultsEstados, "pages of states. Here we go."
    for estado in range(resultsEstados+1)[1:]:
        print "On page (%s)[%d/%s]" % (siglaEstado, estado, resultsEstados)
        #randomSleep = uniform(1, 3)
        print "Saving HTML with municipalities..."
        municipios = salvaMunicipios(sopaEstados)
        print "HTML saved!"
        salvaMunCSV(siglaEstado, municipios)
        print "Generating the list of municipalities for page (%s)[%d/%s]" % (siglaEstado, estado, resultsEstados)
        listaCidades = listaMunicipios(municipios)
        print "This is the list of municipalities:", listaCidades
        for cidade in listaCidades:
            print "Processing municipality", cidade
            paramsMunicipios = setParamsMunicipios(ano, cidade, municipios)
            sopaMunicipios = salvaSopa(urlMunicipios, paramsMunicipios)
            print "Municipal soup ready!", type(sopaMunicipios)
            resultsMunicipios = numeroPaginas(sopaMunicipios)
            print "Found", resultsMunicipios, "pages for municipality %s. Here we go." % cidade.encode('utf-8')
            for municipio in range(resultsMunicipios+1)[1:]:
                print "On page (%s)[%d/%s]" % (cidade.encode('utf-8'), municipio, resultsMunicipios)
                favorecidos = salvaFavorecidos(sopaMunicipios)
                salvaFavCSV(siglaEstado, cidade, favorecidos)
                paramsMunicipios = changePage(paramsMunicipios)
                sopaMunicipios = salvaSopa(urlMunicipios, paramsMunicipios)
                #print "Waiting", randomSleep, "seconds before the next page."
                #sleep(randomSleep)
        paramsEstados = changePage(paramsEstados)
        print "Moving to the next state page..."
        sopaEstados = salvaSopa(urlEstados, paramsEstados)
| mtrpires/raspafamilia | raspador.py | Python | gpl-3.0 | 3,509 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-04-12 00:22
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('api', '0002_auto_20160411_2108'),
]
operations = [
migrations.AddField(
model_name='item',
name='item_finished',
field=models.BooleanField(default=False),
),
]
| eespaillat/RoommateManagerApp | roommatemanager/api/migrations/0003_item_item_finished.py | Python | mit | 452 |
'''
A little module for creating hierarchical word clusters.
This is based loosely on the following paper.
Peter F. Brown; Peter V. deSouza; Robert L. Mercer; T. J. Watson; Vincent J.
Della Pietra; Jenifer C. Lai. 1992. Class-Based n-gram Models of Natural
Language. Computational Linguistics, Volume 18, Number 4.
http://acl.ldc.upenn.edu/J/J92/J92-4003.pdf
While this code creates hierarchical clusters, it does not use an HMM-like
sequence model to do so (Brown et al., 1992, section 3). Instead, it merges
clusters simply by picking the pairs of clusters with the highest pointwise
mutual information. Instead of using a window (e.g., as in Brown et al., sec. 4),
this code computes PMI using the probability that two randomly selected clusters
from the same document will be c1 and c2. Also, since the total numbers of
cluster tokens and pairs are constant across pairs, this code uses counts
instead of probabilities. Thus, the score for merging two clusters
c1 and c2 is the following:
log[count(two tokens in the same doc are in c1 and c2) / count(c1) / count(c2)]
* See http://www.cs.columbia.edu/~cs4705/lectures/brown.pdf for a nice
overview of Brown clustering.
* Here is another implementation of Brown clustering:
https://github.com/percyliang/brown-cluster
* Also, see Percy Liang's Master's Thesis:
Percy Liang. 2005. Semi-supervised learning for natural language. MIT.
http://cs.stanford.edu/~pliang/papers/meng-thesis.pdf
Author: Michael Heilman ([email protected], [email protected])
'''
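# A small worked example of the merge score above (illustrative numbers only):
# with count(c1) = 4, count(c2) = 5, and a pair count of 10, the score is
# log(10) - log(4) - log(5) = log(10 / 20) = log(0.5) ~= -0.693.
# Higher (less negative) scores mean the two clusters co-occur in documents
# more often than their individual frequencies alone would suggest.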
import argparse
import glob
import re
import itertools
import logging
from collections import defaultdict
import random
random.seed(1234567890)
from math import log
from bs4 import UnicodeDammit
logging.basicConfig(level=logging.INFO, format='%(asctime)s\t%(message)s')
def document_generator(path, lower=False):
'''
Default document reader. Takes a path to a file with one document per line,
with tokens separate by whitespace, and yields lists of tokens per document.
This could be replaced by any function that yields lists of tokens.
See main() for how it is called.
Note: this uses BeautifulSoup's UnicodeDammit to convert to unicode.
'''
with open(path, 'rb') as f:
i = 0
for line in f:
line = UnicodeDammit(line.strip()).unicode_markup
if line:
if lower:
line = line.lower()
i += 1
if i % 100000 == 0:
logging.info('Read {} nonblank lines'.format(i))
yield re.split(r'\s+', line)
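# Typical use, assuming a hypothetical corpus.txt with one document per line:
#   for tokens in document_generator('corpus.txt', lower=True):
#       ...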
def test_doc_gen():
docs = ['dog cat bird bat whale monkey',
'monkey human ape',
'human man woman child',
'fish whale shark',
'man woman teacher lawyer doctor',
'fish shark',
'bird bat fly']
return map(str.split, docs)
def make_float_defaultdict():
return defaultdict(float)
def make_word_counts(document_generator, max_vocab_size=None, min_word_count=1):
res = defaultdict(int)
for doc in document_generator:
for tok in doc:
res[tok] += 1
too_rare = min_word_count - 1
if min_word_count > 1 and max_vocab_size is not None:
logging.info("max_vocab_size and min_word_count both set." +
" Ignoring min_word_count.".format(too_rare))
words = sorted(res.keys(), key=lambda w: res[w], reverse=True)
if max_vocab_size is not None:
if len(words) <= max_vocab_size:
too_rare = 0
else:
too_rare = res[words[max_vocab_size]]
if too_rare == res[words[0]]:
too_rare += 1
logging.info("max_vocab_size too low. Using all words" +
"that appeared > {} times.".format(too_rare))
# only keep words that occur more frequently than too_rare
words = [w for w in words if res[w] > too_rare]
logging.info("Created vocabulary with the {} words that occurred at least {} times."
.format(len(words), too_rare + 1))
words_set = set(words)
wc_keys = list(res.keys())
for key in wc_keys:
if key not in words_set:
del res[key]
return res
class DocumentLevelClusters(object):
'''
Class for generating word clusters based on document-level co-occurence.
The initializer takes a document generator, which is simply an iterator
over lists of tokens. You can define this however you wish.
word_counts should be a dictionary of int counts that determines which words
to consider.
'''
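    # Typical use, mirroring main() below:
    #   word_counts = make_word_counts(document_generator(path))
    #   clusters = DocumentLevelClusters(document_generator(path), word_counts)
    #   clusters.save_clusters(output_path)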
def __init__(self, doc_generator, word_counts, batch_size=1000):
self.batch_size = batch_size
self.num_docs = 0
# mapping from cluster IDs to cluster IDs,
# to keep track of the hierarchy
self.cluster_parents = {}
self.cluster_counter = 0
# cluster_id -> {doc_id -> counts}
self.index = defaultdict(dict)
# the list of words in the vocabulary and their counts
self.word_counts = word_counts
# find the most frequent words
self.words = sorted(self.word_counts.keys(),
key=lambda w: self.word_counts[w], reverse=True)
# make a copy of the list of words, as a queue for making new clusters
word_queue = list(self.words)
# the 0/1 bit to add when walking up the hierarchy
# from a word to the top-level cluster
self.cluster_bits = {}
# create sets of documents that each word appears in
self.create_index(doc_generator)
# score potential clusters, starting with the most frequent words.
# also, remove the batch from the queue
self.current_batch = word_queue[:(self.batch_size + 1)]
self.current_batch_scores = defaultdict(make_float_defaultdict)
self.make_pair_scores(itertools.combinations(self.current_batch, 2))
word_queue = word_queue[(self.batch_size + 1):]
while len(self.current_batch) > 1:
# find the best pair of words/clusters to merge
c1, c2 = self.find_best()
# merge the clusters in the index
self.merge(c1, c2)
# remove the merged clusters from the batch, add the new one
# and the next most frequent word (if available)
self.update_batch(c1, c2, word_queue)
logging.info('{} AND {} WERE MERGED INTO {}. {} REMAIN.'
.format(c1, c2, self.cluster_counter,
len(self.current_batch) + len(word_queue) - 1))
self.cluster_counter += 1
def create_index(self, doc_generator):
doc_id = 0
for doc in doc_generator:
for w in doc:
if w not in self.word_counts:
continue
if doc_id not in self.index[w]:
self.index[w][doc_id] = 0
self.index[w][doc_id] += 1
doc_id += 1
self.num_docs = doc_id
logging.info('{} documents were indexed.'.format(self.num_docs))
def make_pair_scores(self, pair_iter):
for c1, c2 in pair_iter:
paircount = 0
index1, index2 = self.index[c1], self.index[c2]
if len(index1) > len(index2):
index1, index2 = index2, index1
for doc_id in index1:
if doc_id not in index2:
continue
paircount += index1[doc_id] * index2[doc_id]
if paircount == 0:
self.current_batch_scores[c1][c2] = float('-inf') # log(0)
continue
# note that these counts are ints!
# (but the log function returns floats)
score = log(paircount) \
- log(self.word_counts[c1]) \
- log(self.word_counts[c2])
self.current_batch_scores[c1][c2] = score
def find_best(self):
best_score = None
argmax = None
for c1, d in self.current_batch_scores.items():
for c2, score in d.items():
if best_score is None or score > best_score:
argmax = [(c1, c2)]
best_score = score
elif score == best_score:
argmax.append((c1, c2))
# break ties randomly (randint takes inclusive args!)
c1, c2 = argmax[random.randint(0, len(argmax) - 1)]
return c1, c2
def merge(self, c1, c2):
c_new = self.cluster_counter
self.cluster_parents[c1] = c_new
self.cluster_parents[c2] = c_new
r = random.randint(0, 1)
self.cluster_bits[c1] = str(r) # assign bits randomly
self.cluster_bits[c2] = str(1 - r)
# initialize the document counts of the new cluster with the counts
# for one of the two child clusters. then, add the counts from the
# other child cluster
self.index[c_new] = self.index[c1]
for doc_id in self.index[c2]:
if doc_id not in self.index[c_new]:
self.index[c_new][doc_id] = 0
self.index[c_new][doc_id] += self.index[c2][doc_id]
# sum the frequencies of the child clusters
self.word_counts[c_new] = self.word_counts[c1] + self.word_counts[c2]
# remove merged clusters from the index to save memory
# (but keep frequencies for words for the final output)
del self.index[c1]
del self.index[c2]
if c1 not in self.words:
del self.word_counts[c1]
if c2 not in self.words:
del self.word_counts[c2]
def update_batch(self, c1, c2, freq_words):
# remove the clusters that were merged (and the scored pairs for them)
self.current_batch = [x for x in self.current_batch
if not (x == c1 or x == c2)]
for c in [c1, c2]:
if c in self.current_batch_scores:
del self.current_batch_scores[c]
for d in self.current_batch_scores.values():
if c in d:
del d[c]
# find what to add to the current batch
new_items = [self.cluster_counter]
if freq_words:
new_word = freq_words.pop(0)
new_items.append(new_word)
# add to the batch and score the new cluster pairs that result
self.make_pair_scores(itertools.product(new_items, self.current_batch))
self.make_pair_scores(itertools.combinations(new_items, 2))
# note: make the scores first with itertools.product
# (before adding new_items to current_batch) to avoid duplicates
self.current_batch.extend(new_items)
def get_bitstring(self, w):
# walk up the cluster hierarchy until there is no parent cluster
cur_cluster = w
bitstring = ""
while cur_cluster in self.cluster_parents:
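            # prepend each bit so that the first character of the final
            # bitstring corresponds to the last (top-level) merge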
bitstring = self.cluster_bits[cur_cluster] + bitstring
cur_cluster = self.cluster_parents[cur_cluster]
return bitstring
def save_clusters(self, output_path):
with open(output_path, 'w') as f:
for w in self.words:
f.write("{}\t{}\t{}\n".format(w, self.get_bitstring(w),
self.word_counts[w]))
def main():
parser = argparse.ArgumentParser(description='Create hierarchical word' +
' clusters from a corpus, following' +
' Brown et al. (1992).')
parser.add_argument('input_path', help='input file, one document per' +
' line, with whitespace-separated tokens.')
parser.add_argument('output_path', help='output path')
parser.add_argument('--max_vocab_size', help='maximum number of words in' +
' the vocabulary (a smaller number will be used if' +
' there are ties at the specified level)',
default=None, type=int)
parser.add_argument('--min_word_count', help='minimum word count to' +
'include a word in the vocabulary. (default: 1)',
default=1, type=int)
parser.add_argument('--batch_size', help='number of clusters to merge at' +
' one time (runtime is quadratic in this value)',
default=1000, type=int)
parser.add_argument('--lower', help='lowercase the input',
action='store_true')
args = parser.parse_args()
word_counts = make_word_counts(document_generator(args.input_path, lower=args.lower),
max_vocab_size=args.max_vocab_size,
min_word_count=args.min_word_count)
c = DocumentLevelClusters(document_generator(args.input_path, lower=args.lower),
word_counts, batch_size=args.batch_size)
c.save_clusters(args.output_path)
if __name__ == '__main__':
main()
| ronaldahmed/labor-market-demand-analysis | shallow parsing models/word_clustering/pmi_cluster_outdated.py | Python | mit | 13,453 |
#!/usr/bin/python
# Copyright (c) 2005--2010 Red Hat, Inc.
#
#
#
# $Id$
raise Exception("""
This test is no more valid; see the bug
https://bugzilla.redhat.com/show_bug.cgi?id=423351
""")
import os
import unittest
from spacewalk.server import rhnSQL
DB = 'rhnuser/rhnuser@webdev'
class ExecutemanyTest(unittest.TestCase):
def setUp(self):
self.table_name = "misatest_%d" % os.getpid()
rhnSQL.initDB(DB)
self._cleanup()
rhnSQL.execute("create table %s (id int, val varchar2(10))" %
self.table_name)
def _cleanup(self):
try:
rhnSQL.execute("drop table %s" % self.table_name)
except rhnSQL.SQLStatementPrepareError:
pass
def tearDown(self):
self._cleanup()
rhnSQL.commit()
def test_executemany(self):
"""
Tests the case of passing an integer as a value into a VARCHAR2 column
(executemany makes it more interesting because the driver generally
verifies the param types; passing a string and an Int takes it one
step further)
"""
h = rhnSQL.prepare("""
insert into %s (id, val) values (:id, :val)
""" % self.table_name)
params = {
'id' : [1, 2],
'val' : ['', 3],
}
apply(h.executemany, (), params)
h = rhnSQL.prepare("select id, val from %s" % self.table_name)
h.execute()
rows = h.fetchall_dict()
self.assertEqual(len(rows), 2)
v_id, v_val = rows[0]['id'], rows[0]['val']
self.assertEqual(v_id, 1)
self.assertEqual(v_val, None)
v_id, v_val = rows[1]['id'], rows[1]['val']
self.assertEqual(v_id, 2)
self.assertEqual(v_val, '3')
if __name__ == '__main__':
unittest.main()
| colloquium/spacewalk | backend/server/test/unit-test/rhnSQL/test_executemany.py | Python | gpl-2.0 | 1,833 |
# This file is part of Trackma.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import signal
import sys
from trackma.ui.gtk.application import TrackmaApplication
from trackma import utils
def main():
signal.signal(signal.SIGINT, signal.SIG_DFL)
print("Trackma-gtk v{}".format(utils.VERSION))
app = TrackmaApplication()
sys.exit(app.run(sys.argv))
if __name__ == '__main__':
main()
| Hairo/trackma | trackma/ui/gtk/main.py | Python | gpl-3.0 | 998 |