| code (stringlengths 3–1.05M) | repo_name (stringlengths 5–104) | path (stringlengths 4–251) | language (stringclasses 1 value) | license (stringclasses 15 values) | size (int64 3–1.05M) |
---|---|---|---|---|---|
"""
Unit test for the repr and pprint of parameterized objects.
"""
import param
from . import API1TestCase
class TestParameterizedRepr(API1TestCase):
def setUp(self):
super(TestParameterizedRepr, self).setUp()
# initialize a parameterized class
class A(param.Parameterized):
a = param.Number(4, precedence=-5)
b = param.String('B', precedence=-4)
c = param.Number(4, precedence=0)
d = param.Integer(-22, precedence=1)
x = param.Number(1, precedence=2)
y = param.Number(2, precedence=-1)
z = param.Number(3, precedence=-2)
def __init__(self, a, b, c=4, d=-22, **kwargs):
super(A, self).__init__(a=a, b=b, c=c, **kwargs)
self.A = A
class B(param.Parameterized): # Similar to A but no **kwargs
a = param.Number(4, precedence=-5)
b = param.String('B', precedence=-4)
c = param.Number(4, precedence=0)
d = param.Integer(-22, precedence=1)
x = param.Number(1, precedence=2)
def __init__(self, a, b, c=4, d=-22):
super(B, self).__init__(a=a, b=b, c=c, name='ClassB')
self.B = B
class C(param.Parameterized): # Similar to A but with *varargs
a = param.Number(4, precedence=-5)
b = param.String('B', precedence=-4)
c = param.Number(4, precedence=0)
d = param.Integer(-22, precedence=1)
x = param.Number(1, precedence=2)
y = param.Number(2, precedence=-1)
z = param.Number(3, precedence=-2)
def __init__(self, a, b, c=4, d=-22, *varargs, **kwargs):
super(C, self).__init__(a=a, b=b, c=c, **kwargs)
self.C = C
class D(param.Parameterized): # Similar to A but with missing parameters
a = param.Number(4, precedence=-5)
b = param.String('B', precedence=-4)
def __init__(self, a, b, c=4, d=-22, **kwargs):
super(D, self).__init__(a=a, b=b, **kwargs)
self.D = D
# More realistically, positional args are not params
class E(param.Parameterized):
a = param.Number(4, precedence=-5)
def __init__(self, p, q=4, **params): # (plus non-param kw too)
super(E, self).__init__(**params)
self.E = E
def testparameterizedrepr(self):
obj = self.A(4,'B', name='test1')
self.assertEqual(repr(obj),
"A(a=4, b='B', c=4, d=-22, name='test1', x=1, y=2, z=3)")
def testparameterizedscriptrepr1(self):
obj = self.A(4,'B', name='test')
self.assertEqual(obj.pprint(),
"A(4, 'B', name='test')")
def testparameterizedscriptrepr2(self):
obj = self.A(4,'B', c=5, name='test')
self.assertEqual(obj.pprint(),
"A(4, 'B', c=5, name='test')")
def testparameterizedscriptrepr3(self):
obj = self.A(4,'B', c=5, x=True, name='test')
self.assertEqual(obj.pprint(),
"A(4, 'B', c=5, name='test')")
def testparameterizedscriptrepr4(self):
obj = self.A(4,'B', c=5, x=10, name='test')
self.assertEqual(obj.pprint(),
"A(4, 'B', c=5, name='test', x=10)")
def testparameterizedscriptrepr5(self):
obj = self.A(4,'B', x=10, y=11, z=12, name='test')
self.assertEqual(obj.pprint(),
"A(4, 'B', name='test', z=12, y=11, x=10)")
def testparameterizedscriptrepr_nokwargs(self):
obj = self.B(4,'B', c=99)
obj.x = 10 # Modified but not passable through constructor
self.assertEqual(obj.pprint(),
"B(4, 'B', c=99)")
def testparameterizedscriptrepr_varargs(self):
obj = self.C(4,'C', c=99)
self.assertEqual(obj.pprint(),
"C(4, 'C', c=99, **varargs)")
def testparameterizedscriptrepr_varargs_kwargs(self):
obj = self.C(4,'C', c=99, x=10, y=11, z=12)
self.assertEqual(obj.pprint(),
"C(4, 'C', c=99, z=12, y=11, x=10, **varargs)")
def testparameterizedscriptrepr_missing_values(self):
obj = self.D(4,'D', c=99)
self.assertEqual(obj.pprint(),
"D(4, 'D', c=<?>, d=<?>)")
def testparameterizedscriptrepr_nonparams(self):
obj = self.E(10,q='hi', a=99)
self.assertEqual(obj.pprint(),
"E(<?>, q=<?>, a=99)")
def test_exceptions(self):
obj = self.E(10,q='hi',a=99)
try:
obj.pprint(unknown_value=False)
except Exception:
pass
else:
raise AssertionError
def test_suppression(self):
obj = self.E(10,q='hi',a=99)
self.assertEqual(obj.pprint(unknown_value=None),
"E(a=99)")
def test_imports_deduplication(self):
obj = self.E(10,q='hi', a=99)
imports = ['import me','import me']
obj.pprint(imports=imports)
self.assertEqual(imports.count('import me'),1)
def test_qualify(self):
obj = self.E(10,q='hi', a=99)
r = "E(<?>, q=<?>, a=99)"
self.assertEqual(obj.pprint(qualify=False),
r)
self.assertEqual(obj.pprint(qualify=True),
"tests.API1.testparameterizedrepr."+r)
if __name__ == "__main__":
import nose
nose.runmodule()
| ceball/param | tests/API1/testparameterizedrepr.py | Python | bsd-3-clause | 5,541 |
# Copyright (c) 2016 Intel, Inc.
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
consoleauth_topic_opt = cfg.StrOpt('consoleauth_topic',
default='consoleauth',
help='The topic console auth proxy nodes listen on')
console_token_ttl = cfg.IntOpt('console_token_ttl',
default=600,
help='How many seconds before deleting tokens')
CONSOLEAUTH_OPTS = [consoleauth_topic_opt, console_token_ttl]
def register_opts(conf):
conf.register_opts(CONSOLEAUTH_OPTS)
def list_opts():
return {'DEFAULT': CONSOLEAUTH_OPTS}
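# A minimal usage sketch (not part of the original module; added for
# illustration). It registers the options above on a fresh ConfigOpts
# instance and reads the defaults back; conf([]) parses an empty command
# line so the option values become accessible.
if __name__ == '__main__':
    conf = cfg.ConfigOpts()
    register_opts(conf)
    conf([])
    print(conf.consoleauth_topic)  # 'consoleauth'
    print(conf.console_token_ttl)  # 600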
| bigswitch/nova | nova/conf/consoleauth.py | Python | apache-2.0 | 1,156 |
import unittest
import colorsys
from PySide.QtCore import SIGNAL
from PySide.QtGui import QPushButton, QApplication
class Test(QApplication):
def __init__(self, argv):
super(Test, self).__init__(argv)
self._called = False
def called(self):
self._called = True
class QApplicationSignalsTest(unittest.TestCase):
def testQuit(self):
app = Test([])
button = QPushButton("BUTTON")
app.connect(button, SIGNAL("clicked()"), app.called)
button.click()
self.assert_(app._called)
if __name__ == '__main__':
unittest.main()
| M4rtinK/pyside-android | tests/QtGui/bug_307.py | Python | lgpl-2.1 | 628 |
"""
Buy and sell items on the auction house.
"""
import datetime
import logging
import time
import os
import pydarkstar.logutils
import pydarkstar.itemlist
import pydarkstar.options
import pydarkstar.common
import pydarkstar.database
import pydarkstar.auction.manager
class Options(pydarkstar.options.Options):
"""
Reads options from config file, then from command line.
"""
def __init__(self):
super(Options, self).__init__(config='broker.yaml', description=__doc__)
# logging
self.verbose = False # error, info, and debug
self.silent = False # error only
# input
self.data = [] # list of itemdata
self.find = False # search for item data
# output
self.save = False # save config
# sql
self.hostname = '127.0.0.1'
self.database = 'dspdb'
self.username = 'root'
self.password = ''
self.fail = False # fail on SQL errors
# cleaning
self.clear = False # clear items sold by broker
self.all = False # clear all items
self.force = False # clear all items check
# selling
self.name = 'Zissou' # seller name
self.restock = 3600 # restock tick
self.refill = False # restock at start
# buying
self.tick = 30 # buying interval
# logging
self.add_argument('--verbose', action='store_true',
help='report debug, info, and error')
self.add_argument('--silent', action='store_true',
help='report error only')
# input
self.add_argument(dest='data', nargs='*', type=str, default=self.data,
metavar='str', help='item data CSV file(s)')
self.add_argument('--find', action='store_true',
help='search for item data files')
# output
self.add_argument('--save', action='store_true',
help='save config file (and exit)')
# sql
self.add_argument('--hostname', default=self.hostname, type=str,
metavar='str', help='SQL address')
self.add_argument('--database', default=self.database, type=str,
metavar='str', help='SQL database')
self.add_argument('--username', default=self.username, type=str,
metavar='str', help='SQL username')
self.add_argument('--password', default=self.password, type=str,
metavar='str', help='SQL password')
self.exclude('password')
self.add_argument('--fail', action='store_true',
help='fail on SQL errors')
# cleaning
self.add_argument('--clear', action='store_true',
help='clear items sold by seller')
self.add_argument('--all', action='store_true',
help='clear *all* items')
self.add_argument('--force', action='store_true',
help='confirm clearing *all* items (required with --all)')
# selling
self.add_argument('--name', type=str, default=self.name,
metavar='str', help='seller name')
self.add_argument('--restock', type=int, default=self.restock,
metavar='int', help='restock interval in seconds')
self.add_argument('--refill', action='store_true',
help='restock items at start and exit')
# buying
self.add_argument('--tick', type=int, default=self.tick,
metavar='int', help='buying interval in seconds')
def parse_args(self, args=None):
super(Options, self).parse_args(args)
self.data = set(self.data)
# find data files
if self.find:
found = list(pydarkstar.common.findFiles(
top=os.getcwd(), regex=r'.*\.csv', r=False, ignorecase=True))
self.data.update(found)
self.data = list(self.data)
def main():
"""
Main function.
"""
# get options
opts = Options()
opts.parse_args()
pydarkstar.logutils.basicConfig(
verbose=opts.verbose, silent=opts.silent, fname='broker.log')
logging.info('start')
# log options
opts.log_values(level=logging.INFO)
# save options
if opts.save:
opts.save = False
opts.dump()
return
# connect to database
db = pydarkstar.database.Database.pymysql(
hostname=opts.hostname,
database=opts.database,
username=opts.username,
password=opts.password,
)
# create auction house manager
manager = pydarkstar.auction.manager.Manager(db, fail=opts.fail)
manager.seller.seller_name = opts.name
manager.buyer.buyer_name = opts.name
if opts.clear:
# clear all items
if opts.all:
# really?
if not opts.force:
raise RuntimeError('clearing all items from auction house is dangerous. use --force')
else:
manager.cleaner.clear(seller=None)
# clear seller items
else:
manager.cleaner.clear(seller=manager.seller.seller)
# exit after clearing
logging.info('exit after clear')
return
# make sure there is data
if not opts.data:
raise RuntimeError('missing item data CSV!')
# load data
logging.info('loading item data...')
idata = pydarkstar.itemlist.ItemList()
for f in opts.data:
idata.loadcsv(f)
if opts.refill:
logging.info('restocking...')
manager.restockItems(itemdata=idata)
logging.info('exit after restock')
return
logging.info('starting main loop...')
start = datetime.datetime.now()
last = start
while True:
now = datetime.datetime.now()
delta = (now - last).total_seconds()
elapsed = (now - start).total_seconds()
logging.debug('time=%012.1f s last restock=%012.1f s next restock=%012.1f s',
elapsed, delta, opts.restock - delta)
if delta >= opts.restock:
logging.debug('restocking...')
manager.restockItems(itemdata=idata)
last = datetime.datetime.now()
# buy items
manager.buyItems(itemdata=idata)
# sleep until next tick
logging.debug('wait=%012.1f s', opts.tick)
time.sleep(opts.tick)
def cleanup():
logging.info('exit\n')
if __name__ == '__main__':
with pydarkstar.logutils.capture():
main()
cleanup()
| LegionXI/pydarkstar | apps/broker.py | Python | mit | 6,408 |
# coding=utf-8
__author__ = "AstroPrint Product Team <[email protected]>"
__license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html'
__copyright__ = "Copyright (C) 2016-2019 3DaGoGo, Inc - Released under terms of the AGPLv3 License"
import threading
import logging
import requests
from Queue import Queue
from astroprint.printer.manager import printerManager
from octoprint.events import eventManager, Events
from astroprint.printfiles import FileDestinations
# singleton
_instance = None
def downloadManager():
global _instance
if _instance is None:
_instance = DownloadManager()
return _instance
# download item is:
# downloadUrl : url of the file to download
# destFile : destination file
# printFileId : Id of the print file to be downloaded,
# printFileInfo : Cloud info of the print file to be downloaded,
# progressCb : callback to report progress
# successCb : callback to report success
# errorCb : callback to report errors
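# A sketch of a well-formed download item (illustrative only; the URL,
# paths, ids, and callbacks below are hypothetical placeholders, not part
# of the original module). The optional keys (printer, material, quality,
# image, created) may be added as well.
def _example_download_item():
    def _progress(pct): pass
    def _success(path, fileInfo): pass
    def _error(path, reason): pass
    return {
        'downloadUrl': 'https://cloud.example/print-files/pf-123',
        'destFile': '/tmp/pf-123.gcode',
        'printFileId': 'pf-123',
        'printFileName': 'model.gcode',
        'printFileInfo': None,  # None triggers local metadata analysis
        'sentFromCloud': False,
        'progressCb': _progress,
        'successCb': _success,
        'errorCb': _error,
    }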
class DownloadWorker(threading.Thread):
def __init__(self, manager):
self._manager = manager
self._activeRequest = None
self._canceled = False
self.activeDownload = False
super(DownloadWorker, self).__init__()
self.daemon = True # must be set after Thread.__init__ for the flag to take effect
def run(self):
downloadQueue = self._manager.queue
while True:
item = downloadQueue.get()
if item == 'shutdown':
return
printFileId = item['printFileId']
printFileName = item['printFileName']
progressCb = item['progressCb']
successCb = item['successCb']
errorCb = item['errorCb']
destFile = item['destFile']
sentFromCloud = item['sentFromCloud']
printer = None
material = None
quality = None
image = None
created = None
retries = 3
if "printer" in item:
printer = item['printer']
if "material" in item:
material = item['material']
if "quality" in item:
quality = item['quality']
if "image" in item:
image = item['image']
if "created" in item:
created = item['created']
self._manager._logger.info('Download started for %s' % printFileId)
self.activeDownload = printFileId
self._canceled = False
while retries > 0:
try:
#Perform download here
r = requests.get(item['downloadUrl'], stream= True, timeout= (10.0, 8.0)) #(connect timeout, read timeout)
self._activeRequest = r
if r.status_code == 200:
content_length = float(r.headers['Content-Length'])
downloaded_size = 0.0
if not self._canceled:
with open(destFile, 'wb') as fd:
for chunk in r.iter_content(100000): #download 100kb at a time
if self._canceled: #check right after reading
break
downloaded_size += len(chunk)
fd.write(chunk)
progressCb(2 + round((downloaded_size / content_length) * 98.0, 1))
if self._canceled: #check again before going to read next chunk
break
retries = 0 #No more retries after this
if not self._canceled:
self._manager._logger.info('Download completed for %s' % printFileId)
if item['printFileInfo'] is None:
printerManager().fileManager._metadataAnalyzer.addFileToQueue(destFile)
fileInfo = {
'id': printFileId,
'printFileName': printFileName,
'info': item['printFileInfo'],
'printer': printer,
'material': material,
'quality': quality,
'image': image,
'created': created,
'sentFromCloud' : sentFromCloud
}
em = eventManager()
if printerManager().fileManager.saveCloudPrintFile(destFile, fileInfo, FileDestinations.LOCAL):
em.fire(
Events.CLOUD_DOWNLOAD, {
"type": "success",
"id": printFileId,
"filename": printerManager().fileManager._getBasicFilename(destFile),
"info": fileInfo["info"],
"printer": fileInfo["printer"],
"material": fileInfo["material"],
"quality": fileInfo["quality"],
"image": fileInfo["image"],
"created": fileInfo["created"]
}
)
successCb(destFile, fileInfo)
else:
errorCb(destFile, "Couldn't save the file")
elif r.status_code in [502, 503, 500]:
self._manager._logger.warn('Download failed for %s with %d. Retrying...' % (printFileId, r.status_code))
retries -= 1 #This error can be retried
else:
self._manager._logger.error('Download failed for %s with %d' % (printFileId, r.status_code))
errorCb(destFile, 'The device is unable to download the print file')
retries = 0 #No more retries after this
except requests.exceptions.ConnectTimeout as e:
self._manager._logger.warn('Connection timeout for %s. Retrying...' % printFileId)
retries -= 1 #This error can be retried
except requests.exceptions.ReadTimeout as e:
self._manager._logger.warn('Read timeout for %s' % printFileId)
not self._canceled and errorCb(destFile, 'Network errors while downloading the print file')
retries = 0 #No more retries after this
except requests.exceptions.RequestException as e:
self._manager._logger.error('Download connection exception for %s: %s' % (printFileId, e), exc_info=True)
not self._canceled and errorCb(destFile, 'Connection Error while downloading the print file')
retries = 0 #No more retries after this
except Exception as e:
retries = 0 #No more retries after this
if "'NoneType' object has no attribute 'recv'" == str(e):
# This due to a problem in the underlying library when calling r.close in the cancel routine
self._canceled = True
else:
self._manager._logger.error('Download exception for %s: %s' % (printFileId, e), exc_info=True)
not self._canceled and errorCb(destFile, 'The device is unable to download the print file')
r.close()
finally:
if self._canceled:
retries = 0 #No more retries after this
self._manager._logger.warn('Download canceled for %s' % printFileId)
errorCb(destFile, 'cancelled')
self.activeDownload = False
self._activeRequest = None
downloadQueue.task_done()
def cancel(self):
if self.activeDownload and not self._canceled:
if self._activeRequest:
self._activeRequest.close() #This can create the exception 'NoneType' object has no attribute 'recv' which is handled above
self._manager._logger.warn('Download canceled requested for %s' % self.activeDownload)
self._canceled = True
class DownloadManager(object):
_maxWorkers = 3
def __init__(self):
self._logger = logging.getLogger(__name__)
self.queue = Queue()
self._workers = []
for i in range(self._maxWorkers):
w = DownloadWorker(self)
self._workers.append( w )
w.start()
def isDownloading(self, printFileId):
for w in self._workers:
if w.activeDownload == printFileId:
return True
return False
def startDownload(self, item):
self.queue.put(item)
def cancelDownload(self, printFileId):
for w in self._workers:
if w.activeDownload == printFileId:
w.cancel()
return True
return False
def shutdown(self):
self._logger.info('Shutting down Download Manager...')
for w in self._workers:
self.queue.put('shutdown')
if w.activeDownload:
w.cancel()
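# Usage sketch (illustrative, not part of the original module): the manager
# is a process-wide singleton; queued items are picked up by up to
# _maxWorkers concurrent DownloadWorker threads.
# dm = downloadManager()
# dm.startDownload(_example_download_item())
# dm.isDownloading('pf-123')  # True while a worker holds it
# dm.cancelDownload('pf-123')
# dm.shutdown()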
| AstroPrint/AstroBox | src/astroprint/printfiles/downloadmanager.py | Python | agpl-3.0 | 7,267 |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
#
# Copyright (c) 2015 ASMlover. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from eLisp.model import Symbol
from eLisp.expr.lambdaexpr import make_lambda
from eLisp.expr.util import (
is_tagged_list, is_symbol, cadr, caadr, caddr, cdadr, cddr)
def is_definition(expr):
return is_tagged_list(expr, Symbol('define'))
def definition_variable(expr):
if is_symbol(cadr(expr)):
return cadr(expr)
return caadr(expr)
def definition_value(expr):
if is_symbol(cadr(expr)):
return caddr(expr)
return make_lambda(cdadr(expr), cddr(expr))
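# Illustration (not part of the original module) of the two (define ...)
# shapes the helpers above destructure:
#   (define x 10)               -> variable: x,      value: 10
#   (define (square n) (* n n)) -> variable: square,
#                                  value: (lambda (n) (* n n)) via make_lambda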
| ASMlover/study | compiler/eLisp/eLisp/expr/definition.py | Python | bsd-2-clause | 1,898 |
# Copyright 2015 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import concurrent.futures
import csv
import datetime
import decimal
import json
import operator
import os
import time
import unittest
import uuid
import re
import six
import pytest
try:
import pandas
except ImportError: # pragma: NO COVER
pandas = None
try:
import IPython
from IPython.utils import io
from IPython.testing import tools
from IPython.terminal import interactiveshell
except ImportError: # pragma: NO COVER
IPython = None
from google.api_core.exceptions import PreconditionFailed
from google.api_core.exceptions import BadRequest
from google.api_core.exceptions import Conflict
from google.api_core.exceptions import Forbidden
from google.api_core.exceptions import NotFound
from google.api_core.exceptions import TooManyRequests
from google.cloud import bigquery
from google.cloud.bigquery.dataset import Dataset
from google.cloud.bigquery.dataset import DatasetReference
from google.cloud.bigquery.table import Table
from google.cloud._helpers import UTC
from google.cloud.bigquery import dbapi
from google.cloud import storage
from test_utils.retry import RetryErrors
from test_utils.retry import RetryInstanceState
from test_utils.retry import RetryResult
from test_utils.system import unique_resource_id
JOB_TIMEOUT = 120 # 2 minutes
WHERE = os.path.abspath(os.path.dirname(__file__))
# Common table data used for many tests.
ROWS = [
('Phred Phlyntstone', 32),
('Bharney Rhubble', 33),
('Wylma Phlyntstone', 29),
('Bhettye Rhubble', 27),
]
HEADER_ROW = ('Full Name', 'Age')
SCHEMA = [
bigquery.SchemaField('full_name', 'STRING', mode='REQUIRED'),
bigquery.SchemaField('age', 'INTEGER', mode='REQUIRED'),
]
TIME_PARTITIONING_CLUSTERING_FIELDS_SCHEMA = [
bigquery.SchemaField('transaction_time', 'TIMESTAMP', mode='REQUIRED'),
bigquery.SchemaField('transaction_id', 'INTEGER', mode='REQUIRED'),
bigquery.SchemaField('user_email', 'STRING', mode='REQUIRED'),
bigquery.SchemaField('store_code', 'STRING', mode='REQUIRED'),
bigquery.SchemaField(
'items', 'RECORD', mode='REPEATED', fields=[
bigquery.SchemaField('item_code', 'STRING', mode='REQUIRED'),
bigquery.SchemaField('quantity', 'INTEGER', mode='REQUIRED'),
bigquery.SchemaField('comments', 'STRING', mode='NULLABLE'),
bigquery.SchemaField('expiration_date', 'DATE', mode='REQUIRED'),
]),
]
def _has_rows(result):
return len(result) > 0
def _make_dataset_id(prefix):
return '%s%s' % (prefix, unique_resource_id())
def _load_json_schema(filename='data/schema.json'):
from google.cloud.bigquery.table import _parse_schema_resource
json_filename = os.path.join(WHERE, filename)
with open(json_filename, 'r') as schema_file:
return _parse_schema_resource(json.load(schema_file))
def _rate_limit_exceeded(forbidden):
"""Predicate: pass only exceptions with 'rateLimitExceeded' as reason."""
return any(error['reason'] == 'rateLimitExceeded'
for error in forbidden._errors)
# We need to wait to stay within the rate limits.
# The alternative outcome is a 403 Forbidden response from upstream, which
# they return instead of the more appropriate 429.
# See https://cloud.google.com/bigquery/quota-policy
retry_403 = RetryErrors(Forbidden, error_predicate=_rate_limit_exceeded)
class Config(object):
"""Run-time configuration to be modified at set-up.
This is a mutable stand-in to allow test set-up to modify
global state.
"""
CLIENT = None
CURSOR = None
def setUpModule():
Config.CLIENT = bigquery.Client()
Config.CURSOR = dbapi.connect(Config.CLIENT).cursor()
class TestBigQuery(unittest.TestCase):
def setUp(self):
self.to_delete = []
def tearDown(self):
def _still_in_use(bad_request):
return any(error['reason'] == 'resourceInUse'
for error in bad_request._errors)
retry_in_use = RetryErrors(BadRequest, error_predicate=_still_in_use)
retry_409_429 = RetryErrors((Conflict, TooManyRequests))
for doomed in self.to_delete:
if isinstance(doomed, storage.Bucket):
retry_409_429(doomed.delete)(force=True)
elif isinstance(doomed, (Dataset, bigquery.DatasetReference)):
retry_in_use(Config.CLIENT.delete_dataset)(
doomed, delete_contents=True)
elif isinstance(doomed, (Table, bigquery.TableReference)):
retry_in_use(Config.CLIENT.delete_table)(doomed)
else:
doomed.delete()
def test_get_service_account_email(self):
client = Config.CLIENT
got = client.get_service_account_email()
self.assertIsInstance(got, six.text_type)
self.assertIn('@', got)
def test_create_dataset(self):
DATASET_ID = _make_dataset_id('create_dataset')
dataset = self.temp_dataset(DATASET_ID)
self.assertTrue(_dataset_exists(dataset))
self.assertEqual(dataset.dataset_id, DATASET_ID)
self.assertEqual(dataset.project, Config.CLIENT.project)
def test_get_dataset(self):
DATASET_ID = _make_dataset_id('get_dataset')
client = Config.CLIENT
dataset_arg = Dataset(client.dataset(DATASET_ID))
dataset_arg.friendly_name = 'Friendly'
dataset_arg.description = 'Description'
dataset = retry_403(client.create_dataset)(dataset_arg)
self.to_delete.append(dataset)
dataset_ref = client.dataset(DATASET_ID)
got = client.get_dataset(dataset_ref)
self.assertEqual(got.friendly_name, 'Friendly')
self.assertEqual(got.description, 'Description')
def test_update_dataset(self):
dataset = self.temp_dataset(_make_dataset_id('update_dataset'))
self.assertTrue(_dataset_exists(dataset))
self.assertIsNone(dataset.friendly_name)
self.assertIsNone(dataset.description)
self.assertEqual(dataset.labels, {})
dataset.friendly_name = 'Friendly'
dataset.description = 'Description'
dataset.labels = {'priority': 'high', 'color': 'blue'}
ds2 = Config.CLIENT.update_dataset(
dataset,
('friendly_name', 'description', 'labels'))
self.assertEqual(ds2.friendly_name, 'Friendly')
self.assertEqual(ds2.description, 'Description')
self.assertEqual(ds2.labels, {'priority': 'high', 'color': 'blue'})
ds2.labels = {
'color': 'green', # change
'shape': 'circle', # add
'priority': None, # delete
}
ds3 = Config.CLIENT.update_dataset(ds2, ['labels'])
self.assertEqual(ds3.labels, {'color': 'green', 'shape': 'circle'})
# If we try to update using d2 again, it will fail because the
# previous update changed the ETag.
ds2.description = 'no good'
with self.assertRaises(PreconditionFailed):
Config.CLIENT.update_dataset(ds2, ['description'])
def test_list_datasets(self):
datasets_to_create = [
'new' + unique_resource_id(),
'newer' + unique_resource_id(),
'newest' + unique_resource_id(),
]
for dataset_id in datasets_to_create:
self.temp_dataset(dataset_id)
# Retrieve the datasets.
iterator = Config.CLIENT.list_datasets()
all_datasets = list(iterator)
self.assertIsNone(iterator.next_page_token)
created = [dataset for dataset in all_datasets
if dataset.dataset_id in datasets_to_create and
dataset.project == Config.CLIENT.project]
self.assertEqual(len(created), len(datasets_to_create))
def test_list_datasets_w_project(self):
# Retrieve datasets from a different project.
iterator = Config.CLIENT.list_datasets(project='bigquery-public-data')
all_datasets = frozenset([dataset.dataset_id for dataset in iterator])
self.assertIn('usa_names', all_datasets)
def test_create_table(self):
dataset = self.temp_dataset(_make_dataset_id('create_table'))
table_id = 'test_table'
table_arg = Table(dataset.table(table_id), schema=SCHEMA)
self.assertFalse(_table_exists(table_arg))
table = retry_403(Config.CLIENT.create_table)(table_arg)
self.to_delete.insert(0, table)
self.assertTrue(_table_exists(table))
self.assertEqual(table.table_id, table_id)
def test_create_table_w_time_partitioning_w_clustering_fields(self):
from google.cloud.bigquery.table import TimePartitioning
from google.cloud.bigquery.table import TimePartitioningType
dataset = self.temp_dataset(_make_dataset_id('create_table_tp_cf'))
table_id = 'test_table'
table_arg = Table(
dataset.table(table_id),
schema=TIME_PARTITIONING_CLUSTERING_FIELDS_SCHEMA)
self.assertFalse(_table_exists(table_arg))
table_arg.time_partitioning = TimePartitioning(
field='transaction_time')
table_arg.clustering_fields = ['user_email', 'store_code']
table = retry_403(Config.CLIENT.create_table)(table_arg)
self.to_delete.insert(0, table)
self.assertTrue(_table_exists(table))
self.assertEqual(table.table_id, table_id)
time_partitioning = table.time_partitioning
self.assertEqual(time_partitioning.type_, TimePartitioningType.DAY)
self.assertEqual(time_partitioning.field, 'transaction_time')
self.assertEqual(table.clustering_fields, ['user_email', 'store_code'])
def test_delete_dataset_delete_contents_true(self):
dataset_id = _make_dataset_id('delete_table_true')
dataset = retry_403(Config.CLIENT.create_dataset)(
Dataset(Config.CLIENT.dataset(dataset_id)))
table_id = 'test_table'
table_arg = Table(dataset.table(table_id), schema=SCHEMA)
table = retry_403(Config.CLIENT.create_table)(table_arg)
Config.CLIENT.delete_dataset(dataset, delete_contents=True)
self.assertFalse(_table_exists(table))
def test_delete_dataset_delete_contents_false(self):
from google.api_core import exceptions
dataset = self.temp_dataset(_make_dataset_id('delete_table_false'))
table_id = 'test_table'
table_arg = Table(dataset.table(table_id), schema=SCHEMA)
retry_403(Config.CLIENT.create_table)(table_arg)
with self.assertRaises(exceptions.BadRequest):
Config.CLIENT.delete_dataset(dataset)
def test_get_table_w_public_dataset(self):
PUBLIC = 'bigquery-public-data'
DATASET_ID = 'samples'
TABLE_ID = 'shakespeare'
table_ref = DatasetReference(PUBLIC, DATASET_ID).table(TABLE_ID)
table = Config.CLIENT.get_table(table_ref)
self.assertEqual(table.table_id, TABLE_ID)
self.assertEqual(table.dataset_id, DATASET_ID)
self.assertEqual(table.project, PUBLIC)
schema_names = [field.name for field in table.schema]
self.assertEqual(
schema_names, ['word', 'word_count', 'corpus', 'corpus_date'])
def test_list_partitions(self):
table_ref = DatasetReference(
'bigquery-public-data',
'ethereum_blockchain').table('blocks')
all_rows = Config.CLIENT.list_partitions(table_ref)
self.assertIn('20180801', all_rows)
self.assertGreater(len(all_rows), 1000)
def test_list_tables(self):
DATASET_ID = _make_dataset_id('list_tables')
dataset = self.temp_dataset(DATASET_ID)
# Retrieve tables before any are created for the dataset.
iterator = Config.CLIENT.list_tables(dataset)
all_tables = list(iterator)
self.assertEqual(all_tables, [])
self.assertIsNone(iterator.next_page_token)
# Insert some tables to be listed.
tables_to_create = [
'new' + unique_resource_id(),
'newer' + unique_resource_id(),
'newest' + unique_resource_id(),
]
for table_name in tables_to_create:
table = Table(dataset.table(table_name), schema=SCHEMA)
created_table = retry_403(Config.CLIENT.create_table)(table)
self.to_delete.insert(0, created_table)
# Retrieve the tables.
iterator = Config.CLIENT.list_tables(dataset)
all_tables = list(iterator)
self.assertIsNone(iterator.next_page_token)
created = [table for table in all_tables
if (table.table_id in tables_to_create and
table.dataset_id == DATASET_ID)]
self.assertEqual(len(created), len(tables_to_create))
def test_update_table(self):
dataset = self.temp_dataset(_make_dataset_id('update_table'))
TABLE_NAME = 'test_table'
table_arg = Table(dataset.table(TABLE_NAME), schema=SCHEMA)
self.assertFalse(_table_exists(table_arg))
table = retry_403(Config.CLIENT.create_table)(table_arg)
self.to_delete.insert(0, table)
self.assertTrue(_table_exists(table))
self.assertIsNone(table.friendly_name)
self.assertIsNone(table.description)
self.assertEqual(table.labels, {})
table.friendly_name = 'Friendly'
table.description = 'Description'
table.labels = {'priority': 'high', 'color': 'blue'}
table2 = Config.CLIENT.update_table(
table, ['friendly_name', 'description', 'labels'])
self.assertEqual(table2.friendly_name, 'Friendly')
self.assertEqual(table2.description, 'Description')
self.assertEqual(table2.labels, {'priority': 'high', 'color': 'blue'})
table2.description = None
table2.labels = {
'color': 'green', # change
'shape': 'circle', # add
'priority': None, # delete
}
table3 = Config.CLIENT.update_table(table2, ['description', 'labels'])
self.assertIsNone(table3.description)
self.assertEqual(table3.labels, {'color': 'green', 'shape': 'circle'})
# If we try to update using table2 again, it will fail because the
# previous update changed the ETag.
table2.description = 'no good'
with self.assertRaises(PreconditionFailed):
Config.CLIENT.update_table(table2, ['description'])
def test_update_table_schema(self):
dataset = self.temp_dataset(_make_dataset_id('update_table'))
TABLE_NAME = 'test_table'
table_arg = Table(dataset.table(TABLE_NAME), schema=SCHEMA)
self.assertFalse(_table_exists(table_arg))
table = retry_403(Config.CLIENT.create_table)(table_arg)
self.to_delete.insert(0, table)
self.assertTrue(_table_exists(table))
voter = bigquery.SchemaField('voter', 'BOOLEAN', mode='NULLABLE')
schema = table.schema
schema.append(voter)
table.schema = schema
updated_table = Config.CLIENT.update_table(table, ['schema'])
self.assertEqual(len(updated_table.schema), len(schema))
for found, expected in zip(updated_table.schema, schema):
self.assertEqual(found.name, expected.name)
self.assertEqual(found.field_type, expected.field_type)
self.assertEqual(found.mode, expected.mode)
@staticmethod
def _fetch_single_page(table, selected_fields=None):
iterator = Config.CLIENT.list_rows(
table, selected_fields=selected_fields)
page = six.next(iterator.pages)
return list(page)
def _create_table_many_columns(self, rowcount):
# Generate a table of maximum width via CREATE TABLE AS SELECT.
# first column is named 'rowval', and has a value from 1..rowcount
# Subsequent columns are named col_<N> and contain the value N*rowval,
# where N is between 1 and 9999 inclusive.
dsname = _make_dataset_id('wide_schema')
dataset = self.temp_dataset(dsname)
table_id = 'many_columns'
table_ref = dataset.table(table_id)
self.to_delete.insert(0, table_ref)
colprojections = ','.join(
['r * {} as col_{}'.format(n, n) for n in range(1, 10000)])
sql = """
CREATE TABLE {}.{}
AS
SELECT
r as rowval,
{}
FROM
UNNEST(GENERATE_ARRAY(1,{},1)) as r
""".format(dsname, table_id, colprojections, rowcount)
query_job = Config.CLIENT.query(sql)
query_job.result()
self.assertEqual(query_job.statement_type, 'CREATE_TABLE_AS_SELECT')
self.assertEqual(query_job.ddl_operation_performed, 'CREATE')
self.assertEqual(query_job.ddl_target_table, table_ref)
return table_ref
def test_query_many_columns(self):
# Test working with the widest schema BigQuery supports, 10k columns.
row_count = 2
table_ref = self._create_table_many_columns(row_count)
rows = list(Config.CLIENT.query(
'SELECT * FROM `{}.{}`'.format(
table_ref.dataset_id, table_ref.table_id)))
self.assertEqual(len(rows), row_count)
# check field representations adhere to expected values.
correctwidth = 0
badvals = 0
for r in rows:
vals = r._xxx_values
rowval = vals[0]
if len(vals) == 10000:
correctwidth = correctwidth + 1
for n in range(1, 10000):
if vals[n] != rowval * (n):
badvals = badvals + 1
self.assertEqual(correctwidth, row_count)
self.assertEqual(badvals, 0)
def test_insert_rows_then_dump_table(self):
NOW_SECONDS = 1448911495.484366
NOW = datetime.datetime.utcfromtimestamp(
NOW_SECONDS).replace(tzinfo=UTC)
ROWS = [
('Phred Phlyntstone', 32, NOW),
('Bharney Rhubble', 33, NOW + datetime.timedelta(seconds=10)),
('Wylma Phlyntstone', 29, NOW + datetime.timedelta(seconds=20)),
('Bhettye Rhubble', 27, None),
]
ROW_IDS = range(len(ROWS))
dataset = self.temp_dataset(_make_dataset_id('insert_rows_then_dump'))
TABLE_ID = 'test_table'
schema = [
bigquery.SchemaField('full_name', 'STRING', mode='REQUIRED'),
bigquery.SchemaField('age', 'INTEGER', mode='REQUIRED'),
bigquery.SchemaField('now', 'TIMESTAMP'),
]
table_arg = Table(dataset.table(TABLE_ID), schema=schema)
self.assertFalse(_table_exists(table_arg))
table = retry_403(Config.CLIENT.create_table)(table_arg)
self.to_delete.insert(0, table)
self.assertTrue(_table_exists(table))
errors = Config.CLIENT.insert_rows(table, ROWS, row_ids=ROW_IDS)
self.assertEqual(len(errors), 0)
rows = ()
# Allow for "warm up" before rows visible. See
# https://cloud.google.com/bigquery/streaming-data-into-bigquery#dataavailability
# 8 tries -> 1 + 2 + 4 + 8 + 16 + 32 + 64 = 127 seconds
retry = RetryResult(_has_rows, max_tries=8)
rows = retry(self._fetch_single_page)(table)
row_tuples = [r.values() for r in rows]
by_age = operator.itemgetter(1)
self.assertEqual(sorted(row_tuples, key=by_age),
sorted(ROWS, key=by_age))
def test_load_table_from_local_avro_file_then_dump_table(self):
from google.cloud.bigquery.job import SourceFormat
from google.cloud.bigquery.job import WriteDisposition
TABLE_NAME = 'test_table_avro'
ROWS = [
("violet", 400),
("indigo", 445),
("blue", 475),
("green", 510),
("yellow", 570),
("orange", 590),
("red", 650)]
dataset = self.temp_dataset(_make_dataset_id('load_local_then_dump'))
table_ref = dataset.table(TABLE_NAME)
table = Table(table_ref)
self.to_delete.insert(0, table)
with open(os.path.join(WHERE, 'data', 'colors.avro'), 'rb') as avrof:
config = bigquery.LoadJobConfig()
config.source_format = SourceFormat.AVRO
config.write_disposition = WriteDisposition.WRITE_TRUNCATE
job = Config.CLIENT.load_table_from_file(
avrof, table_ref, job_config=config)
# Retry until done.
job.result(timeout=JOB_TIMEOUT)
self.assertEqual(job.output_rows, len(ROWS))
table = Config.CLIENT.get_table(table)
rows = self._fetch_single_page(table)
row_tuples = [r.values() for r in rows]
by_wavelength = operator.itemgetter(1)
self.assertEqual(sorted(row_tuples, key=by_wavelength),
sorted(ROWS, key=by_wavelength))
def test_load_avro_from_uri_then_dump_table(self):
from google.cloud.bigquery.job import CreateDisposition
from google.cloud.bigquery.job import SourceFormat
from google.cloud.bigquery.job import WriteDisposition
table_name = 'test_table'
rows = [
("violet", 400),
("indigo", 445),
("blue", 475),
("green", 510),
("yellow", 570),
("orange", 590),
("red", 650)
]
with open(os.path.join(WHERE, 'data', 'colors.avro'), 'rb') as f:
GS_URL = self._write_avro_to_storage(
'bq_load_test' + unique_resource_id(), 'colors.avro', f)
dataset = self.temp_dataset(_make_dataset_id('bq_load_test'))
table_arg = dataset.table(table_name)
table = retry_403(Config.CLIENT.create_table)(Table(table_arg))
self.to_delete.insert(0, table)
config = bigquery.LoadJobConfig()
config.create_disposition = CreateDisposition.CREATE_NEVER
config.source_format = SourceFormat.AVRO
config.write_disposition = WriteDisposition.WRITE_EMPTY
job = Config.CLIENT.load_table_from_uri(
GS_URL, table_arg, job_config=config)
job.result(timeout=JOB_TIMEOUT)
self.assertEqual(job.output_rows, len(rows))
table = Config.CLIENT.get_table(table)
fetched = self._fetch_single_page(table)
row_tuples = [r.values() for r in fetched]
self.assertEqual(sorted(row_tuples, key=lambda x: x[1]),
sorted(rows, key=lambda x: x[1]))
def test_load_table_from_uri_then_dump_table(self):
from google.cloud.bigquery.job import CreateDisposition
from google.cloud.bigquery.job import SourceFormat
from google.cloud.bigquery.job import WriteDisposition
TABLE_ID = 'test_table'
GS_URL = self._write_csv_to_storage(
'bq_load_test' + unique_resource_id(), 'person_ages.csv',
HEADER_ROW, ROWS)
dataset = self.temp_dataset(_make_dataset_id('load_gcs_then_dump'))
table_arg = Table(dataset.table(TABLE_ID), schema=SCHEMA)
table = retry_403(Config.CLIENT.create_table)(table_arg)
self.to_delete.insert(0, table)
config = bigquery.LoadJobConfig()
config.create_disposition = CreateDisposition.CREATE_NEVER
config.skip_leading_rows = 1
config.source_format = SourceFormat.CSV
config.write_disposition = WriteDisposition.WRITE_EMPTY
job = Config.CLIENT.load_table_from_uri(
GS_URL, dataset.table(TABLE_ID), job_config=config)
# Allow for 90 seconds of "warm up" before rows visible. See
# https://cloud.google.com/bigquery/streaming-data-into-bigquery#dataavailability
# 8 tries -> 1 + 2 + 4 + 8 + 16 + 32 + 64 = 127 seconds
retry = RetryInstanceState(_job_done, max_tries=8)
retry(job.reload)()
rows = self._fetch_single_page(table)
row_tuples = [r.values() for r in rows]
by_age = operator.itemgetter(1)
self.assertEqual(sorted(row_tuples, key=by_age),
sorted(ROWS, key=by_age))
def test_load_table_from_file_w_explicit_location(self):
# Create a temporary bucket for extract files.
storage_client = storage.Client()
bucket_name = 'bq_load_table_eu_extract_test' + unique_resource_id()
bucket = storage_client.bucket(bucket_name)
bucket.location = 'eu'
self.to_delete.append(bucket)
bucket.create()
# Create a temporary dataset & table in the EU.
table_bytes = six.BytesIO(b'a,3\nb,2\nc,1\n')
client = Config.CLIENT
dataset = self.temp_dataset(
_make_dataset_id('eu_load_file'), location='EU')
table_ref = dataset.table('letters')
job_config = bigquery.LoadJobConfig()
job_config.skip_leading_rows = 0
job_config.schema = [
bigquery.SchemaField('letter', 'STRING'),
bigquery.SchemaField('value', 'INTEGER'),
]
# Load the file to an EU dataset with an EU load job.
load_job = client.load_table_from_file(
table_bytes, table_ref, location='EU', job_config=job_config)
load_job.result()
job_id = load_job.job_id
# Can get the job from the EU.
load_job = client.get_job(job_id, location='EU')
self.assertEqual(job_id, load_job.job_id)
self.assertEqual('EU', load_job.location)
self.assertTrue(load_job.exists())
# Cannot get the job from the US.
with self.assertRaises(NotFound):
client.get_job(job_id, location='US')
load_job_us = client.get_job(job_id)
load_job_us._properties['jobReference']['location'] = 'US'
self.assertFalse(load_job_us.exists())
with self.assertRaises(NotFound):
load_job_us.reload()
# Can cancel the job from the EU.
self.assertTrue(load_job.cancel())
load_job = client.cancel_job(job_id, location='EU')
self.assertEqual(job_id, load_job.job_id)
self.assertEqual('EU', load_job.location)
# Cannot cancel the job from the US.
with self.assertRaises(NotFound):
client.cancel_job(job_id, location='US')
with self.assertRaises(NotFound):
load_job_us.cancel()
# Can list the table rows.
table = client.get_table(table_ref)
self.assertEqual(table.num_rows, 3)
rows = [(row.letter, row.value) for row in client.list_rows(table)]
self.assertEqual(
list(sorted(rows)), [('a', 3), ('b', 2), ('c', 1)])
# Verify location behavior with queries
query_config = bigquery.QueryJobConfig()
query_config.dry_run = True
query_string = 'SELECT * FROM `{}.letters` LIMIT 1'.format(
dataset.dataset_id)
eu_query = client.query(
query_string,
location='EU',
job_config=query_config)
self.assertTrue(eu_query.done())
# Cannot query from US.
with self.assertRaises(BadRequest):
list(client.query(
query_string,
location='US',
job_config=query_config))
# Cannot copy from US.
with self.assertRaises(BadRequest):
client.copy_table(
table_ref, dataset.table('letters2_us'),
location='US').result()
# Cannot extract from US.
with self.assertRaises(BadRequest):
client.extract_table(
table_ref,
'gs://{}/letters-us.csv'.format(bucket_name),
location='US').result()
def _create_storage(self, bucket_name, blob_name):
storage_client = storage.Client()
# In the **very** rare case the bucket name is reserved, this
# fails with a ConnectionError.
bucket = storage_client.create_bucket(bucket_name)
self.to_delete.append(bucket)
return bucket.blob(blob_name)
def _write_csv_to_storage(self, bucket_name, blob_name, header_row,
data_rows):
from google.cloud._testing import _NamedTemporaryFile
blob = self._create_storage(bucket_name, blob_name)
with _NamedTemporaryFile() as temp:
with open(temp.name, 'w') as csv_write:
writer = csv.writer(csv_write)
writer.writerow(header_row)
writer.writerows(data_rows)
with open(temp.name, 'rb') as csv_read:
blob.upload_from_file(csv_read, content_type='text/csv')
self.to_delete.insert(0, blob)
return 'gs://{}/{}'.format(bucket_name, blob_name)
def _write_avro_to_storage(self, bucket_name, blob_name, avro_file):
blob = self._create_storage(bucket_name, blob_name)
blob.upload_from_file(avro_file,
content_type='application/x-avro-binary')
self.to_delete.insert(0, blob)
return 'gs://{}/{}'.format(bucket_name, blob_name)
def _load_table_for_extract_table(
self, storage_client, rows, bucket_name, blob_name, table):
from google.cloud._testing import _NamedTemporaryFile
gs_url = 'gs://{}/{}'.format(bucket_name, blob_name)
# In the **very** rare case the bucket name is reserved, this
# fails with a ConnectionError.
bucket = storage_client.create_bucket(bucket_name)
self.to_delete.append(bucket)
blob = bucket.blob(blob_name)
with _NamedTemporaryFile() as temp:
with open(temp.name, 'w') as csv_write:
writer = csv.writer(csv_write)
writer.writerow(HEADER_ROW)
writer.writerows(rows)
with open(temp.name, 'rb') as csv_read:
blob.upload_from_file(csv_read, content_type='text/csv')
self.to_delete.insert(0, blob)
dataset = self.temp_dataset(table.dataset_id)
table_ref = dataset.table(table.table_id)
config = bigquery.LoadJobConfig()
config.autodetect = True
job = Config.CLIENT.load_table_from_uri(gs_url, table_ref,
job_config=config)
# TODO(jba): do we need this retry now that we have job.result()?
# Allow for 90 seconds of "warm up" before rows visible. See
# https://cloud.google.com/bigquery/streaming-data-into-bigquery#dataavailability
# 8 tries -> 1 + 2 + 4 + 8 + 16 + 32 + 64 = 127 seconds
retry = RetryInstanceState(_job_done, max_tries=8)
retry(job.reload)()
def test_extract_table(self):
from google.cloud.storage import Client as StorageClient
storage_client = StorageClient()
local_id = unique_resource_id()
bucket_name = 'bq_extract_test' + local_id
blob_name = 'person_ages.csv'
dataset_id = _make_dataset_id('load_gcs_then_extract')
table_id = 'test_table'
table_ref = Config.CLIENT.dataset(dataset_id).table(table_id)
table = Table(table_ref)
self.to_delete.insert(0, table)
self._load_table_for_extract_table(
storage_client, ROWS, bucket_name, blob_name, table_ref)
bucket = storage_client.bucket(bucket_name)
destination_blob_name = 'person_ages_out.csv'
destination = bucket.blob(destination_blob_name)
destination_uri = 'gs://{}/person_ages_out.csv'.format(bucket_name)
job = Config.CLIENT.extract_table(table_ref, destination_uri)
job.result(timeout=100)
self.to_delete.insert(0, destination)
got = destination.download_as_string().decode('utf-8')
self.assertIn('Bharney Rhubble', got)
def test_copy_table(self):
# If we create a new table to copy from, the test won't work
# because the new rows will be stored in the streaming buffer,
# and copy jobs don't read the streaming buffer.
# We could wait for the streaming buffer to empty, but that could
# take minutes. Instead we copy a small public table.
source_dataset = DatasetReference('bigquery-public-data', 'samples')
source_ref = source_dataset.table('shakespeare')
dest_dataset = self.temp_dataset(_make_dataset_id('copy_table'))
dest_ref = dest_dataset.table('destination_table')
job_config = bigquery.CopyJobConfig()
job = Config.CLIENT.copy_table(
source_ref, dest_ref, job_config=job_config)
job.result()
dest_table = Config.CLIENT.get_table(dest_ref)
self.to_delete.insert(0, dest_table)
# Just check that we got some rows.
got_rows = self._fetch_single_page(dest_table)
self.assertTrue(len(got_rows) > 0)
def test_job_cancel(self):
DATASET_ID = _make_dataset_id('job_cancel')
JOB_ID_PREFIX = 'fetch_' + DATASET_ID
TABLE_NAME = 'test_table'
QUERY = 'SELECT * FROM %s.%s' % (DATASET_ID, TABLE_NAME)
dataset = self.temp_dataset(DATASET_ID)
table_arg = Table(dataset.table(TABLE_NAME), schema=SCHEMA)
table = retry_403(Config.CLIENT.create_table)(table_arg)
self.to_delete.insert(0, table)
job = Config.CLIENT.query(QUERY, job_id_prefix=JOB_ID_PREFIX)
job.cancel()
retry = RetryInstanceState(_job_done, max_tries=8)
retry(job.reload)()
# The `cancel` API doesn't leave any reliable traces on
# the status of the job resource, so we can't really assert for
# them here. The best we can do is note that the API call didn't
# raise an error, and that the job completed (in the `retry()`
# above).
def test_get_failed_job(self):
# issue 4246
from google.api_core.exceptions import BadRequest
JOB_ID = 'invalid_{}'.format(str(uuid.uuid4()))
QUERY = 'SELECT TIMESTAMP_ADD(@ts_value, INTERVAL 1 HOUR);'
PARAM = bigquery.ScalarQueryParameter(
'ts_value', 'TIMESTAMP', 1.4810976E9)
job_config = bigquery.QueryJobConfig()
job_config.query_parameters = [PARAM]
with self.assertRaises(BadRequest):
Config.CLIENT.query(
QUERY, job_id=JOB_ID, job_config=job_config).result()
job = Config.CLIENT.get_job(JOB_ID)
with self.assertRaises(ValueError):
job.query_parameters
def test_query_w_legacy_sql_types(self):
naive = datetime.datetime(2016, 12, 5, 12, 41, 9)
stamp = '%s %s' % (naive.date().isoformat(), naive.time().isoformat())
zoned = naive.replace(tzinfo=UTC)
examples = [
{
'sql': 'SELECT 1',
'expected': 1,
},
{
'sql': 'SELECT 1.3',
'expected': 1.3,
},
{
'sql': 'SELECT TRUE',
'expected': True,
},
{
'sql': 'SELECT "ABC"',
'expected': 'ABC',
},
{
'sql': 'SELECT CAST("foo" AS BYTES)',
'expected': b'foo',
},
{
'sql': 'SELECT CAST("%s" AS TIMESTAMP)' % (stamp,),
'expected': zoned,
},
]
for example in examples:
job_config = bigquery.QueryJobConfig()
job_config.use_legacy_sql = True
rows = list(Config.CLIENT.query(
example['sql'], job_config=job_config))
self.assertEqual(len(rows), 1)
self.assertEqual(len(rows[0]), 1)
self.assertEqual(rows[0][0], example['expected'])
def _generate_standard_sql_types_examples(self):
naive = datetime.datetime(2016, 12, 5, 12, 41, 9)
naive_microseconds = datetime.datetime(2016, 12, 5, 12, 41, 9, 250000)
stamp = '%s %s' % (naive.date().isoformat(), naive.time().isoformat())
stamp_microseconds = stamp + '.250000'
zoned = naive.replace(tzinfo=UTC)
zoned_microseconds = naive_microseconds.replace(tzinfo=UTC)
numeric = decimal.Decimal('123456789.123456789')
return [
{
'sql': 'SELECT 1',
'expected': 1,
},
{
'sql': 'SELECT 1.3',
'expected': 1.3,
},
{
'sql': 'SELECT TRUE',
'expected': True,
},
{
'sql': 'SELECT "ABC"',
'expected': 'ABC',
},
{
'sql': 'SELECT CAST("foo" AS BYTES)',
'expected': b'foo',
},
{
'sql': 'SELECT TIMESTAMP "%s"' % (stamp,),
'expected': zoned,
},
{
'sql': 'SELECT TIMESTAMP "%s"' % (stamp_microseconds,),
'expected': zoned_microseconds,
},
{
'sql': 'SELECT DATETIME(TIMESTAMP "%s")' % (stamp,),
'expected': naive,
},
{
'sql': 'SELECT DATETIME(TIMESTAMP "%s")' % (
stamp_microseconds,),
'expected': naive_microseconds,
},
{
'sql': 'SELECT DATE(TIMESTAMP "%s")' % (stamp,),
'expected': naive.date(),
},
{
'sql': 'SELECT TIME(TIMESTAMP "%s")' % (stamp,),
'expected': naive.time(),
},
{
'sql': 'SELECT NUMERIC "%s"' % (numeric,),
'expected': numeric,
},
{
'sql': 'SELECT (1, 2)',
'expected': {'_field_1': 1, '_field_2': 2},
},
{
'sql': 'SELECT ((1, 2), (3, 4), 5)',
'expected': {
'_field_1': {'_field_1': 1, '_field_2': 2},
'_field_2': {'_field_1': 3, '_field_2': 4},
'_field_3': 5,
},
},
{
'sql': 'SELECT [1, 2, 3]',
'expected': [1, 2, 3],
},
{
'sql': 'SELECT ([1, 2], 3, [4, 5])',
'expected':
{'_field_1': [1, 2], '_field_2': 3, '_field_3': [4, 5]},
},
{
'sql': 'SELECT [(1, 2, 3), (4, 5, 6)]',
'expected': [
{'_field_1': 1, '_field_2': 2, '_field_3': 3},
{'_field_1': 4, '_field_2': 5, '_field_3': 6},
],
},
{
'sql': 'SELECT [([1, 2, 3], 4), ([5, 6], 7)]',
'expected': [
{u'_field_1': [1, 2, 3], u'_field_2': 4},
{u'_field_1': [5, 6], u'_field_2': 7},
],
},
{
'sql': 'SELECT ARRAY(SELECT STRUCT([1, 2]))',
'expected': [{u'_field_1': [1, 2]}],
},
]
def test_query_w_standard_sql_types(self):
examples = self._generate_standard_sql_types_examples()
for example in examples:
rows = list(Config.CLIENT.query(example['sql']))
self.assertEqual(len(rows), 1)
self.assertEqual(len(rows[0]), 1)
self.assertEqual(rows[0][0], example['expected'])
def test_query_w_failed_query(self):
from google.api_core.exceptions import BadRequest
with self.assertRaises(BadRequest):
Config.CLIENT.query('invalid syntax;').result()
def test_query_w_wrong_config(self):
from google.cloud.bigquery.job import LoadJobConfig
good_query = 'SELECT 1;'
rows = list(Config.CLIENT.query('SELECT 1;').result())
assert rows[0][0] == 1
bad_config = LoadJobConfig()
bad_config.destination = Config.CLIENT.dataset('dset').table('tbl')
with self.assertRaises(Exception):
Config.CLIENT.query(good_query, job_config=bad_config).result()
def test_query_w_timeout(self):
query_job = Config.CLIENT.query(
'SELECT * FROM `bigquery-public-data.github_repos.commits`;',
job_id_prefix='test_query_w_timeout_')
with self.assertRaises(concurrent.futures.TimeoutError):
# 1 second is much too short for this query.
query_job.result(timeout=1)
def test_query_statistics(self):
"""
A system test to exercise some of the extended query statistics.
Note: We construct a query that should need at least three stages by
specifying a JOIN query. Exact plan and stats are effectively
non-deterministic, so we're largely interested in confirming values
are present.
"""
job_config = bigquery.QueryJobConfig()
job_config.use_query_cache = False
query_job = Config.CLIENT.query(
"""
SELECT
COUNT(1)
FROM
(
SELECT
year,
wban_number
FROM `bigquery-public-data.samples.gsod`
LIMIT 1000
) lside
INNER JOIN
(
SELECT
year,
state
FROM `bigquery-public-data.samples.natality`
LIMIT 1000
) rside
ON
lside.year = rside.year
""",
location='US',
job_config=job_config)
# run the job to completion
query_job.result()
# Assert top-level stats
self.assertFalse(query_job.cache_hit)
self.assertIsNotNone(query_job.destination)
self.assertTrue(query_job.done())
self.assertFalse(query_job.dry_run)
self.assertIsNone(query_job.num_dml_affected_rows)
self.assertEqual(query_job.priority, 'INTERACTIVE')
self.assertGreater(query_job.total_bytes_billed, 1)
self.assertGreater(query_job.total_bytes_processed, 1)
self.assertEqual(query_job.statement_type, 'SELECT')
self.assertGreater(query_job.slot_millis, 1)
# Make assertions on the shape of the query plan.
plan = query_job.query_plan
self.assertGreaterEqual(len(plan), 3)
first_stage = plan[0]
self.assertIsNotNone(first_stage.start)
self.assertIsNotNone(first_stage.end)
self.assertIsNotNone(first_stage.entry_id)
self.assertIsNotNone(first_stage.name)
self.assertGreater(first_stage.parallel_inputs, 0)
self.assertGreater(first_stage.completed_parallel_inputs, 0)
self.assertGreater(first_stage.shuffle_output_bytes, 0)
self.assertEqual(first_stage.status, 'COMPLETE')
# Query plan is a digraph. Ensure it has inter-stage links,
# but not every stage has inputs.
stages_with_inputs = 0
for entry in plan:
if len(entry.input_stages) > 0:
stages_with_inputs = stages_with_inputs + 1
self.assertGreater(stages_with_inputs, 0)
self.assertGreater(len(plan), stages_with_inputs)
def test_dbapi_w_standard_sql_types(self):
examples = self._generate_standard_sql_types_examples()
for example in examples:
Config.CURSOR.execute(example['sql'])
self.assertEqual(Config.CURSOR.rowcount, 1)
row = Config.CURSOR.fetchone()
self.assertEqual(len(row), 1)
self.assertEqual(row[0], example['expected'])
row = Config.CURSOR.fetchone()
self.assertIsNone(row)
def test_dbapi_fetchall(self):
query = 'SELECT * FROM UNNEST([(1, 2), (3, 4), (5, 6)])'
for arraysize in range(1, 5):
Config.CURSOR.execute(query)
self.assertEqual(Config.CURSOR.rowcount, 3, "expected 3 rows")
Config.CURSOR.arraysize = arraysize
rows = Config.CURSOR.fetchall()
row_tuples = [r.values() for r in rows]
self.assertEqual(row_tuples, [(1, 2), (3, 4), (5, 6)])
def _load_table_for_dml(self, rows, dataset_id, table_id):
from google.cloud._testing import _NamedTemporaryFile
from google.cloud.bigquery.job import CreateDisposition
from google.cloud.bigquery.job import SourceFormat
from google.cloud.bigquery.job import WriteDisposition
dataset = self.temp_dataset(dataset_id)
greeting = bigquery.SchemaField(
'greeting', 'STRING', mode='NULLABLE')
table_ref = dataset.table(table_id)
table_arg = Table(table_ref, schema=[greeting])
table = retry_403(Config.CLIENT.create_table)(table_arg)
self.to_delete.insert(0, table)
with _NamedTemporaryFile() as temp:
with open(temp.name, 'w') as csv_write:
writer = csv.writer(csv_write)
writer.writerow(('Greeting',))
writer.writerows(rows)
with open(temp.name, 'rb') as csv_read:
config = bigquery.LoadJobConfig()
config.source_format = SourceFormat.CSV
config.skip_leading_rows = 1
config.create_disposition = CreateDisposition.CREATE_NEVER
config.write_disposition = WriteDisposition.WRITE_EMPTY
job = Config.CLIENT.load_table_from_file(
csv_read, table_ref, job_config=config)
# Retry until done.
job.result(timeout=JOB_TIMEOUT)
self._fetch_single_page(table)
def test_query_w_dml(self):
dataset_name = _make_dataset_id('dml_query')
table_name = 'test_table'
self._load_table_for_dml([('Hello World',)], dataset_name, table_name)
query_template = """UPDATE {}.{}
SET greeting = 'Guten Tag'
WHERE greeting = 'Hello World'
"""
query_job = Config.CLIENT.query(
query_template.format(dataset_name, table_name),
job_id_prefix='test_query_w_dml_')
query_job.result()
self.assertEqual(query_job.num_dml_affected_rows, 1)
def test_dbapi_w_dml(self):
dataset_name = _make_dataset_id('dml_dbapi')
table_name = 'test_table'
self._load_table_for_dml([('Hello World',)], dataset_name, table_name)
query_template = """UPDATE {}.{}
SET greeting = 'Guten Tag'
WHERE greeting = 'Hello World'
"""
Config.CURSOR.execute(
query_template.format(dataset_name, table_name),
job_id='test_dbapi_w_dml_{}'.format(str(uuid.uuid4())))
self.assertEqual(Config.CURSOR.rowcount, 1)
self.assertIsNone(Config.CURSOR.fetchone())
def test_query_w_query_params(self):
from google.cloud.bigquery.job import QueryJobConfig
from google.cloud.bigquery.query import ArrayQueryParameter
from google.cloud.bigquery.query import ScalarQueryParameter
from google.cloud.bigquery.query import StructQueryParameter
question = 'What is the answer to life, the universe, and everything?'
question_param = ScalarQueryParameter(
name='question', type_='STRING', value=question)
answer = 42
answer_param = ScalarQueryParameter(
name='answer', type_='INT64', value=answer)
pi = 3.1415926
pi_param = ScalarQueryParameter(
name='pi', type_='FLOAT64', value=pi)
pi_numeric = decimal.Decimal('3.141592654')
pi_numeric_param = ScalarQueryParameter(
name='pi_numeric_param', type_='NUMERIC',
value=pi_numeric)
truthy = True
truthy_param = ScalarQueryParameter(
name='truthy', type_='BOOL', value=truthy)
beef = b'DEADBEEF'
beef_param = ScalarQueryParameter(
name='beef', type_='BYTES', value=beef)
naive = datetime.datetime(2016, 12, 5, 12, 41, 9)
naive_param = ScalarQueryParameter(
name='naive', type_='DATETIME', value=naive)
naive_date_param = ScalarQueryParameter(
name='naive_date', type_='DATE', value=naive.date())
naive_time_param = ScalarQueryParameter(
name='naive_time', type_='TIME', value=naive.time())
zoned = naive.replace(tzinfo=UTC)
zoned_param = ScalarQueryParameter(
name='zoned', type_='TIMESTAMP', value=zoned)
array_param = ArrayQueryParameter(
name='array_param', array_type='INT64', values=[1, 2])
struct_param = StructQueryParameter(
'hitchhiker', question_param, answer_param)
phred_name = 'Phred Phlyntstone'
phred_name_param = ScalarQueryParameter(
name='name', type_='STRING', value=phred_name)
phred_age = 32
phred_age_param = ScalarQueryParameter(
name='age', type_='INT64', value=phred_age)
phred_param = StructQueryParameter(
None, phred_name_param, phred_age_param)
bharney_name = 'Bharney Rhubbyl'
bharney_name_param = ScalarQueryParameter(
name='name', type_='STRING', value=bharney_name)
bharney_age = 31
bharney_age_param = ScalarQueryParameter(
name='age', type_='INT64', value=bharney_age)
bharney_param = StructQueryParameter(
None, bharney_name_param, bharney_age_param)
characters_param = ArrayQueryParameter(
name=None, array_type='RECORD',
values=[phred_param, bharney_param])
hero_param = StructQueryParameter(
'hero', phred_name_param, phred_age_param)
sidekick_param = StructQueryParameter(
'sidekick', bharney_name_param, bharney_age_param)
roles_param = StructQueryParameter(
'roles', hero_param, sidekick_param)
friends_param = ArrayQueryParameter(
name='friends', array_type='STRING',
values=[phred_name, bharney_name])
with_friends_param = StructQueryParameter(None, friends_param)
top_left_param = StructQueryParameter(
'top_left',
ScalarQueryParameter('x', 'INT64', 12),
ScalarQueryParameter('y', 'INT64', 102))
bottom_right_param = StructQueryParameter(
'bottom_right',
ScalarQueryParameter('x', 'INT64', 22),
ScalarQueryParameter('y', 'INT64', 92))
rectangle_param = StructQueryParameter(
'rectangle', top_left_param, bottom_right_param)
examples = [
{
'sql': 'SELECT @question',
'expected': question,
'query_parameters': [question_param],
},
{
'sql': 'SELECT @answer',
'expected': answer,
'query_parameters': [answer_param],
},
{
'sql': 'SELECT @pi',
'expected': pi,
'query_parameters': [pi_param],
},
{
'sql': 'SELECT @pi_numeric_param',
'expected': pi_numeric,
'query_parameters': [pi_numeric_param],
},
{
'sql': 'SELECT @truthy',
'expected': truthy,
'query_parameters': [truthy_param],
},
{
'sql': 'SELECT @beef',
'expected': beef,
'query_parameters': [beef_param],
},
{
'sql': 'SELECT @naive',
'expected': naive,
'query_parameters': [naive_param],
},
{
'sql': 'SELECT @naive_date',
'expected': naive.date(),
'query_parameters': [naive_date_param],
},
{
'sql': 'SELECT @naive_time',
'expected': naive.time(),
'query_parameters': [naive_time_param],
},
{
'sql': 'SELECT @zoned',
'expected': zoned,
'query_parameters': [zoned_param],
},
{
'sql': 'SELECT @array_param',
'expected': [1, 2],
'query_parameters': [array_param],
},
{
'sql': 'SELECT (@hitchhiker.question, @hitchhiker.answer)',
'expected': ({'_field_1': question, '_field_2': answer}),
'query_parameters': [struct_param],
},
{
'sql':
'SELECT '
'((@rectangle.bottom_right.x - @rectangle.top_left.x) '
'* (@rectangle.top_left.y - @rectangle.bottom_right.y))',
'expected': 100,
'query_parameters': [rectangle_param],
},
{
'sql': 'SELECT ?',
'expected': [
{'name': phred_name, 'age': phred_age},
{'name': bharney_name, 'age': bharney_age},
],
'query_parameters': [characters_param],
},
{
'sql': 'SELECT @roles',
'expected': {
'hero': {'name': phred_name, 'age': phred_age},
'sidekick': {'name': bharney_name, 'age': bharney_age},
},
'query_parameters': [roles_param],
},
{
'sql': 'SELECT ?',
'expected': {
'friends': [phred_name, bharney_name],
},
'query_parameters': [with_friends_param],
},
]
for example in examples:
jconfig = QueryJobConfig()
jconfig.query_parameters = example['query_parameters']
query_job = Config.CLIENT.query(
example['sql'],
job_config=jconfig,
job_id_prefix='test_query_w_query_params')
rows = list(query_job.result())
self.assertEqual(len(rows), 1)
self.assertEqual(len(rows[0]), 1)
self.assertEqual(rows[0][0], example['expected'])
def test_dbapi_w_query_parameters(self):
examples = [
{
'sql': 'SELECT %(boolval)s',
'expected': True,
'query_parameters': {
'boolval': True,
},
},
{
'sql': 'SELECT %(a "very" weird `name`)s',
'expected': True,
'query_parameters': {
'a "very" weird `name`': True,
},
},
{
'sql': 'SELECT %(select)s',
'expected': True,
'query_parameters': {
'select': True, # this name is a keyword
},
},
{
'sql': 'SELECT %s',
'expected': False,
'query_parameters': [False],
},
{
'sql': 'SELECT %(intval)s',
'expected': 123,
'query_parameters': {
'intval': 123,
},
},
{
'sql': 'SELECT %s',
'expected': -123456789,
'query_parameters': [-123456789],
},
{
'sql': 'SELECT %(floatval)s',
'expected': 1.25,
'query_parameters': {
'floatval': 1.25,
},
},
{
'sql': 'SELECT LOWER(%(strval)s)',
'query_parameters': {
'strval': 'I Am A String',
},
'expected': 'i am a string',
},
{
'sql': 'SELECT DATE_SUB(%(dateval)s, INTERVAL 1 DAY)',
'query_parameters': {
'dateval': datetime.date(2017, 4, 2),
},
'expected': datetime.date(2017, 4, 1),
},
{
'sql': 'SELECT TIME_ADD(%(timeval)s, INTERVAL 4 SECOND)',
'query_parameters': {
'timeval': datetime.time(12, 34, 56),
},
'expected': datetime.time(12, 35, 0),
},
{
'sql': (
'SELECT DATETIME_ADD(%(datetimeval)s, INTERVAL 53 SECOND)'
),
'query_parameters': {
'datetimeval': datetime.datetime(2012, 3, 4, 5, 6, 7),
},
'expected': datetime.datetime(2012, 3, 4, 5, 7, 0),
},
{
'sql': 'SELECT TIMESTAMP_TRUNC(%(zoned)s, MINUTE)',
'query_parameters': {
'zoned': datetime.datetime(
2012, 3, 4, 5, 6, 7, tzinfo=UTC),
},
'expected': datetime.datetime(2012, 3, 4, 5, 6, 0, tzinfo=UTC),
},
{
'sql': 'SELECT TIMESTAMP_TRUNC(%(zoned)s, MINUTE)',
'query_parameters': {
'zoned': datetime.datetime(
2012, 3, 4, 5, 6, 7, 250000, tzinfo=UTC),
},
'expected': datetime.datetime(2012, 3, 4, 5, 6, 0, tzinfo=UTC),
},
]
for example in examples:
msg = 'sql: {} query_parameters: {}'.format(
example['sql'], example['query_parameters'])
Config.CURSOR.execute(example['sql'], example['query_parameters'])
self.assertEqual(Config.CURSOR.rowcount, 1, msg=msg)
row = Config.CURSOR.fetchone()
self.assertEqual(len(row), 1, msg=msg)
self.assertEqual(row[0], example['expected'], msg=msg)
row = Config.CURSOR.fetchone()
self.assertIsNone(row, msg=msg)
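    # A minimal sketch (not part of the original suite) of the two DB-API
    # placeholder styles exercised above -- pyformat named parameters and
    # positional ``%s`` parameters; the values are illustrative:
    #
    #     Config.CURSOR.execute('SELECT %(intval)s', {'intval': 123})
    #     Config.CURSOR.execute('SELECT %s', [123])
    #
    # Both forms are bound as BigQuery query parameters under the hood.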
def test_large_query_w_public_data(self):
PUBLIC = 'bigquery-public-data'
DATASET_ID = 'samples'
TABLE_NAME = 'natality'
LIMIT = 1000
SQL = 'SELECT * from `{}.{}.{}` LIMIT {}'.format(
PUBLIC, DATASET_ID, TABLE_NAME, LIMIT)
query_job = Config.CLIENT.query(SQL)
rows = list(query_job)
self.assertEqual(len(rows), LIMIT)
def test_query_future(self):
query_job = Config.CLIENT.query('SELECT 1')
iterator = query_job.result(timeout=JOB_TIMEOUT)
row_tuples = [r.values() for r in iterator]
self.assertEqual(row_tuples, [(1,)])
def test_query_iter(self):
import types
query_job = Config.CLIENT.query('SELECT 1')
self.assertIsInstance(iter(query_job), types.GeneratorType)
row_tuples = [r.values() for r in query_job]
self.assertEqual(row_tuples, [(1,)])
@unittest.skipIf(pandas is None, 'Requires `pandas`')
def test_query_results_to_dataframe(self):
QUERY = """
SELECT id, author, time_ts, dead
from `bigquery-public-data.hacker_news.comments`
LIMIT 10
"""
df = Config.CLIENT.query(QUERY).result().to_dataframe()
self.assertIsInstance(df, pandas.DataFrame)
self.assertEqual(len(df), 10) # verify the number of rows
column_names = ['id', 'author', 'time_ts', 'dead']
self.assertEqual(list(df), column_names) # verify the column names
exp_datatypes = {'id': int, 'author': six.text_type,
'time_ts': pandas.Timestamp, 'dead': bool}
for index, row in df.iterrows():
for col in column_names:
# all the schema fields are nullable, so None is acceptable
                if row[col] is not None:
self.assertIsInstance(row[col], exp_datatypes[col])
def test_insert_rows_nested_nested(self):
# See #2951
SF = bigquery.SchemaField
schema = [
SF('string_col', 'STRING', mode='NULLABLE'),
SF('record_col', 'RECORD', mode='NULLABLE', fields=[
SF('nested_string', 'STRING', mode='NULLABLE'),
SF('nested_repeated', 'INTEGER', mode='REPEATED'),
SF('nested_record', 'RECORD', mode='NULLABLE', fields=[
SF('nested_nested_string', 'STRING', mode='NULLABLE'),
]),
]),
]
record = {
'nested_string': 'another string value',
'nested_repeated': [0, 1, 2],
'nested_record': {'nested_nested_string': 'some deep insight'},
}
to_insert = [
('Some value', record)
]
table_id = 'test_table'
dataset = self.temp_dataset(_make_dataset_id('issue_2951'))
table_arg = Table(dataset.table(table_id), schema=schema)
table = retry_403(Config.CLIENT.create_table)(table_arg)
self.to_delete.insert(0, table)
Config.CLIENT.insert_rows(table, to_insert)
retry = RetryResult(_has_rows, max_tries=8)
rows = retry(self._fetch_single_page)(table)
row_tuples = [r.values() for r in rows]
self.assertEqual(row_tuples, to_insert)
def test_insert_rows_nested_nested_dictionary(self):
# See #2951
SF = bigquery.SchemaField
schema = [
SF('string_col', 'STRING', mode='NULLABLE'),
SF('record_col', 'RECORD', mode='NULLABLE', fields=[
SF('nested_string', 'STRING', mode='NULLABLE'),
SF('nested_repeated', 'INTEGER', mode='REPEATED'),
SF('nested_record', 'RECORD', mode='NULLABLE', fields=[
SF('nested_nested_string', 'STRING', mode='NULLABLE'),
]),
]),
]
record = {
'nested_string': 'another string value',
'nested_repeated': [0, 1, 2],
'nested_record': {'nested_nested_string': 'some deep insight'},
}
to_insert = [
{'string_col': 'Some value', 'record_col': record}
]
table_id = 'test_table'
dataset = self.temp_dataset(_make_dataset_id('issue_2951'))
table_arg = Table(dataset.table(table_id), schema=schema)
table = retry_403(Config.CLIENT.create_table)(table_arg)
self.to_delete.insert(0, table)
Config.CLIENT.insert_rows(table, to_insert)
retry = RetryResult(_has_rows, max_tries=8)
rows = retry(self._fetch_single_page)(table)
row_tuples = [r.values() for r in rows]
expected_rows = [('Some value', record)]
self.assertEqual(row_tuples, expected_rows)
def test_create_table_rows_fetch_nested_schema(self):
table_name = 'test_table'
dataset = self.temp_dataset(
_make_dataset_id('create_table_nested_schema'))
schema = _load_json_schema()
table_arg = Table(dataset.table(table_name), schema=schema)
table = retry_403(Config.CLIENT.create_table)(table_arg)
self.to_delete.insert(0, table)
self.assertTrue(_table_exists(table))
self.assertEqual(table.table_id, table_name)
to_insert = []
# Data is in "JSON Lines" format, see http://jsonlines.org/
json_filename = os.path.join(WHERE, 'data', 'characters.jsonl')
with open(json_filename) as rows_file:
for line in rows_file:
to_insert.append(json.loads(line))
errors = Config.CLIENT.insert_rows_json(table, to_insert)
self.assertEqual(len(errors), 0)
retry = RetryResult(_has_rows, max_tries=8)
fetched = retry(self._fetch_single_page)(table)
fetched_tuples = [f.values() for f in fetched]
self.assertEqual(len(fetched), len(to_insert))
for found, expected in zip(sorted(fetched_tuples), to_insert):
self.assertEqual(found[0], expected['Name'])
self.assertEqual(found[1], int(expected['Age']))
self.assertEqual(found[2], expected['Weight'])
self.assertEqual(found[3], expected['IsMagic'])
self.assertEqual(len(found[4]), len(expected['Spells']))
for f_spell, e_spell in zip(found[4], expected['Spells']):
self.assertEqual(f_spell['Name'], e_spell['Name'])
parts = time.strptime(
e_spell['LastUsed'], '%Y-%m-%d %H:%M:%S UTC')
e_used = datetime.datetime(*parts[0:6], tzinfo=UTC)
self.assertEqual(f_spell['LastUsed'], e_used)
self.assertEqual(f_spell['DiscoveredBy'],
e_spell['DiscoveredBy'])
self.assertEqual(f_spell['Properties'], e_spell['Properties'])
e_icon = base64.standard_b64decode(
e_spell['Icon'].encode('ascii'))
self.assertEqual(f_spell['Icon'], e_icon)
parts = time.strptime(expected['TeaTime'], '%H:%M:%S')
e_teatime = datetime.time(*parts[3:6])
self.assertEqual(found[5], e_teatime)
parts = time.strptime(expected['NextVacation'], '%Y-%m-%d')
e_nextvac = datetime.date(*parts[0:3])
self.assertEqual(found[6], e_nextvac)
parts = time.strptime(expected['FavoriteTime'],
'%Y-%m-%dT%H:%M:%S')
e_favtime = datetime.datetime(*parts[0:6])
self.assertEqual(found[7], e_favtime)
self.assertEqual(found[8],
decimal.Decimal(expected['FavoriteNumber']))
def _fetch_dataframe(self, query):
return Config.CLIENT.query(query).result().to_dataframe()
@unittest.skipIf(pandas is None, 'Requires `pandas`')
def test_nested_table_to_dataframe(self):
from google.cloud.bigquery.job import SourceFormat
from google.cloud.bigquery.job import WriteDisposition
SF = bigquery.SchemaField
schema = [
SF('string_col', 'STRING', mode='NULLABLE'),
SF('record_col', 'RECORD', mode='NULLABLE', fields=[
SF('nested_string', 'STRING', mode='NULLABLE'),
SF('nested_repeated', 'INTEGER', mode='REPEATED'),
SF('nested_record', 'RECORD', mode='NULLABLE', fields=[
SF('nested_nested_string', 'STRING', mode='NULLABLE'),
]),
]),
]
record = {
'nested_string': 'another string value',
'nested_repeated': [0, 1, 2],
'nested_record': {'nested_nested_string': 'some deep insight'},
}
to_insert = [
{'string_col': 'Some value', 'record_col': record},
]
rows = [json.dumps(row) for row in to_insert]
body = six.BytesIO('{}\n'.format('\n'.join(rows)).encode('ascii'))
table_id = 'test_table'
dataset = self.temp_dataset(_make_dataset_id('nested_df'))
table = dataset.table(table_id)
self.to_delete.insert(0, table)
job_config = bigquery.LoadJobConfig()
job_config.write_disposition = WriteDisposition.WRITE_TRUNCATE
job_config.source_format = SourceFormat.NEWLINE_DELIMITED_JSON
job_config.schema = schema
# Load a table using a local JSON file from memory.
Config.CLIENT.load_table_from_file(
body, table, job_config=job_config).result()
df = Config.CLIENT.list_rows(
table, selected_fields=schema).to_dataframe()
self.assertIsInstance(df, pandas.DataFrame)
self.assertEqual(len(df), 1) # verify the number of rows
exp_columns = ['string_col', 'record_col']
self.assertEqual(list(df), exp_columns) # verify the column names
row = df.iloc[0]
# verify the row content
self.assertEqual(row['string_col'], 'Some value')
self.assertEqual(row['record_col'], record)
# verify that nested data can be accessed with indices/keys
self.assertEqual(row['record_col']['nested_repeated'][0], 0)
self.assertEqual(
row['record_col']['nested_record']['nested_nested_string'],
'some deep insight')
def test_list_rows_empty_table(self):
from google.cloud.bigquery.table import RowIterator
dataset_id = _make_dataset_id('empty_table')
dataset = self.temp_dataset(dataset_id)
table_ref = dataset.table('empty_table')
table = Config.CLIENT.create_table(bigquery.Table(table_ref))
# It's a bit silly to list rows for an empty table, but this does
# happen as the result of a DDL query from an IPython magic command.
rows = Config.CLIENT.list_rows(table)
self.assertIsInstance(rows, RowIterator)
self.assertEqual(tuple(rows), ())
def test_list_rows_page_size(self):
from google.cloud.bigquery.job import SourceFormat
from google.cloud.bigquery.job import WriteDisposition
num_items = 7
page_size = 3
num_pages, num_last_page = divmod(num_items, page_size)
SF = bigquery.SchemaField
schema = [SF('string_col', 'STRING', mode='NULLABLE')]
to_insert = [{'string_col': 'item%d' % i} for i in range(num_items)]
rows = [json.dumps(row) for row in to_insert]
body = six.BytesIO('{}\n'.format('\n'.join(rows)).encode('ascii'))
table_id = 'test_table'
dataset = self.temp_dataset(_make_dataset_id('nested_df'))
table = dataset.table(table_id)
self.to_delete.insert(0, table)
job_config = bigquery.LoadJobConfig()
job_config.write_disposition = WriteDisposition.WRITE_TRUNCATE
job_config.source_format = SourceFormat.NEWLINE_DELIMITED_JSON
job_config.schema = schema
# Load a table using a local JSON file from memory.
Config.CLIENT.load_table_from_file(
body, table, job_config=job_config).result()
df = Config.CLIENT.list_rows(
table, selected_fields=schema, page_size=page_size)
pages = df.pages
for i in range(num_pages):
page = next(pages)
self.assertEqual(page.num_items, page_size)
page = next(pages)
self.assertEqual(page.num_items, num_last_page)
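    # Paging arithmetic used above, for reference: divmod(7, 3) == (2, 1),
    # i.e. num_pages = 2 full pages of page_size rows plus a final page of
    # num_last_page = 1 row, which is exactly what the loop asserts.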
def temp_dataset(self, dataset_id, location=None):
dataset = Dataset(Config.CLIENT.dataset(dataset_id))
if location:
dataset.location = location
dataset = retry_403(Config.CLIENT.create_dataset)(dataset)
self.to_delete.append(dataset)
return dataset
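    # Hedged usage sketch for the helper above (the dataset id and location
    # are illustrative): created datasets are queued on ``self.to_delete`` so
    # the harness cleans them up after the test.
    #
    #     dataset = self.temp_dataset(_make_dataset_id('example'), location='EU')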
@pytest.mark.skipif(pandas is None, reason='Requires `pandas`')
@pytest.mark.skipif(IPython is None, reason='Requires `ipython`')
@pytest.mark.usefixtures('ipython_interactive')
def test_bigquery_magic():
ip = IPython.get_ipython()
ip.extension_manager.load_extension('google.cloud.bigquery')
sql = """
SELECT
CONCAT(
'https://stackoverflow.com/questions/',
CAST(id as STRING)) as url,
view_count
FROM `bigquery-public-data.stackoverflow.posts_questions`
WHERE tags like '%google-bigquery%'
ORDER BY view_count DESC
LIMIT 10
"""
with io.capture_output() as captured:
result = ip.run_cell_magic('bigquery', '', sql)
lines = re.split('\n|\r', captured.stdout)
# Removes blanks & terminal code (result of display clearing)
updates = list(filter(lambda x: bool(x) and x != '\x1b[2K', lines))
assert re.match("Executing query with job ID: .*", updates[0])
assert all(re.match("Query executing: .*s", line)
for line in updates[1:-1])
assert re.match("Query complete after .*s", updates[-1])
assert isinstance(result, pandas.DataFrame)
assert len(result) == 10 # verify row count
assert list(result) == ['url', 'view_count'] # verify column names
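# For orientation, the cell magic driven programmatically above corresponds
# to what a notebook user would type interactively; a minimal (illustrative)
# cell, assuming the extension is loaded, is:
#
#     %%bigquery
#     SELECT 1 AS value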
def _job_done(instance):
return instance.state.lower() == 'done'
def _dataset_exists(ds):
try:
Config.CLIENT.get_dataset(DatasetReference(ds.project, ds.dataset_id))
return True
except NotFound:
return False
def _table_exists(t):
try:
tr = DatasetReference(t.project, t.dataset_id).table(t.table_id)
Config.CLIENT.get_table(tr)
return True
except NotFound:
return False
@pytest.fixture(scope='session')
def ipython():
config = tools.default_config()
config.TerminalInteractiveShell.simple_prompt = True
shell = interactiveshell.TerminalInteractiveShell.instance(config=config)
return shell
@pytest.fixture()
def ipython_interactive(request, ipython):
"""Activate IPython's builtin hooks
for the duration of the test scope.
"""
with ipython.builtin_trap:
yield ipython
| tseaver/gcloud-python | bigquery/tests/system.py | Python | apache-2.0 | 73,535 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 Dimension Data
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
#
# Authors:
# - Adam Friedman <[email protected]>
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {
'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'
}
DOCUMENTATION = '''
---
module: dimensiondata_vlan
short_description: Manage a VLAN in a Cloud Control network domain.
extends_documentation_fragment:
- dimensiondata
- dimensiondata_wait
description:
- Manage VLANs in Cloud Control network domains.
version_added: "2.5"
author: 'Adam Friedman (@tintoy)'
options:
name:
description:
- The name of the target VLAN.
- Required if C(state) is C(present).
required: false
description:
description:
- A description of the VLAN.
required: false
default: null
network_domain:
description:
- The Id or name of the target network domain.
required: true
private_ipv4_base_address:
description:
- The base address for the VLAN's IPv4 network (e.g. 192.168.1.0).
required: false
private_ipv4_prefix_size:
description:
      - The size of the IPv4 address space, e.g. 24.
      - Required if C(private_ipv4_base_address) is specified.
required: false
state:
description:
- The desired state for the target VLAN.
- C(readonly) ensures that the state is only ever read, not modified (the module will fail if the resource does not exist).
choices: [present, absent, readonly]
default: present
allow_expand:
description:
      - Permit expansion of the target VLAN's network if the module parameters specify a larger network than the VLAN currently possesses.
- If C(False), the module will fail under these conditions.
- This is intended to prevent accidental expansion of a VLAN's network (since this operation is not reversible).
required: false
default: False
'''
EXAMPLES = '''
# Add or update VLAN
- dimensiondata_vlan:
region: na
location: NA5
network_domain: test_network
name: my_vlan1
description: A test VLAN
private_ipv4_base_address: 192.168.23.0
private_ipv4_prefix_size: 24
state: present
wait: yes
# Read / get VLAN details
- dimensiondata_vlan:
region: na
location: NA5
network_domain: test_network
name: my_vlan1
state: readonly
wait: yes
# Delete a VLAN
- dimensiondata_vlan:
region: na
location: NA5
network_domain: test_network
name: my_vlan_1
state: absent
wait: yes
'''
RETURN = '''
vlan:
description: Dictionary describing the VLAN.
returned: On success when I(state) is 'present'
type: complex
contains:
id:
description: VLAN ID.
type: string
sample: "aaaaa000-a000-4050-a215-2808934ccccc"
name:
description: VLAN name.
type: string
sample: "My VLAN"
description:
description: VLAN description.
type: string
sample: "My VLAN description"
location:
description: Datacenter location.
type: string
sample: NA3
private_ipv4_base_address:
description: The base address for the VLAN's private IPV4 network.
type: string
sample: 192.168.23.0
private_ipv4_prefix_size:
description: The prefix size for the VLAN's private IPV4 network.
type: int
sample: 24
private_ipv4_gateway_address:
description: The gateway address for the VLAN's private IPV4 network.
type: string
sample: 192.168.23.1
private_ipv6_base_address:
description: The base address for the VLAN's IPV6 network.
type: string
sample: 2402:9900:111:1195:0:0:0:0
private_ipv6_prefix_size:
description: The prefix size for the VLAN's IPV6 network.
type: int
sample: 64
private_ipv6_gateway_address:
description: The gateway address for the VLAN's IPV6 network.
type: string
sample: 2402:9900:111:1195:0:0:0:1
status:
description: VLAN status.
type: string
sample: NORMAL
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.dimensiondata import DimensionDataModule, UnknownNetworkError
try:
from libcloud.common.dimensiondata import DimensionDataVlan, DimensionDataAPIException
HAS_LIBCLOUD = True
except ImportError:
DimensionDataVlan = None
HAS_LIBCLOUD = False
class DimensionDataVlanModule(DimensionDataModule):
"""
The dimensiondata_vlan module for Ansible.
"""
def __init__(self):
"""
Create a new Dimension Data VLAN module.
"""
super(DimensionDataVlanModule, self).__init__(
module=AnsibleModule(
argument_spec=DimensionDataModule.argument_spec_with_wait(
name=dict(required=True, type='str'),
description=dict(default='', type='str'),
network_domain=dict(required=True, type='str'),
private_ipv4_base_address=dict(default='', type='str'),
private_ipv4_prefix_size=dict(default=0, type='int'),
allow_expand=dict(required=False, default=False, type='bool'),
state=dict(default='present', choices=['present', 'absent', 'readonly'])
),
required_together=DimensionDataModule.required_together()
)
)
self.name = self.module.params['name']
self.description = self.module.params['description']
self.network_domain_selector = self.module.params['network_domain']
self.private_ipv4_base_address = self.module.params['private_ipv4_base_address']
self.private_ipv4_prefix_size = self.module.params['private_ipv4_prefix_size']
self.state = self.module.params['state']
self.allow_expand = self.module.params['allow_expand']
if self.wait and self.state != 'present':
self.module.fail_json(
msg='The wait parameter is only supported when state is "present".'
)
def state_present(self):
"""
Ensure that the target VLAN is present.
"""
network_domain = self._get_network_domain()
vlan = self._get_vlan(network_domain)
if not vlan:
if self.module.check_mode:
self.module.exit_json(
msg='VLAN "{0}" is absent from network domain "{1}" (should be present).'.format(
self.name, self.network_domain_selector
),
changed=True
)
vlan = self._create_vlan(network_domain)
self.module.exit_json(
msg='Created VLAN "{0}" in network domain "{1}".'.format(
self.name, self.network_domain_selector
),
vlan=vlan_to_dict(vlan),
changed=True
)
else:
diff = VlanDiff(vlan, self.module.params)
if not diff.has_changes():
self.module.exit_json(
msg='VLAN "{0}" is present in network domain "{1}" (no changes detected).'.format(
self.name, self.network_domain_selector
),
vlan=vlan_to_dict(vlan),
changed=False
)
return
try:
diff.ensure_legal_change()
except InvalidVlanChangeError as invalid_vlan_change:
self.module.fail_json(
msg='Unable to update VLAN "{0}" in network domain "{1}": {2}'.format(
self.name, self.network_domain_selector, invalid_vlan_change
)
)
if diff.needs_expand() and not self.allow_expand:
self.module.fail_json(
msg='The configured private IPv4 network size ({0}-bit prefix) for '.format(
self.private_ipv4_prefix_size
) + 'the VLAN differs from its current network size ({0}-bit prefix) '.format(
vlan.private_ipv4_range_size
) + 'and needs to be expanded. Use allow_expand=true if this is what you want.'
)
if self.module.check_mode:
self.module.exit_json(
msg='VLAN "{0}" is present in network domain "{1}" (changes detected).'.format(
self.name, self.network_domain_selector
),
vlan=vlan_to_dict(vlan),
changed=True
)
if diff.needs_edit():
vlan.name = self.name
vlan.description = self.description
self.driver.ex_update_vlan(vlan)
if diff.needs_expand():
vlan.private_ipv4_range_size = self.private_ipv4_prefix_size
self.driver.ex_expand_vlan(vlan)
self.module.exit_json(
msg='Updated VLAN "{0}" in network domain "{1}".'.format(
self.name, self.network_domain_selector
),
vlan=vlan_to_dict(vlan),
changed=True
)
def state_readonly(self):
"""
Read the target VLAN's state.
"""
network_domain = self._get_network_domain()
vlan = self._get_vlan(network_domain)
if vlan:
self.module.exit_json(
vlan=vlan_to_dict(vlan),
changed=False
)
else:
self.module.fail_json(
msg='VLAN "{0}" does not exist in network domain "{1}".'.format(
self.name, self.network_domain_selector
)
)
def state_absent(self):
"""
Ensure that the target VLAN is not present.
"""
network_domain = self._get_network_domain()
vlan = self._get_vlan(network_domain)
if not vlan:
self.module.exit_json(
msg='VLAN "{0}" is absent from network domain "{1}".'.format(
self.name, self.network_domain_selector
),
changed=False
)
return
if self.module.check_mode:
self.module.exit_json(
msg='VLAN "{0}" is present in network domain "{1}" (should be absent).'.format(
self.name, self.network_domain_selector
),
vlan=vlan_to_dict(vlan),
changed=True
)
self._delete_vlan(vlan)
self.module.exit_json(
msg='Deleted VLAN "{0}" from network domain "{1}".'.format(
self.name, self.network_domain_selector
),
changed=True
)
def _get_vlan(self, network_domain):
"""
Retrieve the target VLAN details from CloudControl.
:param network_domain: The target network domain.
:return: The VLAN, or None if the target VLAN was not found.
:rtype: DimensionDataVlan
"""
vlans = self.driver.ex_list_vlans(
location=self.location,
network_domain=network_domain
)
matching_vlans = [vlan for vlan in vlans if vlan.name == self.name]
if matching_vlans:
return matching_vlans[0]
return None
def _create_vlan(self, network_domain):
vlan = self.driver.ex_create_vlan(
network_domain,
self.name,
self.private_ipv4_base_address,
self.description,
self.private_ipv4_prefix_size
)
if self.wait:
            vlan = self._wait_for_vlan_state(vlan, 'NORMAL')
return vlan
def _delete_vlan(self, vlan):
try:
self.driver.ex_delete_vlan(vlan)
# Not currently supported for deletes due to a bug in libcloud (module will error out if "wait" is specified when "state" is not "present").
if self.wait:
self._wait_for_vlan_state(vlan, 'NOT_FOUND')
except DimensionDataAPIException as api_exception:
self.module.fail_json(
msg='Failed to delete VLAN "{0}" due to unexpected error from the CloudControl API: {1}'.format(
vlan.id, api_exception.msg
)
)
def _wait_for_vlan_state(self, vlan, state_to_wait_for):
network_domain = self._get_network_domain()
wait_poll_interval = self.module.params['wait_poll_interval']
wait_time = self.module.params['wait_time']
# Bizarre bug in libcloud when checking status after delete; socket.error is too generic to catch in this context so for now we don't even try.
try:
            return self.driver.connection.wait_for_state(
                state_to_wait_for,
                self.driver.ex_get_vlan,
                wait_poll_interval,
                wait_time,
                vlan.id  # callers pass the VLAN object; ex_get_vlan polls by id
            )
except DimensionDataAPIException as api_exception:
if api_exception.code != 'RESOURCE_NOT_FOUND':
raise
return DimensionDataVlan(
id=vlan.id,
status='NOT_FOUND',
name='',
description='',
private_ipv4_range_address='',
private_ipv4_range_size=0,
ipv4_gateway='',
ipv6_range_address='',
ipv6_range_size=0,
ipv6_gateway='',
location=self.location,
network_domain=network_domain
)
def _get_network_domain(self):
"""
Retrieve the target network domain from the Cloud Control API.
:return: The network domain.
"""
try:
return self.get_network_domain(
self.network_domain_selector, self.location
)
except UnknownNetworkError:
self.module.fail_json(
msg='Cannot find network domain "{0}" in datacenter "{1}".'.format(
self.network_domain_selector, self.location
)
)
return None
class InvalidVlanChangeError(Exception):
"""
Error raised when an illegal change to VLAN state is attempted.
"""
pass
class VlanDiff(object):
"""
Represents differences between VLAN information (from CloudControl) and module parameters.
"""
def __init__(self, vlan, module_params):
"""
:param vlan: The VLAN information from CloudControl.
:type vlan: DimensionDataVlan
:param module_params: The module parameters.
:type module_params: dict
"""
self.vlan = vlan
self.module_params = module_params
self.name_changed = module_params['name'] != vlan.name
self.description_changed = module_params['description'] != vlan.description
self.private_ipv4_base_address_changed = module_params['private_ipv4_base_address'] != vlan.private_ipv4_range_address
self.private_ipv4_prefix_size_changed = module_params['private_ipv4_prefix_size'] != vlan.private_ipv4_range_size
# Is configured prefix size greater than or less than the actual prefix size?
private_ipv4_prefix_size_difference = module_params['private_ipv4_prefix_size'] - vlan.private_ipv4_range_size
self.private_ipv4_prefix_size_increased = private_ipv4_prefix_size_difference > 0
self.private_ipv4_prefix_size_decreased = private_ipv4_prefix_size_difference < 0
def has_changes(self):
"""
Does the VlanDiff represent any changes between the VLAN and module configuration?
        :return: True if there are changes; otherwise, False.
"""
return self.needs_edit() or self.needs_expand()
def ensure_legal_change(self):
"""
Ensure the change (if any) represented by the VlanDiff represents a legal change to VLAN state.
- private_ipv4_base_address cannot be changed
- private_ipv4_prefix_size must be greater than or equal to the VLAN's existing private_ipv4_range_size
:raise InvalidVlanChangeError: The VlanDiff does not represent a legal change to VLAN state.
"""
# Cannot change base address for private IPv4 network.
if self.private_ipv4_base_address_changed:
raise InvalidVlanChangeError('Cannot change the private IPV4 base address for an existing VLAN.')
# Cannot shrink private IPv4 network (by increasing prefix size).
if self.private_ipv4_prefix_size_increased:
raise InvalidVlanChangeError('Cannot shrink the private IPV4 network for an existing VLAN (only expand is supported).')
def needs_edit(self):
"""
Is an Edit operation required to resolve the differences between the VLAN information and the module parameters?
:return: True, if an Edit operation is required; otherwise, False.
"""
return self.name_changed or self.description_changed
def needs_expand(self):
"""
Is an Expand operation required to resolve the differences between the VLAN information and the module parameters?
The VLAN's network is expanded by reducing the size of its network prefix.
:return: True, if an Expand operation is required; otherwise, False.
"""
return self.private_ipv4_prefix_size_decreased
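    # Worked example (illustrative prefix sizes): if the VLAN currently has a
    # /24 network (private_ipv4_range_size == 24) and the module requests a
    # /23 (private_ipv4_prefix_size == 23), the prefix size decreased, the
    # address space doubles, and needs_expand() returns True. Requesting a
    # /25 instead would shrink the network, which ensure_legal_change()
    # rejects with InvalidVlanChangeError.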
def vlan_to_dict(vlan):
return {
'id': vlan.id,
'name': vlan.name,
'description': vlan.description,
'location': vlan.location.id,
'private_ipv4_base_address': vlan.private_ipv4_range_address,
'private_ipv4_prefix_size': vlan.private_ipv4_range_size,
'private_ipv4_gateway_address': vlan.ipv4_gateway,
'ipv6_base_address': vlan.ipv6_range_address,
'ipv6_prefix_size': vlan.ipv6_range_size,
'ipv6_gateway_address': vlan.ipv6_gateway,
'status': vlan.status
}
def main():
module = DimensionDataVlanModule()
if module.state == 'present':
module.state_present()
elif module.state == 'readonly':
module.state_readonly()
elif module.state == 'absent':
module.state_absent()
if __name__ == '__main__':
main()
| wrouesnel/ansible | lib/ansible/modules/cloud/dimensiondata/dimensiondata_vlan.py | Python | gpl-3.0 | 19,352 |
# Based on local.py (c) 2012, Michael DeHaan <[email protected]>
# Based on chroot.py (c) 2013, Maykel Moya <[email protected]>
# (c) 2013, Michael Scherer <[email protected]>
# (c) 2015, Toshio Kuratomi <[email protected]>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
author: Michael Scherer <[email protected]>
connection: libvirt_lxc
short_description: Run tasks in lxc containers via libvirt
description:
- Run commands or put/fetch files to an existing lxc container using libvirt
version_added: "2.0"
options:
remote_addr:
description:
- Container identifier
        default: inventory_hostname
vars:
- name: ansible_host
- name: ansible_libvirt_lxc_host
"""
import distutils.spawn
import os
import os.path
import subprocess
import traceback
from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.module_utils.six.moves import shlex_quote
from ansible.module_utils._text import to_bytes
from ansible.plugins.connection import ConnectionBase, BUFSIZE
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class Connection(ConnectionBase):
''' Local lxc based connections '''
transport = 'libvirt_lxc'
has_pipelining = True
# su currently has an undiagnosed issue with calculating the file
# checksums (so copy, for instance, doesn't work right)
# Have to look into that before re-enabling this
become_methods = frozenset(C.BECOME_METHODS).difference(('su',))
default_user = 'root'
def __init__(self, play_context, new_stdin, *args, **kwargs):
super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs)
self.lxc = self._play_context.remote_addr
self.virsh = self._search_executable('virsh')
self._check_domain(self.lxc)
def _search_executable(self, executable):
cmd = distutils.spawn.find_executable(executable)
if not cmd:
            raise AnsibleError("%s command not found in PATH" % executable)
return cmd
def _check_domain(self, domain):
p = subprocess.Popen([self.virsh, '-q', '-c', 'lxc:///', 'dominfo', to_bytes(domain)],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
p.communicate()
if p.returncode:
raise AnsibleError("%s is not a lxc defined in libvirt" % domain)
def _connect(self):
''' connect to the lxc; nothing to do here '''
super(Connection, self)._connect()
if not self._connected:
display.vvv("THIS IS A LOCAL LXC DIR", host=self.lxc)
self._connected = True
def _buffered_exec_command(self, cmd, stdin=subprocess.PIPE):
''' run a command on the chroot. This is only needed for implementing
put_file() get_file() so that we don't have to read the whole file
into memory.
        compared to exec_command() it loses some niceties like being able to
return the process's exit code immediately.
'''
executable = C.DEFAULT_EXECUTABLE.split()[0] if C.DEFAULT_EXECUTABLE else '/bin/sh'
local_cmd = [self.virsh, '-q', '-c', 'lxc:///', 'lxc-enter-namespace']
if C.DEFAULT_LIBVIRT_LXC_NOSECLABEL:
local_cmd += ['--noseclabel']
local_cmd += [self.lxc, '--', executable, '-c', cmd]
display.vvv("EXEC %s" % (local_cmd,), host=self.lxc)
local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd]
p = subprocess.Popen(local_cmd, shell=False, stdin=stdin,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
return p
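    # For reference, the command list assembled above is equivalent to
    # running (container and command shown are illustrative):
    #
    #     virsh -q -c lxc:/// lxc-enter-namespace mycontainer -- /bin/sh -c 'uname -a'
    #
    # with '--noseclabel' inserted before the container name when
    # DEFAULT_LIBVIRT_LXC_NOSECLABEL is set.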
def exec_command(self, cmd, in_data=None, sudoable=False):
''' run a command on the chroot '''
super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
p = self._buffered_exec_command(cmd)
stdout, stderr = p.communicate(in_data)
return (p.returncode, stdout, stderr)
def _prefix_login_path(self, remote_path):
''' Make sure that we put files into a standard path
If a path is relative, then we need to choose where to put it.
ssh chooses $HOME but we aren't guaranteed that a home dir will
exist in any given chroot. So for now we're choosing "/" instead.
This also happens to be the former default.
Can revisit using $HOME instead if it's a problem
'''
if not remote_path.startswith(os.path.sep):
remote_path = os.path.join(os.path.sep, remote_path)
return os.path.normpath(remote_path)
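    # Doctest-style sketch of the normalization above (paths illustrative):
    #
    #     _prefix_login_path('foo/bar')    -> '/foo/bar'
    #     _prefix_login_path('/tmp/a/..')  -> '/tmp'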
def put_file(self, in_path, out_path):
''' transfer a file from local to lxc '''
super(Connection, self).put_file(in_path, out_path)
display.vvv("PUT %s TO %s" % (in_path, out_path), host=self.lxc)
out_path = shlex_quote(self._prefix_login_path(out_path))
try:
with open(to_bytes(in_path, errors='surrogate_or_strict'), 'rb') as in_file:
if not os.fstat(in_file.fileno()).st_size:
count = ' count=0'
else:
count = ''
try:
p = self._buffered_exec_command('dd of=%s bs=%s%s' % (out_path, BUFSIZE, count), stdin=in_file)
except OSError:
raise AnsibleError("chroot connection requires dd command in the chroot")
try:
stdout, stderr = p.communicate()
                except Exception:
traceback.print_exc()
raise AnsibleError("failed to transfer file %s to %s" % (in_path, out_path))
if p.returncode != 0:
raise AnsibleError("failed to transfer file %s to %s:\n%s\n%s" % (in_path, out_path, stdout, stderr))
except IOError:
raise AnsibleError("file or module does not exist at: %s" % in_path)
def fetch_file(self, in_path, out_path):
''' fetch a file from lxc to local '''
super(Connection, self).fetch_file(in_path, out_path)
display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self.lxc)
in_path = shlex_quote(self._prefix_login_path(in_path))
try:
p = self._buffered_exec_command('dd if=%s bs=%s' % (in_path, BUFSIZE))
except OSError:
raise AnsibleError("chroot connection requires dd command in the chroot")
with open(to_bytes(out_path, errors='surrogate_or_strict'), 'wb+') as out_file:
try:
chunk = p.stdout.read(BUFSIZE)
while chunk:
out_file.write(chunk)
chunk = p.stdout.read(BUFSIZE)
            except Exception:
traceback.print_exc()
raise AnsibleError("failed to transfer file %s to %s" % (in_path, out_path))
stdout, stderr = p.communicate()
if p.returncode != 0:
raise AnsibleError("failed to transfer file %s to %s:\n%s\n%s" % (in_path, out_path, stdout, stderr))
def close(self):
''' terminate the connection; nothing to do here '''
super(Connection, self).close()
self._connected = False
| caphrim007/ansible | lib/ansible/plugins/connection/libvirt_lxc.py | Python | gpl-3.0 | 7,515 |
# RTS2 libraries
# (C) 2009-2012 Petr Kubanek <[email protected]>
# (C) 2010-2016 Petr Kubanek, Institute of Physics
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
# populate namespace with handy classes
from scriptcomm import Rts2Comm,Rts2Exception,Rts2NotActive
from imgprocess import ImgProcess
from centering import Centering
from flats import Flat,FlatScript
from json import getProxy,createProxy
from queue import Queue,QueueEntry
from queues import Queues
from sextractor import Sextractor
from focusing import Focusing
from gpoint import GPoint
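# A minimal usage sketch (illustrative; the value name is hypothetical):
# RTS2 scripts typically subclass Rts2Comm and implement run(), e.g.
#
#     class MyScript(Rts2Comm):
#         def run(self):
#             self.setValue('exposure', 2)
#
#     MyScript().run()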
| zguangyu/rts2 | python/rts2/__init__.py | Python | gpl-2.0 | 1,237 |
import logging
from great_expectations.execution_engine import PandasExecutionEngine
from great_expectations.expectations.metrics.column_aggregate_metric_provider import (
ColumnAggregateMetricProvider,
column_aggregate_value,
)
from great_expectations.expectations.metrics.util import (
_scipy_distribution_positional_args_from_dict,
validate_distribution_parameters,
)
logger = logging.getLogger(__name__)
try:
from pyspark.sql.functions import stddev_samp
except ImportError as e:
logger.debug(str(e))
logger.debug(
"Unable to load spark context; install optional spark dependency for support."
)
from scipy import stats
class ColumnParameterizedDistributionKSTestPValue(ColumnAggregateMetricProvider):
"""MetricProvider Class for Aggregate Standard Deviation metric"""
metric_name = "column.parameterized_distribution_ks_test_p_value"
value_keys = ("distribution", "p_value", "params")
@column_aggregate_value(engine=PandasExecutionEngine)
def _pandas(cls, column, distribution, p_value=0.05, params=None, **kwargs):
if p_value <= 0 or p_value >= 1:
raise ValueError("p_value must be between 0 and 1 exclusive")
        # Validate params (propagates ValueError for a bad distribution/params)
        validate_distribution_parameters(distribution=distribution, params=params)
# Format arguments for scipy.kstest
if isinstance(params, dict):
positional_parameters = _scipy_distribution_positional_args_from_dict(
distribution, params
)
else:
positional_parameters = params
# K-S Test
ks_result = stats.kstest(column, distribution, args=positional_parameters)
return ks_result
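# Hedged example of the underlying scipy call made above: testing whether a
# sample is drawn from a standard normal distribution uses positional
# (loc, scale) arguments, e.g.
#
#     from scipy import stats
#     statistic, p_value = stats.kstest([0.1, -0.2, 0.3], 'norm', args=(0, 1))
#
# _scipy_distribution_positional_args_from_dict builds those positional
# arguments from the expectation's params dict.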
| great-expectations/great_expectations | great_expectations/expectations/metrics/column_aggregate_metrics/column_parameterized_distribution_ks_test_p_value.py | Python | apache-2.0 | 1,785 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for Split Op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
_TEST_DTYPES = (dtypes.float32, dtypes.float64, dtypes.complex64,
dtypes.complex128)
class SplitOpTest(test.TestCase):
def _makeData(self, shape, dtype):
data = np.random.rand(*shape).astype(dtype.as_numpy_dtype)
if dtype.is_complex:
data -= 1j * data
return data
@test_util.run_deprecated_v1
def testShapeInference(self):
model_input = array_ops.placeholder(dtypes.float32, shape=(1, 10))
# check that we fail during static shape inference if sizes are known
with self.assertRaises(ValueError):
# pylint: disable=expression-not-assigned
array_ops.split(model_input, [4], axis=1)[0]
# pylint: enable=expression-not-assigned
model_input = array_ops.placeholder(dtypes.float32)
inp = np.zeros((1, 10))
# check that we still fail at runtime if the shapes were unknown
with self.cached_session(use_gpu=True) as sess:
with self.assertRaises(errors_impl.InvalidArgumentError):
sess.run(array_ops.split(model_input, [4]), {model_input: inp})
# test that we can pass a scalar Tensor as num_splits
for axis in [0, -2]:
with self.cached_session(use_gpu=True) as sess:
result = sess.run(
array_ops.split(
array_ops.ones([4, 4]),
num_or_size_splits=array_ops.ones([2, 2]).get_shape()[1],
axis=axis))
self.assertEqual(result[0].shape, (2, 4))
self.assertEqual(result[1].shape, (2, 4))
# test that none split dimensions remain, even if we don't know how
# the split_dim will be split, but we do know the axis
result = array_ops.split(
array_ops.ones([5, 2]), array_ops.constant([2, 1, 2]) * 1, axis=0)
self.assertEqual(result[0].shape[1], 2)
self.assertEqual(result[1].shape[1], 2)
self.assertEqual(result[2].shape[1], 2)
model_input2 = array_ops.placeholder(dtypes.float32, shape=[None, 2])
result = array_ops.split(model_input2, [2, 2], axis=0)[0]
with self.cached_session(use_gpu=True) as sess:
sess.run(result, feed_dict={model_input2: np.ones([4, 2])})
@test_util.run_deprecated_v1
def testFailWithoutExplicitNum(self):
size_splits = array_ops.placeholder(dtype=dtypes.int32, shape=[None])
value = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
with self.session(use_gpu=True) as sess:
with self.assertRaises(ValueError) as context:
sess.run(array_ops.split(value, size_splits), {size_splits: [2, 2, 6]})
self.assertTrue("Cannot infer num from shape" in str(context.exception))
@test_util.run_in_graph_and_eager_modes
def testExplicitNum(self):
size_splits = array_ops.constant([2, 2, 6], dtype=dtypes.int32)
value = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
# Eager and Graph modes raise different exceptions
with self.assertRaises((errors_impl.InvalidArgumentError, ValueError)):
array_ops.split(value, size_splits, num=4)
r = self.evaluate(array_ops.split(value, size_splits, num=3))
self.assertAllEqual(r[0], value[0:2])
self.assertAllEqual(r[1], value[2:4])
self.assertAllEqual(r[2], value[4:])
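  # For orientation (numbers are the ones used above): with a 10-element
  # value and size_splits = [2, 2, 6], array_ops.split returns three tensors
  # holding value[0:2], value[2:4] and value[4:10]; the sizes must sum to the
  # length of the split dimension.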
@test_util.run_in_graph_and_eager_modes
def testListOfScalarTensors(self):
a = math_ops.to_int32(5)
b = math_ops.to_int32(6)
value = np.random.rand(11, 11)
with test_util.device(use_gpu=True):
result = self.evaluate(array_ops.split(value, [a, b]))
self.assertAllEqual(result[0], value[0:5, :])
self.assertAllEqual(result[1], value[5:, :])
def _RunAndVerifyVariable(self, dtype, large_num_splits=False):
# Random dims of rank 5
shape = np.random.randint(1, 5, size=5)
split_dim = np.random.randint(-5, 5)
if large_num_splits:
num_split = np.random.randint(16, 25)
else:
num_split = np.random.randint(2, 8)
size_splits = np.random.randint(2, 8, num_split, dtype=np.int32)
shape[split_dim] = np.sum(size_splits)
inp = self._makeData(shape, dtype)
with test_util.device(use_gpu=True):
result = self.evaluate(array_ops.split(inp, size_splits, split_dim))
slices = [slice(0, x) for x in shape]
offset = 0
for i in range(num_split):
slices[split_dim] = slice(offset, offset + size_splits[i])
offset += size_splits[i]
self.assertAllEqual(result[i], inp[slices])
def _testSpecialCasesVariable(self):
inp = np.random.rand(4, 4).astype("f")
with test_util.device(use_gpu=True):
result = self.evaluate(array_ops.split(inp, [4], 0))
self.assertAllEqual(result[0], inp)
result = self.evaluate(array_ops.split(inp, [-1, 3], 0))
self.assertAllEqual(result[0], inp[0:1, :])
self.assertAllEqual(result[1], inp[1:4, :])
def _testHugeNumberOfTensorsVariable(self, dtype):
num_split = 1000
size_splits = np.random.randint(1, 3, num_split, dtype=np.int32)
shape = [3, np.sum(size_splits)]
split_dim = 1
inp = self._makeData(shape, dtype)
with test_util.device(use_gpu=True):
result = self.evaluate(array_ops.split(inp, size_splits, split_dim))
slices = [slice(0, x) for x in shape]
offset = 0
for i in range(num_split):
slices[split_dim] = slice(offset, offset + size_splits[i])
offset += size_splits[i]
self.assertAllEqual(result[i], inp[slices])
@test_util.run_in_graph_and_eager_modes
def testSpecialCasesVariable(self):
self._testSpecialCasesVariable()
for dtype in _TEST_DTYPES:
self._testHugeNumberOfTensorsVariable(dtype)
@test_util.run_in_graph_and_eager_modes
def testDegenerateVariable(self):
inp = np.random.rand(4, 4).astype("f")
with test_util.device(use_gpu=True):
result = self.evaluate(array_ops.split(inp, [-1, 4], 0))
self.assertAllEqual(result[0], inp[0:0, :])
self.assertAllEqual(result[1], inp[0:4, :])
result = self.evaluate(array_ops.split(inp, [4, -1], 0))
self.assertAllEqual(result[0], inp[0:4, :])
self.assertAllEqual(result[1], inp[4:4, :])
result = self.evaluate(array_ops.split(inp, [-1, 4], 1))
self.assertAllEqual(result[0], inp[:, 0:0])
self.assertAllEqual(result[1], inp[:, 0:4])
result = self.evaluate(array_ops.split(inp, [4, -1], 1))
self.assertAllEqual(result[0], inp[:, 0:4])
self.assertAllEqual(result[1], inp[:, 4:4])
def _testGradientsSimpleVariable(self, dtype):
inp = self._makeData((4, 4), dtype)
with test_util.device(use_gpu=True):
inp_tensor = ops.convert_to_tensor(inp)
s = array_ops.split(inp_tensor, [1, 3], 1)
inp_grads = [
self._makeData((4, 1), dtype), self._makeData((4, 3), dtype)
]
grad_tensors = [constant_op.constant(x) for x in inp_grads]
grad = gradients_impl.gradients(s, [inp_tensor], grad_tensors)[-1]
result = self.evaluate(grad)
self.assertAllEqual(result[:, 0:1], inp_grads[0])
self.assertAllEqual(result[:, 1:4], inp_grads[1])
@test_util.run_deprecated_v1
def testOutputShape(self):
for axis in [1, -1]:
with self.cached_session(use_gpu=True):
tensor = array_ops.placeholder(dtypes.float32, shape=[None, 12])
size_splits = [3, 7, 2]
outputs = array_ops.split(tensor, size_splits, axis)
for i, output in enumerate(outputs):
self.assertEqual(output.get_shape().as_list(), [None, size_splits[i]])
def _compare(self, x, dim, num):
np_ans = np.split(x, num, dim)
with test_util.device(use_gpu=True):
tf_ans = array_ops.split(value=x, num_or_size_splits=num, axis=dim)
out = self.evaluate(tf_ans)
    self.assertEqual(num, len(np_ans))
self.assertEqual(num, len(out))
for i in range(num):
self.assertAllEqual(np_ans[i], out[i])
self.assertShapeEqual(np_ans[i], tf_ans[i])
@test_util.run_in_graph_and_eager_modes
def testSplitRows(self):
for dtype in _TEST_DTYPES:
inp = self._makeData((4, 4), dtype)
self._compare(inp, 0, 4)
@test_util.run_in_graph_and_eager_modes
def testSplitCols(self):
for dtype in _TEST_DTYPES:
inp = self._makeData((4, 4), dtype)
self._compare(inp, 1, 4)
def _testEmpty(self, x, dim, num, expected_shape):
with test_util.device(use_gpu=True):
tf_ans = array_ops.split(value=x, num_or_size_splits=num, axis=dim)
out = self.evaluate(tf_ans)
self.assertEqual(x.size, 0)
self.assertEqual(len(out), num)
for i in range(num):
self.assertEqual(out[i].shape, expected_shape)
self.assertEqual(expected_shape, tf_ans[i].get_shape())
@test_util.run_in_graph_and_eager_modes
def testEmpty(self):
# Note: np.split returns a rank-0 empty ndarray
# if the input ndarray is empty.
for dtype in _TEST_DTYPES:
inp = self._makeData((8, 0, 21), dtype)
self._testEmpty(inp, 0, 2, (4, 0, 21))
self._testEmpty(inp, 0, 4, (2, 0, 21))
self._testEmpty(inp, 1, 4, (8, 0, 21))
self._testEmpty(inp, 2, 3, (8, 0, 7))
self._testEmpty(inp, 2, 7, (8, 0, 3))
@test_util.run_in_graph_and_eager_modes
def testIdentity(self):
for dtype in _TEST_DTYPES:
inp = self._makeData((2, 2, 2), dtype)
self._compare(inp, 0, 1)
self._compare(inp, 1, 1)
self._compare(inp, 2, 1)
@test_util.run_in_graph_and_eager_modes
def testSplitDim0(self):
for dtype in _TEST_DTYPES:
self._compare(self._makeData((6, 10, 18), dtype), 0, 3)
self._compare(self._makeData((6, 7, 18), dtype), 0, 3)
self._compare(self._makeData((6, 7, 9), dtype), 0, 3)
def _RunAndVerify(self, dtype, large_num_splits=False):
# Random dims of rank 5
shape = np.random.randint(0, 5, size=5)
split_dim = np.random.randint(-5, 5)
if large_num_splits:
num_split = np.random.randint(9, 15)
else:
num_split = np.random.randint(2, 8)
shape[split_dim] = np.random.randint(2, 5) * num_split
inp = self._makeData(shape, dtype)
with test_util.device(use_gpu=True):
result = self.evaluate(
array_ops.split(
value=inp, num_or_size_splits=num_split, axis=split_dim))
slices = [slice(0, x) for x in shape]
offset = 0
length = shape[split_dim] // num_split
for i in range(num_split):
slices[split_dim] = slice(offset, offset + length)
offset += length
self.assertAllEqual(result[i], inp[slices])
@test_util.run_in_graph_and_eager_modes
def testRandom(self):
for dtype in _TEST_DTYPES:
for _ in range(5):
self._RunAndVerify(dtype)
self._RunAndVerify(dtype, large_num_splits=True)
self._RunAndVerifyVariable(dtype)
self._RunAndVerifyVariable(dtype, large_num_splits=True)
def _testGradientsSimple(self, dtype):
inp = self._makeData((4, 4), dtype)
with self.cached_session(use_gpu=True):
inp_tensor = ops.convert_to_tensor(inp)
s = array_ops.split(value=inp_tensor, num_or_size_splits=4, axis=1)
      inp_grads = [self._makeData((4, 1), dtype) for _ in range(4)]
grad_tensors = [constant_op.constant(x) for x in inp_grads]
grad = gradients_impl.gradients(s, [inp_tensor], grad_tensors)[0]
result = self.evaluate(grad)
for i in range(4):
self.assertAllEqual(result[:, i:i + 1], inp_grads[i])
@test_util.run_deprecated_v1
def testGradientsAll(self):
for dtype in _TEST_DTYPES:
self._testGradientsSimple(dtype)
self._testGradientsSimpleVariable(dtype)
@test_util.run_deprecated_v1
def testShapeFunctionEdgeCases(self):
# split_dim greater than rank of input.
with self.assertRaises(ValueError):
array_ops.split(value=[[0, 1], [2, 3]], num_or_size_splits=4, axis=2)
# split dim less than -(rank of input)
with self.assertRaises(ValueError):
array_ops.split(value=[[0, 1], [2, 3]], num_or_size_splits=4, axis=-3)
# num_split does not evenly divide the size in split_dim.
with self.assertRaisesRegexp(ValueError, "should evenly divide"):
array_ops.split(value=[0, 1, 2, 3], num_or_size_splits=3, axis=0)
# Unknown split_dim.
splits = array_ops.split(
value=[[0, 1, 2, 3]],
num_or_size_splits=4,
axis=array_ops.placeholder(dtypes.int32))
for s in splits:
self.assertEqual([None, None], s.get_shape().as_list())
# Unknown split_dim and input shape.
splits = array_ops.split(
value=array_ops.placeholder(dtypes.float32),
num_or_size_splits=4,
axis=array_ops.placeholder(dtypes.int32))
for s in splits:
self.assertEqual(None, s.get_shape().ndims)
@test_util.run_deprecated_v1
def testVariableShapeFunction(self):
# size_splits too big
with self.assertRaises(ValueError):
array_ops.split([0, 1], [3, -1], axis=0)
# Correct inference of variable dimension
s0, s1 = array_ops.split([0, 1, 2], [2, -1], axis=0)
assert s0.shape.as_list() == [2]
assert s1.shape.as_list() == [1]
@test_util.run_deprecated_v1
@test_util.disable_xla("b/123337890") # Error messages differ
def testNonexistentDimTensor(self):
x = array_ops.placeholder(dtypes.int32)
values = np.zeros([5, 30])
splits = array_ops.placeholder(dtypes.int32)
with self.assertRaisesRegexp(ValueError, "Cannot infer"):
y = array_ops.split(values, splits, axis=x)
splits = array_ops.placeholder(dtypes.int32, [3])
y = array_ops.split(values, splits, axis=x)
with self.session(use_gpu=True) as sess:
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"must have exactly one element"):
sess.run(y, {x: np.array([], dtype=np.int32), splits: [4, 11, 15]})
if __name__ == "__main__":
test.main()
| jendap/tensorflow | tensorflow/python/kernel_tests/split_op_test.py | Python | apache-2.0 | 14,937 |
# Copyright 2014-2017 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Refer to the README and COPYING files for full details of the license
#
from __future__ import absolute_import
from __future__ import division
import errno
import os
from vdsm.network import cmd
EXT_TC = '/sbin/tc'
_TC_ERR_PREFIX = 'RTNETLINK answers: '
_errno_trans = dict(((os.strerror(code), code) for code in errno.errorcode))
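# Maps kernel error strings back to errno codes, e.g.
# 'No such file or directory' -> errno.ENOENT (2) on a typical libc.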
def process_request(command):
command.insert(0, EXT_TC)
retcode, out, err = cmd.exec_sync(command)
if retcode != 0:
if retcode == 2 and err:
for err_line in err.splitlines():
if err_line.startswith(_TC_ERR_PREFIX):
err = err_line
retcode = _errno_trans.get(
err[len(_TC_ERR_PREFIX) :].strip() # noqa: E203
)
break
raise TrafficControlException(retcode, err, command)
return out
class TrafficControlException(Exception):
def __init__(self, errCode, message, command):
self.errCode = errCode
self.msg = message
self.command = command
Exception.__init__(self, self.errCode, self.msg, self.command)
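# Minimal usage sketch (hypothetical device name; assumes /sbin/tc exists):
#   try:
#       out = process_request(['qdisc', 'show', 'dev', 'eth0'])
#   except TrafficControlException as e:
#       print(e.errCode, e.msg, e.command)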
| nirs/vdsm | lib/vdsm/network/tc/_wrapper.py | Python | gpl-2.0 | 1,879 |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from openerp import api, fields, models
class account_analytic_tag(models.Model):
_name = 'account.analytic.tag'
_description = 'Analytic Tags'
name = fields.Char(string='Analytic Tag', index=True, required=True)
color = fields.Integer('Color Index')
class account_analytic_account(models.Model):
_name = 'account.analytic.account'
_inherit = ['mail.thread']
_description = 'Analytic Account'
_order = 'code, name asc'
@api.multi
def _compute_debit_credit_balance(self):
domain = [('account_id', 'in', self.ids),
('company_id', '=', self.env.user.company_id.id)]
if self._context.get('from_date', False):
domain += [('date', '>=', self._context['from_date'])]
if self._context.get('to_date', False):
domain += [('date', '<=', self._context['to_date'])]
# compute debits
debit_domain = domain + [('amount', '<', 0.0)]
debits = self.env['account.analytic.line'].read_group(
debit_domain, ['account_id', 'amount'], ['account_id'])
debits = {amount['account_id'][0]: amount['amount']
for amount in debits}
# compute credits
credit_domain = domain + [('amount', '>', 0.0)]
credits = self.env['account.analytic.line'].read_group(
credit_domain, ['account_id', 'amount'], ['account_id'])
credits = {amount['account_id'][0]: amount['amount']
for amount in credits}
for account in self:
account.credit = credits.get(account.id, 0.0)
account.debit = abs(debits.get(account.id, 0.0))
account.balance = account.credit - account.debit
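    # Sign convention: analytic lines with negative amounts are debits
    # (reported as absolute values), positive amounts are credits, and
    # balance = credit - debit.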
name = fields.Char(string='Analytic Account', index=True, required=True, track_visibility='onchange')
code = fields.Char(string='Reference', index=True, track_visibility='onchange')
# FIXME: we reused account_type to implement the closed accounts (feature removed by mistake on release of v9) without modifying the schemas on already released v9, but it would be more clean to rename it
account_type = fields.Selection([
('normal', 'Active'),
('closed', 'Archived')
], string='State', required=True, default='normal')
tag_ids = fields.Many2many('account.analytic.tag', 'account_analytic_account_tag_rel', 'account_id', 'tag_id', string='Tags', copy=True)
line_ids = fields.One2many('account.analytic.line', 'account_id', string="Analytic Lines")
company_id = fields.Many2one('res.company', string='Company', required=True, default=lambda self: self.env.user.company_id)
# use auto_join to speed up name_search call
partner_id = fields.Many2one('res.partner', string='Customer', auto_join=True)
balance = fields.Monetary(compute='_compute_debit_credit_balance', string='Balance')
debit = fields.Monetary(compute='_compute_debit_credit_balance', string='Debit')
credit = fields.Monetary(compute='_compute_debit_credit_balance', string='Credit')
currency_id = fields.Many2one(related="company_id.currency_id", string="Currency", readonly=True)
@api.multi
def name_get(self):
res = []
for analytic in self:
name = analytic.name
if analytic.code:
                name = '[' + analytic.code + '] ' + name
            if analytic.partner_id:
                name = name + ' - ' + analytic.partner_id.commercial_partner_id.name
res.append((analytic.id, name))
return res
@api.model
def name_search(self, name='', args=None, operator='ilike', limit=100):
if operator not in ('ilike', 'like', '=', '=like', '=ilike'):
return super(account_analytic_account, self).name_search(name, args, operator, limit)
args = args or []
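        # Odoo domains use prefix notation: the final ['|', '|', A, B, C]
        # below reads as A OR B OR C (code, name, or partner name match).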
domain = ['|', ('code', operator, name), ('name', operator, name)]
partners = self.env['res.partner'].search([('name', operator, name)], limit=limit)
if partners:
domain = ['|'] + domain + [('partner_id', 'in', partners.ids)]
recs = self.search(domain + args, limit=limit)
return recs.name_get()
class account_analytic_line(models.Model):
_name = 'account.analytic.line'
_description = 'Analytic Line'
_order = 'date desc, id desc'
@api.model
def _default_user(self):
return self.env.context.get('user_id', self.env.user.id)
name = fields.Char('Description', required=True)
date = fields.Date('Date', required=True, index=True, default=fields.Date.context_today)
amount = fields.Monetary('Amount', required=True, default=0.0)
unit_amount = fields.Float('Quantity', default=0.0)
account_id = fields.Many2one('account.analytic.account', 'Analytic Account', required=True, ondelete='restrict', index=True)
partner_id = fields.Many2one('res.partner', string='Partner')
user_id = fields.Many2one('res.users', string='User', default=_default_user)
tag_ids = fields.Many2many('account.analytic.tag', 'account_analytic_line_tag_rel', 'line_id', 'tag_id', string='Tags', copy=True)
company_id = fields.Many2one(related='account_id.company_id', string='Company', store=True, readonly=True)
currency_id = fields.Many2one(related="company_id.currency_id", string="Currency", readonly=True)
| Elico-Corp/odoo_OCB | addons/analytic/models/analytic.py | Python | agpl-3.0 | 5,398 |
# -*- coding: utf-8 -*-
"""
***************************************************************************
test_qgspointdisplacementrenderer.py
-----------------------------
Date : September 2016
Copyright : (C) 2016 by Nyall Dawson
Email : nyall dot dawson at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
From build dir, run: ctest -R PyQgsPointDisplacementRenderer -V
"""
__author__ = 'Nyall Dawson'
__date__ = 'September 2016'
__copyright__ = '(C) 2016, Nyall Dawson'
import qgis # NOQA
import os
from qgis.PyQt.QtGui import QColor
from qgis.PyQt.QtCore import QSize, QThreadPool, QDir
from qgis.PyQt.QtXml import QDomDocument
from qgis.core import (QgsVectorLayer,
QgsProject,
QgsRectangle,
QgsMultiRenderChecker,
QgsPointDisplacementRenderer,
QgsFontUtils,
QgsUnitTypes,
QgsMapUnitScale,
QgsMarkerSymbol,
QgsCategorizedSymbolRenderer,
QgsRendererCategory,
QgsSingleSymbolRenderer,
QgsPointClusterRenderer,
QgsMapSettings,
QgsProperty,
QgsReadWriteContext,
QgsSymbolLayer,
QgsRenderContext
)
from qgis.testing import start_app, unittest
from utilities import unitTestDataPath
# Convenience instances in case you need them
# (not used in this test)
start_app()
TEST_DATA_DIR = unitTestDataPath()
class TestQgsPointDisplacementRenderer(unittest.TestCase):
def setUp(self):
self.report = "<h1>Python QgsPointDisplacementRenderer Tests</h1>\n"
def tearDown(self):
report_file_path = "%s/qgistest.html" % QDir.tempPath()
with open(report_file_path, 'a') as report_file:
report_file.write(self.report)
def _setUp(self):
myShpFile = os.path.join(TEST_DATA_DIR, 'points.shp')
layer = QgsVectorLayer(myShpFile, 'Points', 'ogr')
QgsProject.instance().addMapLayer(layer)
renderer = QgsPointDisplacementRenderer()
sym1 = QgsMarkerSymbol.createSimple({'color': '#ff00ff', 'size': '3', 'outline_style': 'no'})
sym_renderer = QgsSingleSymbolRenderer(sym1)
renderer.setEmbeddedRenderer(sym_renderer)
renderer.setCircleRadiusAddition(2)
renderer.setCircleWidth(1)
renderer.setCircleColor(QColor(0, 0, 0))
renderer.setCenterSymbol(QgsMarkerSymbol.createSimple({'color': '#ffff00', 'size': '3', 'outline_style': 'no'}))
layer.setRenderer(renderer)
rendered_layers = [layer]
mapsettings = QgsMapSettings()
mapsettings.setOutputSize(QSize(400, 400))
mapsettings.setOutputDpi(96)
mapsettings.setExtent(QgsRectangle(-123, 18, -70, 52))
mapsettings.setLayers(rendered_layers)
return layer, renderer, mapsettings
def _tearDown(self, layer):
# QgsProject.instance().removeAllMapLayers()
QgsProject.instance().removeMapLayer(layer)
def _setProperties(self, r):
""" set properties for a renderer for testing with _checkProperties"""
r.setLabelAttributeName('name')
f = QgsFontUtils.getStandardTestFont('Bold Oblique', 14)
r.setLabelFont(f)
r.setMinimumLabelScale(50000)
r.setLabelColor(QColor(255, 0, 0))
r.setTolerance(5)
r.setToleranceUnit(QgsUnitTypes.RenderMapUnits)
r.setToleranceMapUnitScale(QgsMapUnitScale(5, 15))
r.setCircleWidth(15)
r.setCircleColor(QColor(0, 255, 0))
r.setCircleRadiusAddition(2.5)
r.setPlacement(QgsPointDisplacementRenderer.ConcentricRings)
r.setLabelDistanceFactor(0.25)
m = QgsMarkerSymbol()
m.setColor(QColor(0, 255, 0))
r.setCenterSymbol(m)
sym1 = QgsMarkerSymbol.createSimple({'color': '#fdbf6f'})
renderer = QgsSingleSymbolRenderer(sym1)
r.setEmbeddedRenderer(renderer)
def _checkProperties(self, r):
""" test properties of renderer against expected"""
self.assertEqual(r.labelAttributeName(), 'name')
f = QgsFontUtils.getStandardTestFont('Bold Oblique', 14)
self.assertEqual(r.labelFont().styleName(), f.styleName())
self.assertEqual(r.minimumLabelScale(), 50000)
self.assertEqual(r.labelColor(), QColor(255, 0, 0))
self.assertEqual(r.tolerance(), 5)
self.assertEqual(r.toleranceUnit(), QgsUnitTypes.RenderMapUnits)
self.assertEqual(r.toleranceMapUnitScale(), QgsMapUnitScale(5, 15))
self.assertEqual(r.circleWidth(), 15)
self.assertEqual(r.circleColor(), QColor(0, 255, 0))
self.assertEqual(r.circleRadiusAddition(), 2.5)
self.assertEqual(r.placement(), QgsPointDisplacementRenderer.ConcentricRings)
self.assertEqual(r.centerSymbol().color(), QColor(0, 255, 0))
self.assertEqual(r.embeddedRenderer().symbol().color().name(), '#fdbf6f')
self.assertEqual(r.labelDistanceFactor(), 0.25)
def _create_categorized_renderer(self):
cat_renderer = QgsCategorizedSymbolRenderer(attrName='Class')
sym1 = QgsMarkerSymbol.createSimple({'color': '#ff00ff', 'size': '6', 'outline_style': 'no'})
cat1 = QgsRendererCategory('Biplane', sym1, 'Big')
cat_renderer.addCategory(cat1)
sym2 = QgsMarkerSymbol.createSimple({'color': '#ff00ff', 'size': '3', 'outline_style': 'no'})
cat2 = QgsRendererCategory(['B52', 'Jet'], sym2, 'Smaller')
cat_renderer.addCategory(cat2)
return cat_renderer
def testGettersSetters(self):
""" test getters and setters """
r = QgsPointDisplacementRenderer()
self._setProperties(r)
self._checkProperties(r)
def testClone(self):
""" test cloning renderer """
r = QgsPointDisplacementRenderer()
self._setProperties(r)
c = r.clone()
self._checkProperties(c)
def testSaveCreate(self):
""" test saving and recreating from XML """
r = QgsPointDisplacementRenderer()
self._setProperties(r)
doc = QDomDocument("testdoc")
elem = r.save(doc, QgsReadWriteContext())
c = QgsPointDisplacementRenderer.create(elem, QgsReadWriteContext())
self._checkProperties(c)
def testConvert(self):
""" test renderer conversion """
# same type, should clone
r = QgsPointDisplacementRenderer()
self._setProperties(r)
c = QgsPointDisplacementRenderer.convertFromRenderer(r)
self._checkProperties(c)
# test conversion from cluster renderer
r = QgsPointClusterRenderer()
r.setTolerance(5)
r.setToleranceUnit(QgsUnitTypes.RenderMapUnits)
r.setToleranceMapUnitScale(QgsMapUnitScale(5, 15))
m = QgsMarkerSymbol()
m.setColor(QColor(0, 255, 0))
r.setClusterSymbol(m)
sym1 = QgsMarkerSymbol.createSimple({'color': '#fdbf6f'})
renderer = QgsSingleSymbolRenderer(sym1)
r.setEmbeddedRenderer(renderer)
# want to keep as many settings as possible when converting between cluster and displacement renderer
d = QgsPointDisplacementRenderer.convertFromRenderer(r)
self.assertEqual(d.tolerance(), 5)
self.assertEqual(d.toleranceUnit(), QgsUnitTypes.RenderMapUnits)
self.assertEqual(d.toleranceMapUnitScale(), QgsMapUnitScale(5, 15))
self.assertEqual(d.centerSymbol().color(), QColor(0, 255, 0))
self.assertEqual(d.embeddedRenderer().symbol().color().name(), '#fdbf6f')
def testRenderNoCluster(self):
layer, renderer, mapsettings = self._setUp()
layer.renderer().setTolerance(1)
renderchecker = QgsMultiRenderChecker()
renderchecker.setMapSettings(mapsettings)
renderchecker.setControlPathPrefix('displacement_renderer')
renderchecker.setControlName('expected_displacement_no_cluster')
res = renderchecker.runTest('displacement_no_cluster')
self.report += renderchecker.report()
self.assertTrue(res)
self._tearDown(layer)
def testRenderWithin(self):
layer, renderer, mapsettings = self._setUp()
layer.renderer().setTolerance(10)
renderchecker = QgsMultiRenderChecker()
renderchecker.setMapSettings(mapsettings)
renderchecker.setControlPathPrefix('displacement_renderer')
renderchecker.setControlName('expected_displacement_cluster')
res = renderchecker.runTest('expected_displacement_cluster')
self.report += renderchecker.report()
self.assertTrue(res)
self._tearDown(layer)
def testRenderVariables(self):
""" test rendering with expression variables in marker """
layer, renderer, mapsettings = self._setUp()
layer.renderer().setTolerance(10)
old_marker = layer.renderer().centerSymbol().clone()
new_marker = QgsMarkerSymbol.createSimple({'color': '#ffff00', 'size': '3', 'outline_style': 'no'})
new_marker.symbolLayer(0).setDataDefinedProperty(QgsSymbolLayer.PropertyFillColor,
QgsProperty.fromExpression('@cluster_color'))
new_marker.symbolLayer(0).setDataDefinedProperty(QgsSymbolLayer.PropertySize,
QgsProperty.fromExpression('@cluster_size*2'))
layer.renderer().setCenterSymbol(new_marker)
renderchecker = QgsMultiRenderChecker()
renderchecker.setMapSettings(mapsettings)
renderchecker.setControlPathPrefix('displacement_renderer')
renderchecker.setControlName('expected_displacement_variables')
result = renderchecker.runTest('expected_displacement_variables')
self.report += renderchecker.report()
layer.renderer().setCenterSymbol(old_marker)
self.assertTrue(result)
self._tearDown(layer)
def testRenderGrid(self):
layer, renderer, mapsettings = self._setUp()
layer.renderer().setTolerance(10)
layer.renderer().setPlacement(QgsPointDisplacementRenderer.Grid)
renderchecker = QgsMultiRenderChecker()
renderchecker.setMapSettings(mapsettings)
renderchecker.setControlPathPrefix('displacement_renderer')
renderchecker.setControlName('expected_displacement_grid')
res = renderchecker.runTest('expected_displacement_grid')
self.report += renderchecker.report()
self.assertTrue(res)
self._tearDown(layer)
def testRenderGridAdjust(self):
layer, renderer, mapsettings = self._setUp()
layer.renderer().setTolerance(10)
layer.renderer().setCircleRadiusAddition(5)
layer.renderer().setPlacement(QgsPointDisplacementRenderer.Grid)
layer.renderer().setCircleColor(QColor())
renderchecker = QgsMultiRenderChecker()
renderchecker.setMapSettings(mapsettings)
renderchecker.setControlPathPrefix('displacement_renderer')
renderchecker.setControlName('expected_displacement_adjust_grid')
res = renderchecker.runTest('expected_displacement_adjust_grid')
self.report += renderchecker.report()
self.assertTrue(res)
self._tearDown(layer)
def testClusterRingLabels(self):
layer, renderer, mapsettings = self._setUp()
layer.renderer().setTolerance(10)
layer.renderer().setLabelAttributeName('Class')
layer.renderer().setLabelDistanceFactor(0.35)
f = QgsFontUtils.getStandardTestFont('Bold', 14)
layer.renderer().setLabelFont(f)
renderchecker = QgsMultiRenderChecker()
renderchecker.setMapSettings(mapsettings)
renderchecker.setControlPathPrefix('displacement_renderer')
renderchecker.setControlName('expected_displacement_cluster_ring_labels')
res = renderchecker.runTest('expected_displacement_cluster_ring_labels')
self.report += renderchecker.report()
self.assertTrue(res)
self._tearDown(layer)
def testClusterGridLabels(self):
layer, renderer, mapsettings = self._setUp()
layer.renderer().setTolerance(10)
layer.renderer().setLabelAttributeName('Class')
layer.renderer().setLabelDistanceFactor(0.35)
f = QgsFontUtils.getStandardTestFont('Bold', 14)
layer.renderer().setLabelFont(f)
layer.renderer().setPlacement(QgsPointDisplacementRenderer.Grid)
renderchecker = QgsMultiRenderChecker()
renderchecker.setMapSettings(mapsettings)
renderchecker.setControlPathPrefix('displacement_renderer')
renderchecker.setControlName('expected_displacement_cluster_grid_labels')
res = renderchecker.runTest('expected_displacement_cluster_grid_labels')
self.report += renderchecker.report()
self.assertTrue(res)
self._tearDown(layer)
def testClusterConcentricLabels(self):
layer, renderer, mapsettings = self._setUp()
layer.renderer().setTolerance(10)
layer.renderer().setLabelAttributeName('Class')
layer.renderer().setLabelDistanceFactor(0.35)
f = QgsFontUtils.getStandardTestFont('Bold', 14)
layer.renderer().setLabelFont(f)
layer.renderer().setPlacement(QgsPointDisplacementRenderer.ConcentricRings)
renderchecker = QgsMultiRenderChecker()
renderchecker.setMapSettings(mapsettings)
renderchecker.setControlPathPrefix('displacement_renderer')
renderchecker.setControlName('expected_displacement_cluster_concentric_labels')
res = renderchecker.runTest('expected_displacement_cluster_concentric_labels')
self.report += renderchecker.report()
self.assertTrue(res)
self._tearDown(layer)
def testClusterRingLabelsDifferentSizes(self):
layer, renderer, mapsettings = self._setUp()
renderer.setEmbeddedRenderer(self._create_categorized_renderer())
layer.renderer().setTolerance(10)
layer.renderer().setLabelAttributeName('Class')
layer.renderer().setLabelDistanceFactor(0.35)
f = QgsFontUtils.getStandardTestFont('Bold', 14)
layer.renderer().setLabelFont(f)
renderchecker = QgsMultiRenderChecker()
renderchecker.setMapSettings(mapsettings)
renderchecker.setControlPathPrefix('displacement_renderer')
renderchecker.setControlName('expected_displacement_cluster_ring_labels_diff_size')
res = renderchecker.runTest('expected_displacement_cluster_ring_labels_diff_size')
self.report += renderchecker.report()
self.assertTrue(res)
self._tearDown(layer)
def testClusterGridLabelsDifferentSizes(self):
layer, renderer, mapsettings = self._setUp()
renderer.setEmbeddedRenderer(self._create_categorized_renderer())
layer.renderer().setTolerance(10)
layer.renderer().setLabelAttributeName('Class')
layer.renderer().setLabelDistanceFactor(0.35)
f = QgsFontUtils.getStandardTestFont('Bold', 14)
layer.renderer().setLabelFont(f)
layer.renderer().setPlacement(QgsPointDisplacementRenderer.Grid)
renderchecker = QgsMultiRenderChecker()
renderchecker.setMapSettings(mapsettings)
renderchecker.setControlPathPrefix('displacement_renderer')
renderchecker.setControlName('expected_displacement_cluster_grid_labels_diff_size')
res = renderchecker.runTest('expected_displacement_cluster_grid_labels_diff_size')
self.report += renderchecker.report()
self.assertTrue(res)
self._tearDown(layer)
def testClusterConcentricLabelsDifferentSizes(self):
layer, renderer, mapsettings = self._setUp()
renderer.setEmbeddedRenderer(self._create_categorized_renderer())
layer.renderer().setTolerance(10)
layer.renderer().setLabelAttributeName('Class')
layer.renderer().setLabelDistanceFactor(0.35)
f = QgsFontUtils.getStandardTestFont('Bold', 14)
layer.renderer().setLabelFont(f)
layer.renderer().setPlacement(QgsPointDisplacementRenderer.ConcentricRings)
renderchecker = QgsMultiRenderChecker()
renderchecker.setMapSettings(mapsettings)
renderchecker.setControlPathPrefix('displacement_renderer')
renderchecker.setControlName('expected_displacement_cluster_concentric_labels_diff_size')
res = renderchecker.runTest('expected_displacement_cluster_concentric_labels_diff_size')
self.report += renderchecker.report()
self.assertTrue(res)
self._tearDown(layer)
def testClusterRingLabelsDifferentSizesFarther(self):
layer, renderer, mapsettings = self._setUp()
renderer.setEmbeddedRenderer(self._create_categorized_renderer())
layer.renderer().setTolerance(10)
layer.renderer().setLabelAttributeName('Class')
layer.renderer().setLabelDistanceFactor(1)
f = QgsFontUtils.getStandardTestFont('Bold', 14)
layer.renderer().setLabelFont(f)
renderchecker = QgsMultiRenderChecker()
renderchecker.setMapSettings(mapsettings)
renderchecker.setControlPathPrefix('displacement_renderer')
renderchecker.setControlName('expected_displacement_cluster_ring_labels_diff_size_farther')
res = renderchecker.runTest('expected_displacement_cluster_ring_labels_diff_size_farther')
self.report += renderchecker.report()
self.assertTrue(res)
self._tearDown(layer)
def testClusterGridLabelsDifferentSizesFarther(self):
layer, renderer, mapsettings = self._setUp()
renderer.setEmbeddedRenderer(self._create_categorized_renderer())
layer.renderer().setTolerance(10)
layer.renderer().setLabelAttributeName('Class')
layer.renderer().setLabelDistanceFactor(1)
layer.renderer().setPlacement(QgsPointDisplacementRenderer.Grid)
f = QgsFontUtils.getStandardTestFont('Bold', 14)
layer.renderer().setLabelFont(f)
renderchecker = QgsMultiRenderChecker()
renderchecker.setMapSettings(mapsettings)
renderchecker.setControlPathPrefix('displacement_renderer')
renderchecker.setControlName('expected_displacement_cluster_grid_labels_diff_size_farther')
res = renderchecker.runTest('expected_displacement_cluster_grid_labels_diff_size_farther')
self.report += renderchecker.report()
self.assertTrue(res)
self._tearDown(layer)
def testClusterConcentricLabelsDifferentSizesFarther(self):
layer, renderer, mapsettings = self._setUp()
renderer.setEmbeddedRenderer(self._create_categorized_renderer())
layer.renderer().setTolerance(10)
layer.renderer().setLabelAttributeName('Class')
layer.renderer().setLabelDistanceFactor(1)
f = QgsFontUtils.getStandardTestFont('Bold', 14)
layer.renderer().setLabelFont(f)
layer.renderer().setPlacement(QgsPointDisplacementRenderer.ConcentricRings)
renderchecker = QgsMultiRenderChecker()
renderchecker.setMapSettings(mapsettings)
renderchecker.setControlPathPrefix('displacement_renderer')
renderchecker.setControlName('expected_displacement_cluster_concentric_labels_diff_size_farther')
res = renderchecker.runTest('expected_displacement_cluster_concentric_labels_diff_size_farther')
self.report += renderchecker.report()
self.assertTrue(res)
self._tearDown(layer)
def testUsedAttributes(self):
layer, renderer, mapsettings = self._setUp()
ctx = QgsRenderContext.fromMapSettings(mapsettings)
self.assertCountEqual(renderer.usedAttributes(ctx), {})
if __name__ == '__main__':
unittest.main()
| uclaros/QGIS | tests/src/python/test_qgspointdisplacementrenderer.py | Python | gpl-2.0 | 20,496 |
# Copyright (C) 2011 Philter Phactory Ltd.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE X
# CONSORTIUM BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# Except as contained in this notice, the name of Philter Phactory Ltd. shall
# not be used in advertising or otherwise to promote the sale, use or other
# dealings in this Software without prior written authorization from Philter
# Phactory Ltd..
#
from django.conf.urls.defaults import *
import views
urlpatterns = patterns('',
(r'^(?P<view_name>[^/]+)/(?P<weavr_name>[^/]+)$',
views.generic_js_view),
(r'^api/(?P<weavr_name>[^/]+)/(?P<method>[^/]+)/$',
views.apiProxy),
)
| philterphactory/prosthetic-runner | jswrapper/urls.py | Python | mit | 1,676 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import random
import pygame
from pygame.locals import *
import util
class Tile:
def __init__(self, color, image = None):
self.color = color
self.image = image
class Shape(object):
SHAPE_WIDTH = 4
SHAPE_HEIGHT = 4
SHAPES = (
( ((0,0,0,0), #
(0,1,1,0), # [][]
(0,1,1,0), # [][]
(0,0,0,0),), #
),
( ((0,0,0,0), #
(1,1,1,1), # [][][][]
(0,0,0,0), #
(0,0,0,0),), #
((0,1,0,0), # []
(0,1,0,0), # []
(0,1,0,0), # []
(0,1,0,0),), # []
),
( ((0,0,0,0), #
(0,1,1,0), # [][]
(1,1,0,0), # [][]
(0,0,0,0),), #
((1,0,0,0), # []
(1,1,0,0), # [][]
(0,1,0,0), # []
(0,0,0,0),),
),
( ((0,0,0,0), #
(1,1,0,0), # [][]
(0,1,1,0), # [][]
(0,0,0,0),), #
((0,1,0,0), # []
(1,1,0,0), # [][]
(1,0,0,0), # []
(0,0,0,0),), #
),
( ((0,0,0,0), #
(1,1,1,0), # [][][]
(1,0,0,0), # []
(0,0,0,0),), #
((0,1,0,0), # []
(0,1,0,0), # []
(0,1,1,0), # [][]
(0,0,0,0),), #
((0,0,1,0), # []
(1,1,1,0), # [][][]
(0,0,0,0), #
(0,0,0,0),), #
((1,1,0,0), # [][]
(0,1,0,0), # []
(0,1,0,0), # []
(0,0,0,0),), #
),
( ((0,0,0,0), #
(1,1,1,0), # [][][]
(0,0,1,0), # []
(0,0,0,0),), #
((0,1,1,0), # [][]
(0,1,0,0), # []
(0,1,0,0), # []
(0,0,0,0),), #
((1,0,0,0), # []
(1,1,1,0), # [][][]
(0,0,0,0), #
(0,0,0,0),), #
((0,1,0,0), # []
(0,1,0,0), # []
(1,1,0,0), # [][]
(0,0,0,0),), #
),
( ((0,0,0,0), #
(1,1,1,0), # [][][]
(0,1,0,0), # []
(0,0,0,0),), #
((0,1,0,0), # []
(0,1,1,0), # [][]
(0,1,0,0), # []
(0,0,0,0),), #
((0,1,0,0), # []
(1,1,1,0), # [][][]
(0,0,0,0), #
(0,0,0,0),), #
((0,1,0,0), # []
(1,1,0,0), # [][]
(0,1,0,0), # []
(0,0,0,0),), #
),
)
COLORS = ((0xcc, 0x66, 0x66),(0x66, 0xcc, 0x66), \
(0x66, 0x66, 0xcc),(0xcc, 0xcc, 0x66), \
(0xcc, 0x66, 0xcc),(0x66, 0xcc, 0xcc), \
(0xda, 0xaa, 0x00))
def __init__(self, board_start, (board_w, board_h), (w, h)):
self.start = board_start
        self.W, self.H = w, h # board size in tiles (columns, rows)
        self.length = board_w / w # pixel size of one square tile
self.x = 0
self.y = 0
self.index = 0 # the type of shape
self.indexN = 0 # the type of next shape
        self.subindex = 0 # rotation index within the current shape
        self.shapes = [] # rotation variants of the current shape
self.color = ()
self.shape = None
self.image = pygame.Surface((self.length*self.SHAPE_WIDTH, \
self.length*self.SHAPE_HEIGHT), SRCALPHA, 32)
self.image_next = pygame.Surface((self.length*self.SHAPE_WIDTH, \
self.length*self.SHAPE_HEIGHT), SRCALPHA, 32)
        self.board = [] #the current board state
self.new()
def set_board(self, board):
self.board = board
def new(self):
self.x = self.W /2 - 2
self.y = 0
self.index = self.indexN
self.shapes = self.SHAPES[self.index]
self.subindex = random.randint(0, len(self.shapes) - 1)
self.color = self.COLORS[self.index]
self.shape = self.shapes[self.subindex]
self.indexN = random.randint(0, len(self.SHAPES) - 1)
self.draw_current_shape()
self.draw_next_shape()
def rotate(self):
self.subindex = (self.subindex + 1) % len(self.shapes)
self.shape = self.shapes[self.subindex]
if self.check_legal():
pass
else:
self.subindex = (self.subindex - 1) % len(self.shapes)
self.shape = self.shapes[self.subindex]
self.draw_current_shape()
def check_legal(self, r=0, c=0):
for x in xrange(self.SHAPE_WIDTH):
for y in xrange(self.SHAPE_HEIGHT):
if (self.shape[y][x] and # a tile there
(self.x+x+r < 0 or # left outside
self.x+x+r >= self.W or # right outside
self.y+y+c >= self.H or # bottom outside
self.board[self.y+y+c][self.x+x+r] # tile cover
)):
return False
return True
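    # check_legal(r, c) tests the current shape shifted by r columns and
    # c rows: every occupied cell must stay inside the side walls, above
    # the floor, and off tiles already fixed on the board.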
def move(self, r, c):
if self.check_legal(r, c):
self.x += r
self.y += c
def at_bottom(self):
for x in xrange(self.SHAPE_WIDTH):
for y in xrange(self.SHAPE_HEIGHT - 1, -1, -1):
if (self.shape[y][x] and \
(self.y+y+1 >= self. H or \
self.board[self.y+y+1][self.x+x])):
return True
return False
def draw_current_shape(self):
self._draw_shape(self.image, self.index, self.subindex)
def draw_next_shape(self):
self._draw_shape(self.image_next, self.indexN)
def _draw_shape(self, surface, index, subindex = -1):
""" Draw the shape to surface """
surface.fill((0, 0, 0, 0))
if subindex == -1:
subindex = 0
shape = self.SHAPES[index][subindex]
color = self.COLORS[index]
for x in xrange(self.SHAPE_HEIGHT):
for y in xrange(self.SHAPE_WIDTH):
if shape[x][y]:
surface.fill(color, \
(y*self.length, x*self.length, \
self.length, self.length))
pygame.draw.rect(surface, \
(255, 255, 255, 100), \
(y*self.length, x*self.length, self.length, self.length), \
1)
def draw(self, screen):
screen.blit(self.image, (self.start[0]+self.length*self.x, \
self.start[1]+self.length*self.y))
class Shape2(Shape):
def __init__(self, board_start, (board_width, board_height), (w, h)):
self.SHAPES = ((((1,) * w, ), ), )
self.SHAPE_WIDTH = w
self.SHAPE_HEIGHT = 1
super(Shape2, self).__init__(board_start, \
(board_width, board_height), (w, h))
def new(self):
self.x = 0
self.y = 0
self.index = self.indexN
self.shapes = self.SHAPES[self.index]
self.subindex = random.randint(0, len(self.shapes) - 1)
self.color = self.COLORS[self.index]
self.shape = self.shapes[self.subindex]
self.indexN = random.randint(0, len(self.SHAPES) - 1)
self.draw_current_shape()
self.draw_next_shape()
def draw_next_shape(self):
pass
class Shape4(Shape):
def __init__(self, board_start, (board_width, board_height), (w, h)):
self.SHAPES += (
(((1,1,1,1), (1,1,1,1), (1,1,1,1), (1,1,1,1)),),
)
self.COLORS += ((0, 0, 0),)
self._image = {}
self.image[7] = pygame.image.load(util.file_path('neko.png')).convert_alpha()
super(Shape4, self).__init__(board_start, (board_width, board_height), (w, h))
def _draw_shape(self, surface, index, subindex = -1):
        surface.fill((0, 0, 0, 0))
if index > 6:
surface.blit(self._image[index], (0, 0))
else:
super(Shape4, self)._draw_shape(surface, index, subindex)
def get_part_image(self, x, y):
return self._image[self.index].subsurface( \
(y*self.length, x*self.length), (self.length, self.length))
| kwailamchan/programming-languages | python/pygame/tetris/lib/shape.py | Python | mit | 8,684 |
import numpy as np
from unittest import TestCase
from diffprivlib.mechanisms import Geometric
from diffprivlib.mechanisms.transforms import StringToInt
from diffprivlib.utils import global_seed
global_seed(3141592653)
class TestStringToInt(TestCase):
def test_not_none(self):
mech = StringToInt(Geometric(epsilon=1))
self.assertIsNotNone(mech)
_mech = mech.copy()
self.assertIsNotNone(_mech)
def test_class(self):
from diffprivlib.mechanisms import DPMachine
from diffprivlib.mechanisms.transforms import DPTransformer
self.assertTrue(issubclass(StringToInt, DPMachine))
self.assertTrue(issubclass(StringToInt, DPTransformer))
def test_no_parent(self):
with self.assertRaises(TypeError):
StringToInt()
def test_randomise(self):
mech = StringToInt(Geometric(epsilon=1))
self.assertIsInstance(mech.randomise("1"), str)
def test_distrib(self):
epsilon = 1.0
runs = 10000
mech = StringToInt(Geometric(epsilon=epsilon))
count = [0, 0]
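        # Empirically checks the epsilon-DP inequality
        # P[M("0") == "0"] <= exp(eps) * P[M("1") == "0"] + slack,
        # allowing 0.05 slack for sampling error.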
for _ in range(runs):
if mech.randomise("0") == "0":
count[0] += 1
if mech.randomise("1") == "0":
count[1] += 1
self.assertGreater(count[0], count[1])
self.assertLessEqual(count[0] / runs, count[1] * np.exp(epsilon) / runs + 0.05)
| IBM/differential-privacy-library | tests/mechanisms/transforms/test_StringToInt.py | Python | mit | 1,405 |
import fastlmm.inference.lmm_cov as lmm
import numpy as np
import fastlmm.util.stats.chi2mixture as c2
import fastlmm.association as association
import scipy.stats as st
import tests_util as tu
class lrt(association.varcomp_test):
__slots__ = ["lmm","lrt","forcefullrank","nullModel","altModel","G0","K0","__testGcalled","model0","model1"]
def __init__(self, Y, X=None, appendbias=False, forcefullrank=False, G0=None, K0=None, nullModel=None,altModel=None):
association.varcomp_test.__init__(self,Y=Y,X=X,appendbias=appendbias)
N = self.Y.shape[0]
self.forcefullrank=forcefullrank
self.nullModel = nullModel
self.altModel = altModel
self.G0=G0
self.K0=K0
self.__testGcalled=False
self.lmm = lmm.LMM(forcefullrank=self.forcefullrank, X=self.X, linreg=None, Y=self.Y[:,np.newaxis], G=self.G0, K=self.K0, regressX=True)
self.model0 = self.lmm.findH2()# The null model only has a single kernel and only needs to find h2
self.model1=None
@property
def _testGcalled(self):
return self.__testGcalled
def testG(self, G1, type=None,i_exclude=None,G_exclude=None):
self.__testGcalled=True
#compute the alternative likelihood
(lik1,stat,alteqnull) = self._altModelMixedEffectLinear(G1,i_exclude=i_exclude,G_exclude=G_exclude)
        #due to optimization the alternative log-likelihood might be about 1E-6 worse than the null log-likelihood
pvreg = (st.chi2.sf(stat,1.0)) #standard way to compute p-value when no boundary conditions
if np.isnan(pvreg) or pvreg>1.0:
pvreg=1.0
pv = 0.5*pvreg #conservative 50/50 estimate
if alteqnull: pv=1.0 #chi_0 component
test={
'pv':pv,
'stat':stat,
'lik1':lik1,
'lik0':self.model0,
'alteqnull':alteqnull
}
return test
def _altModelMixedEffectLinear(self, G1,tol=0.0,i_exclude=None,G_exclude=None):
lik0=self.model0
G, i_G1, n_exclude = tu.set_Gexclude(G_exclude, G1, i_exclude)
UGup,UUGup = self.lmm.rotate(G)
i_up=~i_G1
#update null model if SNPs are excluded:
if n_exclude:
if UUGup is not None:
UUGup_=UUGup[:,0:n_exclude]
else:
UUGup_=None
lik0 = self.lmm.findH2_2K(nGridH2=100, minH2 = 0.0, maxH2 = 0.99999, i_up=i_up[0:n_exclude], i_G1=i_G1[0:n_exclude], UW=UGup[:,0:n_exclude], UUW=UUGup_)#The alternative model has two kernels and needs to find both a2 and h2
#build indicator for test SNPs (i_G1) and excluded SNPs (i_up)
#we currently don't account for exclusion of snps in G1 (low rank update could be even more low rank)
#alternative model likelihood:
lik1 = self.lmm.findH2_2K(nGridH2=100, minH2 = 0.0, maxH2 = 0.99999, i_up=i_up, i_G1=i_G1, UW=UGup, UUW=UUGup)#The alternative model has two kernels and needs to find both a2 and h2
try:
alteqnull=lik1['h2_1'][0]<=(0.0+tol)
except:
alteqnull=lik1['h2_1']<=(0.0+tol)
stat = 2.0*(lik0['nLL'][0] - lik1['nLL'][0])
self.model1=lik1
return (lik1,stat,alteqnull)
class LRT_up(object):
__slots__ = ["model0","model1","lrt","forcefullrank","nullModel","altModel","G0","__testGcalled"]
"""description of class"""
def check_nperm(self,nperm):
return nperm #permutations are fine, so just return
def __str__(self):
return "lrt_up"
def construct(self, Y, X=None, forcefullrank = False, SNPs0 = None, i_exclude=None, nullModel = None, altModel = None,
scoring = None, greater_is_better = None):
G0,K0=tu.set_snps0(SNPs0=SNPs0,sample_size=Y.shape[0],i_exclude=i_exclude)
print "constructing LMM - this should only happen once."
return lrt(Y, X=X, forcefullrank=forcefullrank, G0=G0, K0=K0, nullModel=nullModel,altModel=altModel)
def pv(squaredform,expectationsqform,varsqform,GPG):
raise Exception("'pv' doesn't apply to lrt only to davies")
@property
def npvals(self):
return 1 # return only 1 type of p-value
def w2(self, G0, result):
if G0 is not None:
return result.h2_1
else:
raise NotImplementedError("only with backgr. K")
def lrt(self, result):
return result.stat
def pv_adj_from_result(self, result):
        '''
        If a local aUD p-value exists, use it; otherwise fall back to the
        raw local p-value.
        '''
if result.test.has_key("pv-local-aUD") and not np.isnan(result.test["pv-local-aUD"]):
return result.test["pv-local-aUD"]
elif result.test.has_key("pv-local"):
return result.test["pv-local"]
else:
return np.nan
def pv_adj_and_ind(self, nperm, pv_adj, nullfit, lrt, lrtperm,
alteqnull, alteqnullperm, qmax, nullfitfile, nlocalperm):
if nlocalperm>0: #don't do the fitting
ind = pv_adj.argsort()
return pv_adj, ind
from fastlmm.association.tests import Cv
return Cv.pv_adj_and_ind(nperm, pv_adj, nullfit, lrt, lrtperm,
alteqnull, alteqnullperm, qmax, nullfitfile, nlocalperm) # call the shared version of this method
def write(self, fp,ind, result_dict, pv_adj, detailed_table, signal_ratio=True):
if result_dict[0].test.has_key("pv-local-aUD"):
            # For p_adj we use pv-local-aUD when it exists, falling back to
            # pv-local, so the "P-value_adjusted" column alone is ambiguous;
            # to disambiguate, also print "pv-local" here.
colnames = ["SetId", "LogLikeAlt", "LogLikeNull", "P-value_adjusted","P-value-local",
"P-value(50/50)", "#SNPs_in_Set", "#ExcludedSNPs", "chrm", "pos. range"]
else:
colnames = ["SetId", "LogLikeAlt", "LogLikeNull", "P-value_adjusted",
"P-value(50/50)", "#SNPs_in_Set", "#ExcludedSNPs", "chrm", "pos. range"]
if signal_ratio:
colnames.append("Alt_h2")
colnames.append("Alt_h2_1")
head = "\t".join(colnames)
if detailed_table:
lik1Info = result_dict[0].lik1Details
lik0Info = result_dict[0].lik0Details
altNames = lik1Info.keys()
altIndices = sorted(range(len(altNames)), key=lambda k: altNames[k])
altNames.sort()
altNames = ['Alt'+t for t in altNames]
head += "\t" + "\t".join( altNames )
nullNames = lik0Info.keys()
nullIndices = sorted(range(len(nullNames)), key=lambda k: nullNames[k])
nullNames.sort()
nullNames = ['Null'+t for t in nullNames]
head += "\t" + "\t".join( nullNames )
head += "\n"
fp.write(head)
for i in xrange(len(ind)):
ii = ind[i]
result = result_dict[ii]
ll0=str( -(result.stat/2.0+result.test['lik1']['nLL'][0]) )
if result_dict[0].test.has_key("pv-local-aUD"):
rowvals = [result.setname, str(-result.test['lik1']['nLL'][0]), ll0,
str(pv_adj[ii]),str(result.test['pv-local']),str(result.pv), str(result.setsize),
str(result.nexclude), result.ichrm, result.iposrange]
else:
rowvals = [result.setname, str(-result.test['lik1']['nLL'][0]), ll0,
str(pv_adj[ii]), str(result.pv), str(result.setsize),
str(result.nexclude), result.ichrm, result.iposrange]
if signal_ratio:
rowvals.append(str(result.h2))
rowvals.append(str(result.h2_1))
row = "\t".join(rowvals)
if detailed_table:
lik1Info = result.lik1Details
lik0Info = result.lik0Details
vals = lik1Info.values()
vals = [vals[j] for j in altIndices]
row += "\t" + "\t".join([str(v) for v in vals])
vals = lik0Info.values()
vals = [vals[j] for j in nullIndices]
row += "\t" + "\t".join([str(v) for v in vals])
row += "\n"
fp.write(row)
def pv_etc(self, filenull, G0_to_use, G1, y, x, null_model, varcomp_test, forcefullrank):
        # 'lr' below is assumed to be provided elsewhere (it is not imported
        # in this module) and to expose the one/two-kernel test entry points.
        if filenull is not None:
return lr.twokerneltest(G0=G0_to_use, G1=G1, y=y, covar=x, appendbias=False,lik0=null_model,forcefullrank = forcefullrank)
else:
return lr.onekerneltest(G1=G1, y=y, covar=x, appendbias=False,lik0=varcomp_test,forcefullrank = self.forcefullrank)
| zhonghualiu/FaST-LMM | fastlmm/association/tests/LRT_up.py | Python | apache-2.0 | 9,147 |
import os
BASE_DIR = os.path.dirname(__file__)
SECRET_KEY = 'django-jinja-bootstrap-form'
TEMPLATE_DEBUG = DEBUG = False
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django_jinja',
'bootstrapform_jinja',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'testapp.urls'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ':memory:',
}
}
TEMPLATE_DIRS = (
os.path.join(BASE_DIR, 'templates'),
)
import django
if django.get_version().startswith('1.8'):
TEMPLATES = [
{
'BACKEND': 'django_jinja.backend.Jinja2',
'APP_DIRS': True,
'DIRS': TEMPLATE_DIRS,
'OPTIONS': {
'match_extension': '.jinja',
}
},
]
else:
TEMPLATE_LOADERS = (
'django_jinja.loaders.FileSystemLoader',
'django_jinja.loaders.AppLoader'
)
| patryk4815/django-jinja-bootstrap-form | testing/settings.py | Python | bsd-3-clause | 1,389 |
#############################################################################
##
## Copyright (C) 2014 Digia Plc and/or its subsidiary(-ies).
## Contact: http://www.qt-project.org/legal
##
## This file is part of Qt Creator.
##
## Commercial License Usage
## Licensees holding valid commercial Qt licenses may use this file in
## accordance with the commercial license agreement provided with the
## Software or, alternatively, in accordance with the terms contained in
## a written agreement between you and Digia. For licensing terms and
## conditions see http://qt.digia.com/licensing. For further information
## use the contact form at http://qt.digia.com/contact-us.
##
## GNU Lesser General Public License Usage
## Alternatively, this file may be used under the terms of the GNU Lesser
## General Public License version 2.1 as published by the Free Software
## Foundation and appearing in the file LICENSE.LGPL included in the
## packaging of this file. Please review the following information to
## ensure the GNU Lesser General Public License version 2.1 requirements
## will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
##
## In addition, as a special exception, Digia gives you certain additional
## rights. These rights are described in the Digia Qt LGPL Exception
## version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
##
#############################################################################
source("../../shared/qtcreator.py")
# test bookmark functionality
def renameBookmarkFolder(view, item, newName):
invokeContextMenuItemOnBookmarkFolder(view, item, "Rename Folder")
replaceEditorContent(waitForObject(":Add Bookmark.treeView_QExpandingLineEdit"), newName)
type(waitForObject(":Add Bookmark.treeView_QExpandingLineEdit"), "<Return>")
def invokeContextMenuItemOnBookmarkFolder(view, item, menuItem):
aboveWidget = "{name='line' type='QFrame' visible='1' window=':Add Bookmark_BookmarkDialog'}"
mouseClick(waitForObjectItem(view, item), 5, 5, 0, Qt.LeftButton)
openItemContextMenu(view, item, 5, 5, 0)
activateItem(waitForObject("{aboveWidget=%s type='QMenu' unnamed='1' visible='1' "
"window=':Add Bookmark_BookmarkDialog'}" % aboveWidget), menuItem)
def textForQtVersion(text):
if isQt4Build:
return "QtCreator : " + text
else:
return text + " | QtCreator"
def main():
startApplication("qtcreator" + SettingsPath)
if not startedWithoutPluginError():
return
# goto help mode and click on topic
switchViewTo(ViewConstants.HELP)
manualQModelIndex = getQModelIndexStr("text?='Qt Creator Manual *'",
":Qt Creator_QHelpContentWidget")
doubleClick(manualQModelIndex, 5, 5, 0, Qt.LeftButton)
mouseClick(waitForObject(getQModelIndexStr("text='Building and Running an Example'",
manualQModelIndex)), 5, 5, 0, Qt.LeftButton)
# open bookmarks window
clickButton(waitForObject(":Qt Creator.Add Bookmark_QToolButton"))
clickButton(waitForObject(":Add Bookmark.ExpandBookmarksList_QToolButton"))
# create root bookmark directory
clickButton(waitForObject(":Add Bookmark.New Folder_QPushButton"))
# rename root bookmark directory
bookmarkView = waitForObject(":Add Bookmark.treeView_QTreeView")
renameBookmarkFolder(bookmarkView, "New Folder*", "Sample")
# create two more subfolders
clickButton(waitForObject(":Add Bookmark.New Folder_QPushButton"))
renameBookmarkFolder(bookmarkView, "Sample.New Folder*", "Folder 1")
clickButton(waitForObject(":Add Bookmark.New Folder_QPushButton"))
renameBookmarkFolder(bookmarkView, "Sample.Folder 1.New Folder*", "Folder 2")
clickButton(waitForObject(":Add Bookmark.OK_QPushButton"))
mouseClick(manualQModelIndex, 5, 5, 0, Qt.LeftButton)
type(waitForObject(":Qt Creator_QHelpContentWidget"), "<Down>")
clickButton(waitForObject(":Qt Creator.Add Bookmark_QToolButton"))
clickButton(waitForObject(":Add Bookmark.ExpandBookmarksList_QToolButton"))
# click on "Sample" and create new directory under it
mouseClick(waitForObject(getQModelIndexStr("text='Sample'", ":Add Bookmark.treeView_QTreeView")))
clickButton(waitForObject(":Add Bookmark.New Folder_QPushButton"))
clickButton(waitForObject(":Add Bookmark.OK_QPushButton"))
# choose bookmarks
mouseClick(waitForObjectItem(":Qt Creator_Core::Internal::CommandComboBox", "Bookmarks"))
# verify if all folders are created and bookmarks present
sampleQModelIndex = getQModelIndexStr("text='Sample'", ":Qt Creator_Bookmarks_TreeView")
folder1QModelIndex = getQModelIndexStr("text='Folder 1'", sampleQModelIndex)
folder2QModelIndex = getQModelIndexStr("text='Folder 2'", folder1QModelIndex)
bldRunQModelIndex = getQModelIndexStr("text?='%s'" % textForQtVersion("Building and Running an Example*"),
folder2QModelIndex)
newFolderQModelIndex = getQModelIndexStr("text='New Folder'", sampleQModelIndex)
manualQModelIndex = getQModelIndexStr("text='%s'" % textForQtVersion("Qt Creator Manual"),
newFolderQModelIndex)
test.verify(checkIfObjectExists(sampleQModelIndex, verboseOnFail = True) and
checkIfObjectExists(folder1QModelIndex, verboseOnFail = True) and
checkIfObjectExists(folder2QModelIndex, verboseOnFail = True) and
checkIfObjectExists(bldRunQModelIndex, verboseOnFail = True) and
checkIfObjectExists(manualQModelIndex, verboseOnFail = True),
"Verifying if all folders and bookmarks are present")
mouseClick(waitForObject(":Qt Creator_Bookmarks_TreeView"), 5, 5, 0, Qt.LeftButton)
for i in range(6):
type(waitForObject(":Qt Creator_Bookmarks_TreeView"), "<Right>")
type(waitForObject(":Qt Creator_Bookmarks_TreeView"), "<Return>")
test.verify(textForQtVersion("Building and Running an Example") in str(waitForObject(":Qt Creator_Help::Internal::HelpViewer").title),
"Verifying if first bookmark is opened")
mouseClick(waitForObject(bldRunQModelIndex))
type(waitForObject(":Qt Creator_Bookmarks_TreeView"), "<Down>")
type(waitForObject(":Qt Creator_Bookmarks_TreeView"), "<Right>")
type(waitForObject(":Qt Creator_Bookmarks_TreeView"), "<Down>")
type(waitForObject(":Qt Creator_Bookmarks_TreeView"), "<Return>")
test.verify(textForQtVersion("Qt Creator Manual") in str(waitForObject(":Qt Creator_Help::Internal::HelpViewer").title),
"Verifying if second bookmark is opened")
# delete previously created directory
clickButton(waitForObject(":Qt Creator.Add Bookmark_QToolButton"))
clickButton(waitForObject(":Add Bookmark.ExpandBookmarksList_QToolButton"))
invokeContextMenuItemOnBookmarkFolder(":Add Bookmark.treeView_QTreeView", "Sample.Folder 1",
"Delete Folder")
clickButton(waitForObject("{container=':Add Bookmark.treeView_QTreeView' text='Yes' "
"type='QPushButton' unnamed='1' visible='1'}"))
# close bookmarks
clickButton(waitForObject(":Add Bookmark.OK_QPushButton"))
# choose bookmarks from command combobox
mouseClick(waitForObject(":Qt Creator_Core::Internal::CommandComboBox"))
mouseClick(waitForObjectItem(":Qt Creator_Core::Internal::CommandComboBox", "Bookmarks"))
# verify if folders and bookmark deleted
test.verify(checkIfObjectExists(sampleQModelIndex, verboseOnFail = True) and
checkIfObjectExists(folder1QModelIndex, shouldExist = False, verboseOnFail = True) and
checkIfObjectExists(folder2QModelIndex, shouldExist = False, verboseOnFail = True) and
checkIfObjectExists(bldRunQModelIndex, shouldExist = False, verboseOnFail = True) and
checkIfObjectExists(manualQModelIndex, verboseOnFail = True),
"Verifying if folder 1 and folder 2 deleted including their bookmark")
# exit
invokeMenuItem("File", "Exit")
| colede/qtcreator | tests/system/suite_HELP/tst_HELP06/test.py | Python | lgpl-2.1 | 8,174 |
#!/usr/bin/env python
# Copyright (c) 2014 clowwindy
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import hashlib
__all__ = ['ciphers']
m2_not_found = False
def create_cipher(alg, key, iv, op, key_as_bytes=0, d=None, salt=None,
i=1, padding=1):
global m2_not_found
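    # The per-session RC4 key is MD5(key || iv), so every connection with a
    # fresh iv gets a distinct keystream.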
md5 = hashlib.md5()
md5.update(key)
md5.update(iv)
rc4_key = md5.digest()
if not m2_not_found:
try:
import M2Crypto.EVP
return M2Crypto.EVP.Cipher('rc4', rc4_key, '', op, key_as_bytes=0,
d='md5', salt=None, i=1, padding=1)
except:
m2_not_found = True
import ctypes_openssl
return ctypes_openssl.CtypesCrypto('rc4', rc4_key, '', op)
ciphers = {
'rc4-md5': (16, 16, create_cipher),
}
| gclove/shadowsocks | shadowsocks/crypto/rc4_md5.py | Python | mit | 1,815 |
"""
Copyright 2013 Steven Diamond
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from cvxpy.expressions.constants import Constant
def constant_canon(expr, real_args, imag_args, real2imag):
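    """Split a complex Constant into real and imaginary parts; the unused
    half of the returned (real, imag) pair is None."""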
if expr.is_real():
return Constant(expr.value.real), None
elif expr.is_imag():
return None, Constant(expr.value.imag)
else:
return (Constant(expr.value.real),
Constant(expr.value.imag))
| SteveDiamond/cvxpy | cvxpy/reductions/complex2real/atom_canonicalizers/constant_canon.py | Python | gpl-3.0 | 912 |
from typing import Generator
from followthemoney.types import registry
from followthemoney.property import Property
from nomenklatura.entity import CompositeEntity
TYPE_ORDER = {
registry.name: -6,
registry.identifier: -5,
registry.date: -4,
registry.country: -3,
registry.string: -1,
registry.text: 3,
}
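# Lower weight sorts earlier; properties featured on either schema get an
# additional -10 boost inside comparison_props.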
def comparison_props(
left: CompositeEntity, right: CompositeEntity
) -> Generator[Property, None, None]:
"""Return an ordered list of properties to be shown in a comparison of
the two given entities."""
props = set(left.iterprops())
props.update(right.iterprops())
weights = {p.name: TYPE_ORDER.get(p.type, 0) for p in props}
for prop in props:
for schema in (left.schema, right.schema):
if prop.name in schema.featured:
weights[prop.name] -= 10
key = lambda p: (weights[p.name], p.label)
for prop in sorted(props, key=key):
if prop.hidden:
continue
if prop.type.matchable and not prop.matchable:
continue
# if prop.type == registry.entity:
# continue
yield prop
| pudo/nomenklatura | nomenklatura/tui/util.py | Python | mit | 1,138 |
# sql/util.py
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from sqlalchemy import exc, schema, util, sql, types as sqltypes
from sqlalchemy.util import topological
from sqlalchemy.sql import expression, operators, visitors
from itertools import chain
from collections import deque
"""Utility functions that build upon SQL and Schema constructs."""
def sort_tables(tables):
"""sort a collection of Table objects in order of their foreign-key dependency."""
tables = list(tables)
tuples = []
def visit_foreign_key(fkey):
if fkey.use_alter:
return
parent_table = fkey.column.table
if parent_table in tables:
child_table = fkey.parent.table
if parent_table is not child_table:
tuples.append((parent_table, child_table))
for table in tables:
visitors.traverse(table,
{'schema_visitor':True},
{'foreign_key':visit_foreign_key})
tuples.extend(
[parent, table] for parent in table._extra_dependencies
)
return list(topological.sort(tuples, tables))
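# Usage sketch (hypothetical metadata/engine): parents come out before the
# tables that reference them, i.e. a CREATE-friendly order.
#   for table in sort_tables(metadata.tables.values()):
#       table.create(engine)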
def find_join_source(clauses, join_to):
"""Given a list of FROM clauses and a selectable,
return the first index and element from the list of
clauses which can be joined against the selectable. returns
None, None if no match is found.
e.g.::
clause1 = table1.join(table2)
clause2 = table4.join(table5)
join_to = table2.join(table3)
find_join_source([clause1, clause2], join_to) == clause1
"""
selectables = list(expression._from_objects(join_to))
for i, f in enumerate(clauses):
for s in selectables:
if f.is_derived_from(s):
return i, f
else:
return None, None
def find_tables(clause, check_columns=False,
include_aliases=False, include_joins=False,
include_selects=False, include_crud=False):
"""locate Table objects within the given expression."""
tables = []
_visitors = {}
if include_selects:
_visitors['select'] = _visitors['compound_select'] = tables.append
if include_joins:
_visitors['join'] = tables.append
if include_aliases:
_visitors['alias'] = tables.append
if include_crud:
_visitors['insert'] = _visitors['update'] = \
_visitors['delete'] = lambda ent: tables.append(ent.table)
if check_columns:
def visit_column(column):
tables.append(column.table)
_visitors['column'] = visit_column
_visitors['table'] = tables.append
visitors.traverse(clause, {'column_collections':False}, _visitors)
return tables
def find_columns(clause):
"""locate Column objects within the given expression."""
cols = util.column_set()
visitors.traverse(clause, {}, {'column':cols.add})
return cols
def unwrap_order_by(clause):
"""Break up an 'order by' expression into individual column-expressions,
without DESC/ASC/NULLS FIRST/NULLS LAST"""
cols = util.column_set()
stack = deque([clause])
while stack:
t = stack.popleft()
if isinstance(t, expression.ColumnElement) and \
(
not isinstance(t, expression._UnaryExpression) or \
not operators.is_ordering_modifier(t.modifier)
):
cols.add(t)
else:
for c in t.get_children():
stack.append(c)
return cols
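# e.g. for an ORDER BY of (a DESC, b ASC) this yields the bare column set
# {a, b}, with the ordering modifiers stripped.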
def clause_is_present(clause, search):
"""Given a target clause and a second to search within, return True
if the target is plainly present in the search without any
subqueries or aliases involved.
Basically descends through Joins.
"""
stack = [search]
while stack:
elem = stack.pop()
if clause is elem:
return True
elif isinstance(elem, expression.Join):
stack.extend((elem.left, elem.right))
return False
def bind_values(clause):
"""Return an ordered list of "bound" values in the given clause.
E.g.::
>>> expr = and_(
... table.c.foo==5, table.c.foo==7
... )
>>> bind_values(expr)
[5, 7]
"""
v = []
def visit_bindparam(bind):
v.append(bind.effective_value)
visitors.traverse(clause, {}, {'bindparam':visit_bindparam})
return v
def _quote_ddl_expr(element):
if isinstance(element, basestring):
element = element.replace("'", "''")
return "'%s'" % element
else:
return repr(element)
class _repr_params(object):
"""A string view of bound parameters, truncating
display to the given number of 'multi' parameter sets.
"""
def __init__(self, params, batches):
self.params = params
self.batches = batches
def __repr__(self):
if isinstance(self.params, (list, tuple)) and \
len(self.params) > self.batches and \
isinstance(self.params[0], (list, dict, tuple)):
return ' '.join((
repr(self.params[:self.batches - 2])[0:-1],
" ... displaying %i of %i total bound parameter sets ... " % (self.batches, len(self.params)),
repr(self.params[-2:])[1:]
))
else:
return repr(self.params)
def expression_as_ddl(clause):
"""Given a SQL expression, convert for usage in DDL, such as
CREATE INDEX and CHECK CONSTRAINT.
Converts bind params into quoted literals, column identifiers
into detached column constructs so that the parent table
identifier is not included.
"""
def repl(element):
if isinstance(element, expression._BindParamClause):
return expression.literal_column(_quote_ddl_expr(element.value))
elif isinstance(element, expression.ColumnClause) and \
element.table is not None:
return expression.column(element.name)
else:
return None
return visitors.replacement_traverse(clause, {}, repl)
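# Sketch of the conversion (hypothetical table 'sometable'): the bind param
# produced by the comparison becomes a quoted literal and the column loses
# its table prefix, e.g.
#
#   expr = sometable.c.x == 'foo'
#   expression_as_ddl(expr)   # renders as:  x = 'foo'
#
# making the expression safe to embed in CREATE INDEX / CHECK DDL.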
def adapt_criterion_to_null(crit, nulls):
"""given criterion containing bind params, convert selected elements to IS NULL."""
def visit_binary(binary):
if isinstance(binary.left, expression._BindParamClause) \
and binary.left._identifying_key in nulls:
# reverse order if the NULL is on the left side
binary.left = binary.right
binary.right = expression.null()
binary.operator = operators.is_
binary.negate = operators.isnot
elif isinstance(binary.right, expression._BindParamClause) \
and binary.right._identifying_key in nulls:
binary.right = expression.null()
binary.operator = operators.is_
binary.negate = operators.isnot
return visitors.cloned_traverse(crit, {}, {'binary':visit_binary})
def join_condition(a, b, ignore_nonexistent_tables=False, a_subset=None):
"""create a join condition between two tables or selectables.
e.g.::
join_condition(tablea, tableb)
would produce an expression along the lines of::
tablea.c.id==tableb.c.tablea_id
The join is determined based on the foreign key relationships
between the two selectables. If there are multiple ways
to join, or no way to join, an error is raised.
:param ignore_nonexistent_tables: Deprecated - this
flag is no longer used. Only resolution errors regarding
the two given tables are propagated.
:param a_subset: An optional expression that is a sub-component
of ``a``. An attempt will be made to join to just this sub-component
first before looking at the full ``a`` construct, and if found
will be successful even if there are other ways to join to ``a``.
This allows the "right side" of a join to be passed thereby
providing a "natural join".
"""
crit = []
constraints = set()
for left in (a_subset, a):
if left is None:
continue
for fk in sorted(
b.foreign_keys,
key=lambda fk:fk.parent._creation_order):
try:
col = fk.get_referent(left)
except exc.NoReferenceError, nrte:
if nrte.table_name == left.name:
raise
else:
continue
if col is not None:
crit.append(col == fk.parent)
constraints.add(fk.constraint)
if left is not b:
for fk in sorted(
left.foreign_keys,
key=lambda fk:fk.parent._creation_order):
try:
col = fk.get_referent(b)
except exc.NoReferenceError, nrte:
if nrte.table_name == b.name:
raise
else:
# this is totally covered. can't get
# coverage to mark it.
continue
if col is not None:
crit.append(col == fk.parent)
constraints.add(fk.constraint)
if crit:
break
if len(crit) == 0:
if isinstance(b, expression._FromGrouping):
hint = " Perhaps you meant to convert the right side to a "\
"subquery using alias()?"
else:
hint = ""
raise exc.ArgumentError(
"Can't find any foreign key relationships "
"between '%s' and '%s'.%s" % (a.description, b.description, hint))
elif len(constraints) > 1:
raise exc.ArgumentError(
"Can't determine join between '%s' and '%s'; "
"tables have more than one foreign key "
"constraint relationship between them. "
"Please specify the 'onclause' of this "
"join explicitly." % (a.description, b.description))
elif len(crit) == 1:
return (crit[0])
else:
return sql.and_(*crit)
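# Hedged example, reusing the hypothetical parent/child schema from the
# sort_tables sketch above:
#
#   join_condition(parent, child)
#   # -> roughly:  parent.c.id == child.c.parent_id
#
# If the tables were linked by more than one foreign key constraint, an
# ArgumentError would ask for an explicit 'onclause' instead.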
class Annotated(object):
"""clones a ClauseElement and applies an 'annotations' dictionary.
Unlike regular clones, this clone also mimics __hash__() and
__cmp__() of the original element so that it takes its place
in hashed collections.
    A reference to the original element is maintained, for the important
    reason of keeping its hash value current.  If the original element
    were garbage collected, its hash value could be reused, causing
    conflicts.
"""
def __new__(cls, *args):
if not args:
# clone constructor
return object.__new__(cls)
else:
element, values = args
# pull appropriate subclass from registry of annotated
# classes
try:
cls = annotated_classes[element.__class__]
except KeyError:
cls = annotated_classes[element.__class__] = type.__new__(type,
"Annotated%s" % element.__class__.__name__,
(Annotated, element.__class__), {})
return object.__new__(cls)
def __init__(self, element, values):
# force FromClause to generate their internal
# collections into __dict__
if isinstance(element, expression.FromClause):
element.c
self.__dict__ = element.__dict__.copy()
self.__element = element
self._annotations = values
def _annotate(self, values):
_values = self._annotations.copy()
_values.update(values)
clone = self.__class__.__new__(self.__class__)
clone.__dict__ = self.__dict__.copy()
clone._annotations = _values
return clone
def _deannotate(self):
return self.__element
def _compiler_dispatch(self, visitor, **kw):
return self.__element.__class__._compiler_dispatch(self, visitor, **kw)
@property
def _constructor(self):
return self.__element._constructor
def _clone(self):
clone = self.__element._clone()
if clone is self.__element:
# detect immutable, don't change anything
return self
else:
# update the clone with any changes that have occurred
# to this object's __dict__.
clone.__dict__.update(self.__dict__)
return Annotated(clone, self._annotations)
def __hash__(self):
return hash(self.__element)
def __eq__(self, other):
if isinstance(self.__element, expression.ColumnOperators):
return self.__element.__class__.__eq__(self, other)
else:
return hash(other) == hash(self)
# hard-generate Annotated subclasses. this technique
# is used instead of on-the-fly types (i.e. type.__new__())
# so that the resulting objects are pickleable.
annotated_classes = {}
for cls in expression.__dict__.values() + [schema.Column, schema.Table]:
if isinstance(cls, type) and issubclass(cls, expression.ClauseElement):
exec "class Annotated%s(Annotated, cls):\n" \
" pass" % (cls.__name__, ) in locals()
exec "annotated_classes[cls] = Annotated%s" % (cls.__name__)
def _deep_annotate(element, annotations, exclude=None):
"""Deep copy the given ClauseElement, annotating each element
with the given annotations dictionary.
Elements within the exclude collection will be cloned but not annotated.
"""
cloned = util.column_dict()
def clone(elem):
# check if element is present in the exclude list.
# take into account proxying relationships.
if elem in cloned:
return cloned[elem]
elif exclude and \
hasattr(elem, 'proxy_set') and \
elem.proxy_set.intersection(exclude):
newelem = elem._clone()
elif annotations != elem._annotations:
newelem = elem._annotate(annotations)
else:
newelem = elem
newelem._copy_internals(clone=clone)
cloned[elem] = newelem
return newelem
if element is not None:
element = clone(element)
return element
def _deep_deannotate(element):
"""Deep copy the given element, removing all annotations."""
cloned = util.column_dict()
def clone(elem):
if elem not in cloned:
newelem = elem._deannotate()
newelem._copy_internals(clone=clone)
cloned[elem] = newelem
return cloned[elem]
if element is not None:
element = clone(element)
return element
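# Conceptual round trip (internal API; the annotation key below is made up
# purely for illustration):
#
#   annotated = _deep_annotate(sometable.c.x, {'role': 'demo'})
#   annotated._annotations        # -> {'role': 'demo'}
#   _deep_deannotate(annotated)   # -> equivalent clone without annotations
#
# Because Annotated mimics __hash__() of the original element, the annotated
# clone can stand in for the original inside hashed column collections.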
def _shallow_annotate(element, annotations):
"""Annotate the given ClauseElement and copy its internals so that
internal objects refer to the new annotated object.
Basically used to apply a "dont traverse" annotation to a
selectable, without digging throughout the whole
structure wasting time.
"""
element = element._annotate(annotations)
element._copy_internals()
return element
def splice_joins(left, right, stop_on=None):
if left is None:
return right
stack = [(right, None)]
adapter = ClauseAdapter(left)
ret = None
while stack:
(right, prevright) = stack.pop()
if isinstance(right, expression.Join) and right is not stop_on:
right = right._clone()
right._reset_exported()
right.onclause = adapter.traverse(right.onclause)
stack.append((right.left, right))
else:
right = adapter.traverse(right)
if prevright is not None:
prevright.left = right
if ret is None:
ret = right
return ret
def reduce_columns(columns, *clauses, **kw):
"""given a list of columns, return a 'reduced' set based on natural equivalents.
the set is reduced to the smallest list of columns which have no natural
equivalent present in the list. A "natural equivalent" means that two columns
will ultimately represent the same value because they are related by a foreign key.
\*clauses is an optional list of join clauses which will be traversed
to further identify columns that are "equivalent".
\**kw may specify 'ignore_nonexistent_tables' to ignore foreign keys
whose tables are not yet configured.
    This function is primarily used to determine the most minimal "primary key"
    from a selectable, by reducing the set of primary key columns present
    in the selectable to just those that are not repeated.
"""
ignore_nonexistent_tables = kw.pop('ignore_nonexistent_tables', False)
columns = util.ordered_column_set(columns)
omit = util.column_set()
for col in columns:
for fk in chain(*[c.foreign_keys for c in col.proxy_set]):
for c in columns:
if c is col:
continue
try:
fk_col = fk.column
except exc.NoReferencedTableError:
if ignore_nonexistent_tables:
continue
else:
raise
if fk_col.shares_lineage(c):
omit.add(col)
break
if clauses:
def visit_binary(binary):
if binary.operator == operators.eq:
cols = util.column_set(chain(*[c.proxy_set for c in columns.difference(omit)]))
if binary.left in cols and binary.right in cols:
for c in columns:
if c.shares_lineage(binary.right):
omit.add(c)
break
for clause in clauses:
visitors.traverse(clause, {}, {'binary':visit_binary})
return expression.ColumnSet(columns.difference(omit))
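# Hedged example with the hypothetical parent/child tables above:
# child.c.parent_id and parent.c.id are natural equivalents via the foreign
# key, so only one of them survives the reduction:
#
#   reduce_columns([parent.c.id, child.c.id, child.c.parent_id])
#   # -> ColumnSet([parent.c.id, child.c.id])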
def criterion_as_pairs(expression, consider_as_foreign_keys=None,
consider_as_referenced_keys=None, any_operator=False):
"""traverse an expression and locate binary criterion pairs."""
if consider_as_foreign_keys and consider_as_referenced_keys:
raise exc.ArgumentError("Can only specify one of "
"'consider_as_foreign_keys' or "
"'consider_as_referenced_keys'")
def visit_binary(binary):
if not any_operator and binary.operator is not operators.eq:
return
if not isinstance(binary.left, sql.ColumnElement) or \
not isinstance(binary.right, sql.ColumnElement):
return
if consider_as_foreign_keys:
if binary.left in consider_as_foreign_keys and \
(binary.right is binary.left or
binary.right not in consider_as_foreign_keys):
pairs.append((binary.right, binary.left))
elif binary.right in consider_as_foreign_keys and \
(binary.left is binary.right or
binary.left not in consider_as_foreign_keys):
pairs.append((binary.left, binary.right))
elif consider_as_referenced_keys:
if binary.left in consider_as_referenced_keys and \
(binary.right is binary.left or
binary.right not in consider_as_referenced_keys):
pairs.append((binary.left, binary.right))
elif binary.right in consider_as_referenced_keys and \
(binary.left is binary.right or
binary.left not in consider_as_referenced_keys):
pairs.append((binary.right, binary.left))
else:
if isinstance(binary.left, schema.Column) and \
isinstance(binary.right, schema.Column):
if binary.left.references(binary.right):
pairs.append((binary.right, binary.left))
elif binary.right.references(binary.left):
pairs.append((binary.left, binary.right))
pairs = []
visitors.traverse(expression, {}, {'binary':visit_binary})
return pairs
def folded_equivalents(join, equivs=None):
"""Return a list of uniquely named columns.
    The column list of the given Join is narrowed down so that
    equivalently-named columns which are equated to each other in the
    ON clause of the join are folded into a single column.
This function is used by Join.select(fold_equivalents=True).
Deprecated. This function is used for a certain kind of
"polymorphic_union" which is designed to achieve joined
table inheritance where the base table has no "discriminator"
column; [ticket:1131] will provide a better way to
achieve this.
"""
if equivs is None:
equivs = set()
def visit_binary(binary):
if binary.operator == operators.eq and binary.left.name == binary.right.name:
equivs.add(binary.right)
equivs.add(binary.left)
visitors.traverse(join.onclause, {}, {'binary':visit_binary})
collist = []
if isinstance(join.left, expression.Join):
left = folded_equivalents(join.left, equivs)
else:
left = list(join.left.columns)
if isinstance(join.right, expression.Join):
right = folded_equivalents(join.right, equivs)
else:
right = list(join.right.columns)
used = set()
for c in left + right:
if c in equivs:
if c.name not in used:
collist.append(c)
used.add(c.name)
else:
collist.append(c)
return collist
class AliasedRow(object):
"""Wrap a RowProxy with a translation map.
This object allows a set of keys to be translated
to those present in a RowProxy.
"""
def __init__(self, row, map):
# AliasedRow objects don't nest, so un-nest
# if another AliasedRow was passed
if isinstance(row, AliasedRow):
self.row = row.row
else:
self.row = row
self.map = map
def __contains__(self, key):
return self.map[key] in self.row
def has_key(self, key):
return key in self
def __getitem__(self, key):
return self.row[self.map[key]]
def keys(self):
return self.row.keys()
class ClauseAdapter(visitors.ReplacingCloningVisitor):
"""Clones and modifies clauses based on column correspondence.
E.g.::
table1 = Table('sometable', metadata,
Column('col1', Integer),
Column('col2', Integer)
)
table2 = Table('someothertable', metadata,
Column('col1', Integer),
Column('col2', Integer)
)
condition = table1.c.col1 == table2.c.col1
make an alias of table1::
s = table1.alias('foo')
calling ``ClauseAdapter(s).traverse(condition)`` converts
condition to read::
s.c.col1 == table2.c.col1
"""
def __init__(self, selectable, equivalents=None, include=None, exclude=None, adapt_on_names=False):
self.__traverse_options__ = {'stop_on':[selectable]}
self.selectable = selectable
self.include = include
self.exclude = exclude
self.equivalents = util.column_dict(equivalents or {})
self.adapt_on_names = adapt_on_names
def _corresponding_column(self, col, require_embedded, _seen=util.EMPTY_SET):
newcol = self.selectable.corresponding_column(
col,
require_embedded=require_embedded)
if newcol is None and col in self.equivalents and col not in _seen:
for equiv in self.equivalents[col]:
newcol = self._corresponding_column(equiv,
require_embedded=require_embedded,
_seen=_seen.union([col]))
if newcol is not None:
return newcol
if self.adapt_on_names and newcol is None:
newcol = self.selectable.c.get(col.name)
return newcol
def replace(self, col):
if isinstance(col, expression.FromClause):
if self.selectable.is_derived_from(col):
return self.selectable
if not isinstance(col, expression.ColumnElement):
return None
if self.include and col not in self.include:
return None
elif self.exclude and col in self.exclude:
return None
return self._corresponding_column(col, True)
class ColumnAdapter(ClauseAdapter):
"""Extends ClauseAdapter with extra utility functions.
Provides the ability to "wrap" this ClauseAdapter
around another, a columns dictionary which returns
adapted elements given an original, and an
adapted_row() factory.
"""
def __init__(self, selectable, equivalents=None,
chain_to=None, include=None,
exclude=None, adapt_required=False):
ClauseAdapter.__init__(self, selectable, equivalents, include, exclude)
if chain_to:
self.chain(chain_to)
self.columns = util.populate_column_dict(self._locate_col)
self.adapt_required = adapt_required
def wrap(self, adapter):
ac = self.__class__.__new__(self.__class__)
ac.__dict__ = self.__dict__.copy()
ac._locate_col = ac._wrap(ac._locate_col, adapter._locate_col)
ac.adapt_clause = ac._wrap(ac.adapt_clause, adapter.adapt_clause)
ac.adapt_list = ac._wrap(ac.adapt_list, adapter.adapt_list)
ac.columns = util.populate_column_dict(ac._locate_col)
return ac
adapt_clause = ClauseAdapter.traverse
adapt_list = ClauseAdapter.copy_and_process
def _wrap(self, local, wrapped):
def locate(col):
col = local(col)
return wrapped(col)
return locate
def _locate_col(self, col):
c = self._corresponding_column(col, True)
if c is None:
c = self.adapt_clause(col)
# anonymize labels in case they have a hardcoded name
if isinstance(c, expression._Label):
c = c.label(None)
# adapt_required indicates that if we got the same column
# back which we put in (i.e. it passed through),
# it's not correct. this is used by eagerloading which
# knows that all columns and expressions need to be adapted
# to a result row, and a "passthrough" is definitely targeting
# the wrong column.
if self.adapt_required and c is col:
return None
return c
def adapted_row(self, row):
return AliasedRow(row, self.columns)
def __getstate__(self):
d = self.__dict__.copy()
del d['columns']
return d
def __setstate__(self, state):
self.__dict__.update(state)
self.columns = util.PopulateDict(self._locate_col)
| aurofable/medhack-server | venv/lib/python2.7/site-packages/sqlalchemy/sql/util.py | Python | mit | 27,221 |
import re
import sys
import uuid
import copy
import datetime
from six import iteritems
from . import RefinerUtils
from pandajedi.jedicore import Interaction
from pandajedi.jedicore import JediException
from pandajedi.jedicore.JediTaskSpec import JediTaskSpec
from pandajedi.jedicore.JediDatasetSpec import JediDatasetSpec
from pandajedi.jedicore.JediFileSpec import JediFileSpec
from pandaserver.taskbuffer import EventServiceUtils
try:
from idds.client.client import Client as iDDS_Client
import idds.common.constants
import idds.common.utils
except ImportError:
pass
# base class for task refiners
class TaskRefinerBase (object):
# constructor
def __init__(self,taskBufferIF,ddmIF):
self.ddmIF = ddmIF
self.taskBufferIF = taskBufferIF
self.initializeRefiner(None)
self.refresh()
# refresh
def refresh(self):
self.siteMapper = self.taskBufferIF.getSiteMapper()
# initialize
def initializeRefiner(self,tmpLog):
self.taskSpec = None
self.inMasterDatasetSpec = []
self.inSecDatasetSpecList = []
self.outDatasetSpecList = []
self.outputTemplateMap = {}
self.jobParamsTemplate = None
self.cloudName = None
self.siteName = None
self.tmpLog = tmpLog
self.updatedTaskParams = None
self.unmergeMasterDatasetSpec = {}
self.unmergeDatasetSpecMap = {}
self.oldTaskStatus = None
self.unknownDatasetList = []
# set jobParamsTemplate
def setJobParamsTemplate(self,jobParamsTemplate):
self.jobParamsTemplate = jobParamsTemplate
# extract common parameters
def extractCommon(self,jediTaskID,taskParamMap,workQueueMapper,splitRule):
# make task spec
taskSpec = JediTaskSpec()
taskSpec.jediTaskID = jediTaskID
taskSpec.attemptNr = 0
taskSpec.taskName = taskParamMap['taskName']
taskSpec.userName = taskParamMap['userName']
taskSpec.vo = taskParamMap['vo']
taskSpec.prodSourceLabel = taskParamMap['prodSourceLabel']
taskSpec.taskPriority = taskParamMap['taskPriority']
if 'currentPriority' in taskParamMap:
taskSpec.currentPriority = taskParamMap['currentPriority']
else:
taskSpec.currentPriority = taskSpec.taskPriority
taskSpec.architecture = taskParamMap['architecture']
taskSpec.transUses = taskParamMap['transUses']
taskSpec.transHome = taskParamMap['transHome']
if 'transPath' in taskParamMap:
taskSpec.transPath = taskParamMap['transPath']
taskSpec.processingType = taskParamMap['processingType']
taskSpec.taskType = taskParamMap['taskType']
taskSpec.splitRule = splitRule
taskSpec.startTime = datetime.datetime.utcnow()
if 'workingGroup' in taskParamMap:
taskSpec.workingGroup = taskParamMap['workingGroup']
if 'countryGroup' in taskParamMap:
taskSpec.countryGroup = taskParamMap['countryGroup']
if 'ticketID' in taskParamMap:
taskSpec.ticketID = taskParamMap['ticketID']
if 'ticketSystemType' in taskParamMap:
taskSpec.ticketSystemType = taskParamMap['ticketSystemType']
if 'reqID' in taskParamMap:
taskSpec.reqID = taskParamMap['reqID']
else:
taskSpec.reqID = jediTaskID
if 'coreCount' in taskParamMap:
taskSpec.coreCount = taskParamMap['coreCount']
else:
taskSpec.coreCount = 1
if 'walltime' in taskParamMap:
taskSpec.walltime = taskParamMap['walltime']
else:
taskSpec.walltime = 0
if 'walltimeUnit' not in taskParamMap:
# force to set NULL so that retried tasks get data from scouts again
taskSpec.forceUpdate('walltimeUnit')
if 'outDiskCount' in taskParamMap:
taskSpec.outDiskCount = taskParamMap['outDiskCount']
else:
taskSpec.outDiskCount = 0
if 'outDiskUnit' in taskParamMap:
taskSpec.outDiskUnit = taskParamMap['outDiskUnit']
if 'workDiskCount' in taskParamMap:
taskSpec.workDiskCount = taskParamMap['workDiskCount']
else:
taskSpec.workDiskCount = 0
if 'workDiskUnit' in taskParamMap:
taskSpec.workDiskUnit = taskParamMap['workDiskUnit']
if 'ramCount' in taskParamMap:
taskSpec.ramCount = taskParamMap['ramCount']
else:
taskSpec.ramCount = 0
if 'ramUnit' in taskParamMap:
taskSpec.ramUnit = taskParamMap['ramUnit']
if 'baseRamCount' in taskParamMap:
taskSpec.baseRamCount = taskParamMap['baseRamCount']
else:
taskSpec.baseRamCount = 0
# IO
if 'ioIntensity' in taskParamMap:
taskSpec.ioIntensity = taskParamMap['ioIntensity']
if 'ioIntensityUnit' in taskParamMap:
taskSpec.ioIntensityUnit = taskParamMap['ioIntensityUnit']
# HS06 stuff
if 'cpuTimeUnit' in taskParamMap:
taskSpec.cpuTimeUnit = taskParamMap['cpuTimeUnit']
if 'cpuTime' in taskParamMap:
taskSpec.cpuTime = taskParamMap['cpuTime']
if 'cpuEfficiency' in taskParamMap:
taskSpec.cpuEfficiency = taskParamMap['cpuEfficiency']
else:
# 90% of cpu efficiency by default
taskSpec.cpuEfficiency = 90
if 'baseWalltime' in taskParamMap:
taskSpec.baseWalltime = taskParamMap['baseWalltime']
else:
# 10min of offset by default
taskSpec.baseWalltime = 10*60
# for merge
if 'mergeRamCount' in taskParamMap:
taskSpec.mergeRamCount = taskParamMap['mergeRamCount']
if 'mergeCoreCount' in taskParamMap:
taskSpec.mergeCoreCount = taskParamMap['mergeCoreCount']
# scout
if 'skipScout' not in taskParamMap and not taskSpec.isPostScout():
taskSpec.setUseScout(True)
# cloud
if 'cloud' in taskParamMap:
self.cloudName = taskParamMap['cloud']
taskSpec.cloud = self.cloudName
else:
# set dummy to force update
taskSpec.cloud = 'dummy'
taskSpec.cloud = None
# site
if 'site' in taskParamMap:
self.siteName = taskParamMap['site']
taskSpec.site = self.siteName
else:
# set dummy to force update
taskSpec.site = 'dummy'
taskSpec.site = None
# nucleus
if 'nucleus' in taskParamMap:
taskSpec.nucleus = taskParamMap['nucleus']
# preset some parameters for job cloning
if 'useJobCloning' in taskParamMap:
# set implicit parameters
if 'nEventsPerWorker' not in taskParamMap:
taskParamMap['nEventsPerWorker'] = 1
if 'nSitesPerJob' not in taskParamMap:
taskParamMap['nSitesPerJob'] = 2
if 'nEsConsumers' not in taskParamMap:
taskParamMap['nEsConsumers'] = taskParamMap['nSitesPerJob']
# minimum granularity
if 'minGranularity' in taskParamMap:
taskParamMap['nEventsPerRange'] = taskParamMap['minGranularity']
# event service flag
if 'useJobCloning' in taskParamMap:
taskSpec.eventService = 2
elif 'nEventsPerWorker' in taskParamMap:
taskSpec.eventService = 1
else:
taskSpec.eventService = 0
# OS
if 'osInfo' in taskParamMap:
taskSpec.termCondition = taskParamMap['osInfo']
# ttcr: requested time to completion
if 'ttcrTimestamp' in taskParamMap:
try:
# get rid of the +00:00 timezone string and parse the timestamp
taskSpec.ttcRequested = datetime.datetime.strptime(taskParamMap['ttcrTimestamp'].split('+')[0], '%Y-%m-%d %H:%M:%S.%f')
except (IndexError, ValueError):
pass
# goal
if 'goal' in taskParamMap:
try:
taskSpec.goal = int(float(taskParamMap['goal'])*10)
if taskSpec.goal > 1000:
taskSpec.goal = None
except Exception:
pass
# campaign
if 'campaign' in taskParamMap:
taskSpec.campaign = taskParamMap['campaign']
# request type
if 'requestType' in taskParamMap:
taskSpec.requestType = taskParamMap['requestType']
# image name
if 'container_name' in taskParamMap:
taskSpec.container_name = taskParamMap['container_name']
self.taskSpec = taskSpec
# set split rule
if 'tgtNumEventsPerJob' in taskParamMap:
# set nEventsPerJob not respect file boundaries when nFilesPerJob is not used
if 'nFilesPerJob' not in taskParamMap:
self.setSplitRule(None,taskParamMap['tgtNumEventsPerJob'],JediTaskSpec.splitRuleToken['nEventsPerJob'])
self.setSplitRule(taskParamMap,'nFilesPerJob', JediTaskSpec.splitRuleToken['nFilesPerJob'])
self.setSplitRule(taskParamMap,'nEventsPerJob', JediTaskSpec.splitRuleToken['nEventsPerJob'])
self.setSplitRule(taskParamMap,'nGBPerJob', JediTaskSpec.splitRuleToken['nGBPerJob'])
self.setSplitRule(taskParamMap,'nMaxFilesPerJob', JediTaskSpec.splitRuleToken['nMaxFilesPerJob'])
self.setSplitRule(taskParamMap,'nEventsPerWorker', JediTaskSpec.splitRuleToken['nEventsPerWorker'])
self.setSplitRule(taskParamMap,'disableAutoRetry', JediTaskSpec.splitRuleToken['disableAutoRetry'])
self.setSplitRule(taskParamMap,'nEsConsumers', JediTaskSpec.splitRuleToken['nEsConsumers'])
self.setSplitRule(taskParamMap,'waitInput', JediTaskSpec.splitRuleToken['waitInput'])
self.setSplitRule(taskParamMap,'addNthFieldToLFN', JediTaskSpec.splitRuleToken['addNthFieldToLFN'])
self.setSplitRule(taskParamMap,'scoutSuccessRate', JediTaskSpec.splitRuleToken['scoutSuccessRate'])
self.setSplitRule(taskParamMap,'t1Weight', JediTaskSpec.splitRuleToken['t1Weight'])
self.setSplitRule(taskParamMap,'maxAttemptES', JediTaskSpec.splitRuleToken['maxAttemptES'])
self.setSplitRule(taskParamMap,'maxAttemptEsJob', JediTaskSpec.splitRuleToken['maxAttemptEsJob'])
self.setSplitRule(taskParamMap,'nSitesPerJob', JediTaskSpec.splitRuleToken['nSitesPerJob'])
self.setSplitRule(taskParamMap,'nEventsPerMergeJob', JediTaskSpec.splitRuleToken['nEventsPerMergeJob'])
self.setSplitRule(taskParamMap,'nFilesPerMergeJob', JediTaskSpec.splitRuleToken['nFilesPerMergeJob'])
self.setSplitRule(taskParamMap,'nGBPerMergeJob', JediTaskSpec.splitRuleToken['nGBPerMergeJob'])
self.setSplitRule(taskParamMap,'nMaxFilesPerMergeJob', JediTaskSpec.splitRuleToken['nMaxFilesPerMergeJob'])
self.setSplitRule(taskParamMap,'maxWalltime', JediTaskSpec.splitRuleToken['maxWalltime'])
self.setSplitRule(taskParamMap,'tgtMaxOutputForNG', JediTaskSpec.splitRuleToken['tgtMaxOutputForNG'])
self.setSplitRule(taskParamMap, 'maxNumJobs', JediTaskSpec.splitRuleToken['maxNumJobs'])
self.setSplitRule(taskParamMap, 'totNumJobs', JediTaskSpec.splitRuleToken['totNumJobs'])
self.setSplitRule(taskParamMap, 'nChunksToWait', JediTaskSpec.splitRuleToken['nChunksToWait'])
if 'forceStaged' in taskParamMap:
taskParamMap['useLocalIO'] = taskParamMap['forceStaged']
if 'useLocalIO' in taskParamMap:
if taskParamMap['useLocalIO']:
self.setSplitRule(None, 1, JediTaskSpec.splitRuleToken['useLocalIO'])
else:
self.setSplitRule(None, 0, JediTaskSpec.splitRuleToken['useLocalIO'])
if 'nJumboJobs' in taskParamMap:
self.setSplitRule(taskParamMap,'nJumboJobs',JediTaskSpec.splitRuleToken['nJumboJobs'])
taskSpec.useJumbo = JediTaskSpec.enum_useJumbo['waiting']
if 'maxJumboPerSite' in taskParamMap:
self.setSplitRule(taskParamMap,'maxJumboPerSite',JediTaskSpec.splitRuleToken['maxJumboPerSite'])
if 'minCpuEfficiency' in taskParamMap:
self.setSplitRule(taskParamMap,'minCpuEfficiency',JediTaskSpec.splitRuleToken['minCpuEfficiency'])
if 'loadXML' in taskParamMap:
self.setSplitRule(None,3,JediTaskSpec.splitRuleToken['loadXML'])
self.setSplitRule(None,4,JediTaskSpec.splitRuleToken['groupBoundaryID'])
if 'pfnList' in taskParamMap:
self.setSplitRule(None,1,JediTaskSpec.splitRuleToken['pfnList'])
if 'noWaitParent' in taskParamMap and taskParamMap['noWaitParent'] is True:
self.setSplitRule(None,1,JediTaskSpec.splitRuleToken['noWaitParent'])
if 'respectLB' in taskParamMap:
self.setSplitRule(None,1,JediTaskSpec.splitRuleToken['respectLB'])
if 'releasePerLB' in taskParamMap:
self.setSplitRule(None, 1, JediTaskSpec.splitRuleToken['releasePerLB'])
if 'orderByLB' in taskParamMap:
self.setSplitRule(None,1,JediTaskSpec.splitRuleToken['orderByLB'])
if 'respectSplitRule' in taskParamMap:
self.setSplitRule(None,1,JediTaskSpec.splitRuleToken['respectSplitRule'])
if 'reuseSecOnDemand' in taskParamMap:
self.setSplitRule(None,1,JediTaskSpec.splitRuleToken['reuseSecOnDemand'])
if 'ddmBackEnd' in taskParamMap:
self.taskSpec.setDdmBackEnd(taskParamMap['ddmBackEnd'])
if 'disableReassign' in taskParamMap:
self.setSplitRule(None,1,JediTaskSpec.splitRuleToken['disableReassign'])
if 'allowPartialFinish' in taskParamMap:
self.setSplitRule(None,1,JediTaskSpec.splitRuleToken['allowPartialFinish'])
if 'useExhausted' in taskParamMap:
self.setSplitRule(None,1,JediTaskSpec.splitRuleToken['useExhausted'])
if 'useRealNumEvents' in taskParamMap:
self.setSplitRule(None,1,JediTaskSpec.splitRuleToken['useRealNumEvents'])
if 'ipConnectivity' in taskParamMap:
self.taskSpec.setIpConnectivity(taskParamMap['ipConnectivity'])
if 'altStageOut' in taskParamMap:
self.taskSpec.setAltStageOut(taskParamMap['altStageOut'])
if 'allowInputLAN' in taskParamMap:
self.taskSpec.setAllowInputLAN(taskParamMap['allowInputLAN'])
if 'runUntilClosed' in taskParamMap:
self.setSplitRule(None,1,JediTaskSpec.splitRuleToken['runUntilClosed'])
if 'stayOutputOnSite' in taskParamMap:
self.setSplitRule(None,1,JediTaskSpec.splitRuleToken['stayOutputOnSite'])
if 'useJobCloning' in taskParamMap:
scValue = EventServiceUtils.getJobCloningValue(taskParamMap['useJobCloning'])
self.setSplitRule(None,scValue,JediTaskSpec.splitRuleToken['useJobCloning'])
if 'failWhenGoalUnreached' in taskParamMap and taskParamMap['failWhenGoalUnreached'] is True:
self.setSplitRule(None,1,JediTaskSpec.splitRuleToken['failGoalUnreached'])
if 'switchEStoNormal' in taskParamMap:
self.setSplitRule(None,1,JediTaskSpec.splitRuleToken['switchEStoNormal'])
if 'nEventsPerRange' in taskParamMap:
self.setSplitRule(taskParamMap,'nEventsPerRange',JediTaskSpec.splitRuleToken['dynamicNumEvents'])
if 'allowInputWAN' in taskParamMap and taskParamMap['allowInputWAN'] is True:
self.setSplitRule(None,1,JediTaskSpec.splitRuleToken['allowInputWAN'])
if 'putLogToOS' in taskParamMap and taskParamMap['putLogToOS'] is True:
self.setSplitRule(None,1,JediTaskSpec.splitRuleToken['putLogToOS'])
if 'mergeEsOnOS' in taskParamMap and taskParamMap['mergeEsOnOS'] is True:
self.setSplitRule(None,1,JediTaskSpec.splitRuleToken['mergeEsOnOS'])
if 'writeInputToFile' in taskParamMap and taskParamMap['writeInputToFile'] is True:
self.setSplitRule(None,1,JediTaskSpec.splitRuleToken['writeInputToFile'])
if 'useFileAsSourceLFN' in taskParamMap and taskParamMap['useFileAsSourceLFN'] is True:
self.setSplitRule(None,1,JediTaskSpec.splitRuleToken['useFileAsSourceLFN'])
if 'ignoreMissingInDS' in taskParamMap and taskParamMap['ignoreMissingInDS'] is True:
self.setSplitRule(None,1,JediTaskSpec.splitRuleToken['ignoreMissingInDS'])
if 'noExecStrCnv' in taskParamMap and taskParamMap['noExecStrCnv'] is True:
self.setSplitRule(None,1,JediTaskSpec.splitRuleToken['noExecStrCnv'])
if 'inFilePosEvtNum' in taskParamMap and taskParamMap['inFilePosEvtNum'] is True:
self.setSplitRule(None,1,JediTaskSpec.splitRuleToken['inFilePosEvtNum'])
if self.taskSpec.useEventService() and not taskSpec.useJobCloning():
if 'registerEsFiles' in taskParamMap and taskParamMap['registerEsFiles'] is True:
self.setSplitRule(None,1,JediTaskSpec.splitRuleToken['registerEsFiles'])
if 'disableAutoFinish' in taskParamMap and taskParamMap['disableAutoFinish'] is True:
self.setSplitRule(None,1,JediTaskSpec.splitRuleToken['disableAutoFinish'])
if 'resurrectConsumers' in taskParamMap and taskParamMap['resurrectConsumers'] is True:
self.setSplitRule(None,1,JediTaskSpec.splitRuleToken['resurrectConsumers'])
if 'usePrefetcher' in taskParamMap and taskParamMap['usePrefetcher'] is True:
self.setSplitRule(None,1,JediTaskSpec.splitRuleToken['usePrefetcher'])
if 'notDiscardEvents' in taskParamMap and taskParamMap['notDiscardEvents'] is True:
self.setSplitRule(None,1,JediTaskSpec.splitRuleToken['notDiscardEvents'])
if 'decAttOnFailedES' in taskParamMap and taskParamMap['decAttOnFailedES'] is True:
self.setSplitRule(None, 1, JediTaskSpec.splitRuleToken['decAttOnFailedES'])
if 'useZipToPin' in taskParamMap and taskParamMap['useZipToPin'] is True:
self.setSplitRule(None, 1, JediTaskSpec.splitRuleToken['useZipToPin'])
if 'osMatching' in taskParamMap and taskParamMap['osMatching'] is True:
self.setSplitRule(None, 1, JediTaskSpec.splitRuleToken['osMatching'])
if 'multiStepExec' in taskParamMap:
self.setSplitRule(None, 1, JediTaskSpec.splitRuleToken['multiStepExec'])
if 'onlyTagsForFC' in taskParamMap:
self.setSplitRule(None, 1, JediTaskSpec.splitRuleToken['onlyTagsForFC'])
if 'segmentedWork' in taskParamMap and 'segmentSpecs' in taskParamMap:
self.setSplitRule(None, 1, JediTaskSpec.splitRuleToken['segmentedWork'])
if 'avoidVP' in taskParamMap:
self.setSplitRule(None, 1, JediTaskSpec.splitRuleToken['avoidVP'])
if 'inputPreStaging' in taskParamMap and taskParamMap['inputPreStaging'] is True:
self.setSplitRule(None, JediTaskSpec.enum_inputPreStaging['use'],
JediTaskSpec.splitRuleToken['inputPreStaging'])
if 'hpoWorkflow' in taskParamMap and taskParamMap['hpoWorkflow'] is True and 'hpoRequestData' in taskParamMap:
self.setSplitRule(None, 1, JediTaskSpec.splitRuleToken['hpoWorkflow'])
if 'noLoopingCheck' in taskParamMap and taskParamMap['noLoopingCheck']:
self.setSplitRule(None, 1, JediTaskSpec.splitRuleToken['noLoopingCheck'])
if 'encJobParams' in taskParamMap and taskParamMap['encJobParams']:
self.setSplitRule(None, 1, JediTaskSpec.splitRuleToken['encJobParams'])
if 'useSecrets' in taskParamMap and taskParamMap['useSecrets']:
self.setSplitRule(None, 1, JediTaskSpec.splitRuleToken['useSecrets'])
if 'debugMode' in taskParamMap and taskParamMap['debugMode']:
self.setSplitRule(None, 1, JediTaskSpec.splitRuleToken['debugMode'])
if 'maxCoreCount' in taskParamMap:
self.setSplitRule(taskParamMap, 'maxCoreCount', JediTaskSpec.splitRuleToken['maxCoreCount'])
# work queue
workQueue = None
if 'workQueueName' in taskParamMap:
# work queue is specified
workQueue = workQueueMapper.getQueueByName(taskSpec.vo, taskSpec.prodSourceLabel, taskParamMap['workQueueName'])
if workQueue is None:
# get work queue based on task attributes
workQueue,tmpStr = workQueueMapper.getQueueWithSelParams(taskSpec.vo,
taskSpec.prodSourceLabel,
prodSourceLabel=taskSpec.prodSourceLabel,
processingType=taskSpec.processingType,
workingGroup=taskSpec.workingGroup,
coreCount=taskSpec.coreCount,
site=taskSpec.site,
eventService=taskSpec.eventService,
splitRule=taskSpec.splitRule,
campaign=taskSpec.campaign)
if workQueue is None:
errStr = 'workqueue is undefined for vo={0} label={1} '.format(taskSpec.vo,taskSpec.prodSourceLabel)
errStr += 'processingType={0} workingGroup={1} coreCount={2} eventService={3} '.format(taskSpec.processingType,
taskSpec.workingGroup,
taskSpec.coreCount,
taskSpec.eventService)
errStr += 'splitRule={0} campaign={1}'.format(taskSpec.splitRule,taskSpec.campaign)
raise RuntimeError(errStr)
self.taskSpec.workQueue_ID = workQueue.queue_id
# Initialize the global share
gshare = 'Undefined'
if 'gshare' in taskParamMap and self.taskBufferIF.is_valid_share(taskParamMap['gshare']):
        # global share is specified
gshare = taskParamMap['gshare']
else:
# get share based on definition
gshare = self.taskBufferIF.get_share_for_task(self.taskSpec)
if gshare is None:
gshare = 'Undefined' # Should not happen. Undefined is set when no share is found
# errStr = 'share is undefined for vo={0} label={1} '.format(taskSpec.vo,taskSpec.prodSourceLabel)
# errStr += 'workingGroup={0} campaign={1} '.format(taskSpec.workingGroup, taskSpec.campaign)
# raise RuntimeError,errStr
self.taskSpec.gshare = gshare
# Initialize the resource type
try:
self.taskSpec.resource_type = self.taskBufferIF.get_resource_type_task(self.taskSpec)
except Exception:
self.taskSpec.resource_type = 'Undefined'
# return
return
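    # Minimal illustrative taskParamMap for extractCommon (keys taken from the
    # accesses above; all values are made up):
    #   {'taskName': 'user.jdoe.test01', 'userName': 'jdoe', 'vo': 'atlas',
    #    'prodSourceLabel': 'user', 'taskPriority': 1000, 'architecture': '',
    #    'transUses': '', 'transHome': None, 'processingType': 'simul',
    #    'taskType': 'anal'}
    # Optional keys (cloud, site, coreCount, gshare, ...) simply fall back to
    # the defaults assigned above.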
# basic refinement procedure
def doBasicRefine(self,taskParamMap):
# get input/output/log dataset specs
nIn = 0
nOutMap = {}
if 'log' not in taskParamMap:
itemList = taskParamMap['jobParameters']
elif isinstance(taskParamMap['log'],dict):
itemList = taskParamMap['jobParameters'] + [taskParamMap['log']]
else:
itemList = taskParamMap['jobParameters'] + taskParamMap['log']
if 'log_merge' in taskParamMap:
itemList += [taskParamMap['log_merge']]
# pseudo input
if 'noInput' in taskParamMap and taskParamMap['noInput'] is True:
tmpItem = {}
tmpItem['type'] = 'template'
tmpItem['value'] = ''
tmpItem['dataset'] = 'pseudo_dataset'
tmpItem['param_type'] = 'pseudo_input'
itemList = [tmpItem] + itemList
# random seed
if RefinerUtils.useRandomSeed(taskParamMap):
tmpItem = {}
tmpItem['type'] = 'template'
tmpItem['value'] = ''
tmpItem['dataset'] = 'RNDMSEED'
tmpItem['param_type'] = 'random_seed'
itemList.append(tmpItem)
# loop over all items
allDsList = []
for tmpItem in itemList:
# look for datasets
if tmpItem['type'] == 'template' and 'dataset' in tmpItem:
# avoid duplication
if tmpItem['dataset'] not in allDsList:
allDsList.append(tmpItem['dataset'])
else:
continue
datasetSpec = JediDatasetSpec()
datasetSpec.datasetName = tmpItem['dataset']
datasetSpec.jediTaskID = self.taskSpec.jediTaskID
datasetSpec.type = tmpItem['param_type']
if 'container' in tmpItem:
datasetSpec.containerName = tmpItem['container']
if 'token' in tmpItem:
datasetSpec.storageToken = tmpItem['token']
if 'destination' in tmpItem:
datasetSpec.destination = tmpItem['destination']
if 'attributes' in tmpItem:
datasetSpec.setDatasetAttribute(tmpItem['attributes'])
if 'ratio' in tmpItem:
datasetSpec.setDatasetAttribute('ratio={0}'.format(tmpItem['ratio']))
if 'eventRatio' in tmpItem:
datasetSpec.setEventRatio(tmpItem['eventRatio'])
if 'check' in tmpItem:
datasetSpec.setDatasetAttribute('cc')
if 'usedup' in tmpItem:
datasetSpec.setDatasetAttribute('ud')
if 'random' in tmpItem:
datasetSpec.setDatasetAttribute('rd')
if 'reusable' in tmpItem:
datasetSpec.setDatasetAttribute('ru')
if 'indexConsistent' in tmpItem:
datasetSpec.setDatasetAttributeWithLabel('indexConsistent')
if 'mergeOnly' in tmpItem:
datasetSpec.setDatasetAttributeWithLabel('mergeOnly')
if 'offset' in tmpItem:
datasetSpec.setOffset(tmpItem['offset'])
if 'allowNoOutput' in tmpItem:
datasetSpec.allowNoOutput()
if 'nFilesPerJob' in tmpItem:
datasetSpec.setNumFilesPerJob(tmpItem['nFilesPerJob'])
if 'num_records' in tmpItem:
datasetSpec.setNumRecords(tmpItem['num_records'])
if 'transient' in tmpItem:
datasetSpec.setTransient(tmpItem['transient'])
if 'pseudo' in tmpItem:
datasetSpec.setPseudo()
datasetSpec.vo = self.taskSpec.vo
datasetSpec.nFiles = 0
datasetSpec.nFilesUsed = 0
datasetSpec.nFilesFinished = 0
datasetSpec.nFilesFailed = 0
datasetSpec.nFilesOnHold = 0
datasetSpec.nFilesWaiting = 0
datasetSpec.nEvents = 0
datasetSpec.nEventsUsed = 0
datasetSpec.nEventsToBeUsed = 0
datasetSpec.status = 'defined'
if datasetSpec.type in JediDatasetSpec.getInputTypes() + ['random_seed']:
datasetSpec.streamName = RefinerUtils.extractStreamName(tmpItem['value'])
if 'expandedList' not in tmpItem:
tmpItem['expandedList'] = []
# dataset names could be comma-concatenated
datasetNameList = datasetSpec.datasetName.split(',')
# datasets could be added by incexec
incexecDS = 'dsFor{0}'.format(datasetSpec.streamName)
# remove /XYZ
incexecDS = incexecDS.split('/')[0]
if incexecDS in taskParamMap:
for tmpDatasetName in taskParamMap[incexecDS].split(','):
if tmpDatasetName not in datasetNameList:
datasetNameList.append(tmpDatasetName)
# loop over all dataset names
inDatasetSpecList = []
for datasetName in datasetNameList:
# skip empty
if datasetName == '':
continue
# expand
if datasetSpec.isPseudo() or datasetSpec.type in ['random_seed'] or datasetName == 'DBR_LATEST':
# pseudo input
tmpDatasetNameList = [datasetName]
if self.taskSpec.is_work_segmented():
tmpDatasetNameList *= len(taskParamMap['segmentSpecs'])
elif 'expand' in tmpItem and tmpItem['expand'] is True:
# expand dataset container
tmpDatasetNameList = self.ddmIF.getInterface(self.taskSpec.vo).expandContainer(datasetName)
else:
# normal dataset name
tmpDatasetNameList = self.ddmIF.getInterface(self.taskSpec.vo).listDatasets(datasetName)
i_element = 0
for elementDatasetName in tmpDatasetNameList:
if nIn > 0 or elementDatasetName not in tmpItem['expandedList'] or \
self.taskSpec.is_work_segmented():
tmpItem['expandedList'].append(elementDatasetName)
inDatasetSpec = copy.copy(datasetSpec)
inDatasetSpec.datasetName = elementDatasetName
if nIn > 0 or not self.taskSpec.is_hpo_workflow():
inDatasetSpec.containerName = datasetName
else:
if self.taskSpec.is_work_segmented():
inDatasetSpec.containerName = "{}/{}".format(
taskParamMap['segmentSpecs'][i_element]['name'],
taskParamMap['segmentSpecs'][i_element]['id']
)
else:
inDatasetSpec.containerName = "None/None"
inDatasetSpecList.append(inDatasetSpec)
i_element += 1
# empty input
if inDatasetSpecList == [] and self.oldTaskStatus != 'rerefine':
errStr = 'doBasicRefine : unknown input dataset "{0}"'.format(datasetSpec.datasetName)
self.taskSpec.setErrDiag(errStr)
if datasetSpec.datasetName not in self.unknownDatasetList:
self.unknownDatasetList.append(datasetSpec.datasetName)
raise JediException.UnknownDatasetError(errStr)
# set master flag
for inDatasetSpec in inDatasetSpecList:
if nIn == 0:
# master
self.inMasterDatasetSpec.append(inDatasetSpec)
else:
# secondary
self.inSecDatasetSpecList.append(inDatasetSpec)
nIn += 1
continue
if datasetSpec.type in ['output','log']:
if datasetSpec.type not in nOutMap:
nOutMap[datasetSpec.type] = 0
# make stream name
if not datasetSpec.is_merge_only():
datasetSpec.streamName = "{0}{1}".format(datasetSpec.type.upper(),nOutMap[datasetSpec.type])
else:
datasetSpec.streamName = 'LOG_MERGE'
nOutMap[datasetSpec.type] += 1
# set attribute for event service
if self.taskSpec.useEventService() and 'objectStore' in taskParamMap and datasetSpec.type in ['output']:
datasetSpec.setObjectStore(taskParamMap['objectStore'])
# extract output filename template and change the value field
outFileTemplate,tmpItem['value'] = RefinerUtils.extractReplaceOutFileTemplate(tmpItem['value'],
datasetSpec.streamName)
# make output template
if outFileTemplate is not None:
if 'offset' in tmpItem:
offsetVal = 1 + tmpItem['offset']
else:
offsetVal = 1
outTemplateMap = {'jediTaskID' : self.taskSpec.jediTaskID,
'serialNr' : offsetVal,
'streamName' : datasetSpec.streamName,
'filenameTemplate' : outFileTemplate,
'outtype' : datasetSpec.type,
}
if datasetSpec.outputMapKey() in self.outputTemplateMap:
# multiple files are associated to the same output datasets
self.outputTemplateMap[datasetSpec.outputMapKey()].append(outTemplateMap)
# don't insert the same output dataset
continue
self.outputTemplateMap[datasetSpec.outputMapKey()] = [outTemplateMap]
# append
self.outDatasetSpecList.append(datasetSpec)
# used only in merge
if datasetSpec.is_merge_only():
continue
# make unmerged dataset
if 'mergeOutput' in taskParamMap and taskParamMap['mergeOutput'] is True:
umDatasetSpec = JediDatasetSpec()
umDatasetSpec.datasetName = 'panda.um.' + datasetSpec.datasetName
umDatasetSpec.jediTaskID = self.taskSpec.jediTaskID
umDatasetSpec.storageToken = 'TOMERGE'
umDatasetSpec.vo = datasetSpec.vo
umDatasetSpec.type = "tmpl_trn_" + datasetSpec.type
umDatasetSpec.nFiles = 0
umDatasetSpec.nFilesUsed = 0
umDatasetSpec.nFilesToBeUsed = 0
umDatasetSpec.nFilesFinished = 0
umDatasetSpec.nFilesFailed = 0
umDatasetSpec.nFilesOnHold = 0
umDatasetSpec.status = 'defined'
umDatasetSpec.streamName = datasetSpec.streamName
if datasetSpec.isAllowedNoOutput():
umDatasetSpec.allowNoOutput()
# ratio
if datasetSpec.getRatioToMaster() > 1:
umDatasetSpec.setDatasetAttribute('ratio={0}'.format(datasetSpec.getRatioToMaster()))
# make unmerged output template
if outFileTemplate is not None:
umOutTemplateMap = {'jediTaskID' : self.taskSpec.jediTaskID,
'serialNr' : 1,
'streamName' : umDatasetSpec.streamName,
'outtype' : datasetSpec.type,
}
# append temporary name
if 'umNameAtEnd' in taskParamMap and taskParamMap['umNameAtEnd'] is True:
# append temporary name at the end
umOutTemplateMap['filenameTemplate'] = outFileTemplate + '.panda.um'
else:
umOutTemplateMap['filenameTemplate'] = 'panda.um.' + outFileTemplate
if umDatasetSpec.outputMapKey() in self.outputTemplateMap:
# multiple files are associated to the same output datasets
self.outputTemplateMap[umDatasetSpec.outputMapKey()].append(umOutTemplateMap)
# don't insert the same output dataset
continue
self.outputTemplateMap[umDatasetSpec.outputMapKey()] = [umOutTemplateMap]
# use log as master for merging
if datasetSpec.type == 'log':
self.unmergeMasterDatasetSpec[datasetSpec.outputMapKey()] = umDatasetSpec
else:
# append
self.unmergeDatasetSpecMap[datasetSpec.outputMapKey()] = umDatasetSpec
# set attributes for merging
if 'mergeOutput' in taskParamMap and taskParamMap['mergeOutput'] is True:
self.setSplitRule(None,1,JediTaskSpec.splitRuleToken['mergeOutput'])
# make job parameters
rndmSeedOffset = None
firstEventOffset = None
jobParameters = ''
for tmpItem in taskParamMap['jobParameters']:
if 'value' in tmpItem:
# hidden parameter
if 'hidden' in tmpItem and tmpItem['hidden'] is True:
continue
# add tags for ES-only parameters
esOnly = False
if 'es_only' in tmpItem and tmpItem['es_only'] is True:
esOnly = True
if esOnly:
jobParameters += '<PANDA_ES_ONLY>'
jobParameters += '{0}'.format(tmpItem['value'])
if esOnly:
jobParameters += '</PANDA_ES_ONLY>'
# padding
if 'padding' in tmpItem and tmpItem['padding'] is False:
pass
else:
jobParameters += ' '
# get offset for random seed and first event
if tmpItem['type'] == 'template' and tmpItem['param_type'] == 'number':
if '${RNDMSEED}' in tmpItem['value']:
if 'offset' in tmpItem:
rndmSeedOffset = tmpItem['offset']
else:
rndmSeedOffset = 0
elif '${FIRSTEVENT}' in tmpItem['value']:
if 'offset' in tmpItem:
firstEventOffset = tmpItem['offset']
jobParameters = jobParameters[:-1]
# append parameters for event service merging if necessary
esmergeParams = self.getParamsForEventServiceMerging(taskParamMap)
if esmergeParams is not None:
jobParameters += esmergeParams
self.setJobParamsTemplate(jobParameters)
# set random seed offset
if rndmSeedOffset is not None:
self.setSplitRule(None,rndmSeedOffset,JediTaskSpec.splitRuleToken['randomSeed'])
if firstEventOffset is not None:
self.setSplitRule(None,firstEventOffset,JediTaskSpec.splitRuleToken['firstEvent'])
# send HPO request
if self.taskSpec.is_hpo_workflow():
try:
data = copy.copy(taskParamMap['hpoRequestData'])
data['workload_id'] = self.taskSpec.jediTaskID
data['is_pseudo_input'] = True
req = {
'requester': 'panda',
'request_type': idds.common.constants.RequestType.HyperParameterOpt,
'transform_tag': idds.common.constants.RequestType.HyperParameterOpt.value,
'status': idds.common.constants.RequestStatus.New,
'priority': 0,
'lifetime': 30,
'request_metadata': data,
}
c = iDDS_Client(idds.common.utils.get_rest_host())
self.tmpLog.debug('req {0}'.format(str(req)))
ret = c.add_request(**req)
self.tmpLog.debug('got requestID={0}'.format(str(ret)))
except Exception as e:
errStr = 'iDDS failed with {0}'.format(str(e))
raise JediException.ExternalTempError(errStr)
# return
return
# replace placeholder with dict provided by prepro job
def replacePlaceHolders(self,paramItem,placeHolderName,newValue):
if isinstance(paramItem, dict):
# loop over all dict params
for tmpParName,tmpParVal in iteritems(paramItem):
if tmpParVal == placeHolderName:
# replace placeholder
paramItem[tmpParName] = newValue
elif isinstance(tmpParVal, dict) or \
isinstance(tmpParVal, list):
# recursive execution
self.replacePlaceHolders(tmpParVal,placeHolderName,newValue)
elif isinstance(paramItem, list):
# loop over all list items
for tmpItem in paramItem:
self.replacePlaceHolders(tmpItem,placeHolderName,newValue)
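    # Illustration (hypothetical placeholder name): calling
    #   replacePlaceHolders({'out': '${DS}', 'sub': {'in': '${DS}'}}, '${DS}', 'user.x')
    # rewrites the dict in place to {'out': 'user.x', 'sub': {'in': 'user.x'}};
    # only dict values are substituted, recursing through nested dicts/lists.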
# refinement procedure for preprocessing
def doPreProRefine(self,taskParamMap):
# no preprocessing
if 'preproSpec' not in taskParamMap:
return None,taskParamMap
# already preprocessed
if self.taskSpec.checkPreProcessed():
# get replaced task params
tmpStat,tmpJsonStr = self.taskBufferIF.getPreprocessMetadata_JEDI(self.taskSpec.jediTaskID)
try:
# replace placeholders
replaceParams = RefinerUtils.decodeJSON(tmpJsonStr)
self.tmpLog.debug("replace placeholders with "+str(replaceParams))
for tmpKey,tmpVal in iteritems(replaceParams):
self.replacePlaceHolders(taskParamMap,tmpKey,tmpVal)
except Exception:
errtype,errvalue = sys.exc_info()[:2]
self.tmpLog.error('{0} failed to get additional task params with {1}:{2}'.format(self.__class__.__name__,
errtype.__name__,errvalue))
return False,taskParamMap
# succeeded
self.updatedTaskParams = taskParamMap
return None,taskParamMap
# make dummy dataset to keep track of preprocessing
datasetSpec = JediDatasetSpec()
datasetSpec.datasetName = 'panda.pp.in.{0}.{1}'.format(uuid.uuid4(),self.taskSpec.jediTaskID)
datasetSpec.jediTaskID = self.taskSpec.jediTaskID
datasetSpec.type = 'pp_input'
datasetSpec.vo = self.taskSpec.vo
datasetSpec.nFiles = 1
datasetSpec.nFilesUsed = 0
datasetSpec.nFilesToBeUsed = 1
datasetSpec.nFilesFinished = 0
datasetSpec.nFilesFailed = 0
datasetSpec.nFilesOnHold = 0
datasetSpec.status = 'ready'
self.inMasterDatasetSpec.append(datasetSpec)
# make file
fileSpec = JediFileSpec()
fileSpec.jediTaskID = datasetSpec.jediTaskID
fileSpec.type = datasetSpec.type
fileSpec.status = 'ready'
fileSpec.lfn = 'pseudo_lfn'
fileSpec.attemptNr = 0
fileSpec.maxAttempt = 3
fileSpec.keepTrack = 1
datasetSpec.addFile(fileSpec)
# make log dataset
logDatasetSpec = JediDatasetSpec()
logDatasetSpec.datasetName = 'panda.pp.log.{0}.{1}'.format(uuid.uuid4(),self.taskSpec.jediTaskID)
logDatasetSpec.jediTaskID = self.taskSpec.jediTaskID
logDatasetSpec.type = 'tmpl_pp_log'
logDatasetSpec.streamName = 'PP_LOG'
logDatasetSpec.vo = self.taskSpec.vo
logDatasetSpec.nFiles = 0
logDatasetSpec.nFilesUsed = 0
logDatasetSpec.nFilesToBeUsed = 0
logDatasetSpec.nFilesFinished = 0
logDatasetSpec.nFilesFailed = 0
logDatasetSpec.nFilesOnHold = 0
logDatasetSpec.status = 'defined'
self.outDatasetSpecList.append(logDatasetSpec)
# make output template for log
outTemplateMap = {'jediTaskID' : self.taskSpec.jediTaskID,
'serialNr' : 1,
'streamName' : logDatasetSpec.streamName,
'filenameTemplate' : "{0}._${{SN}}.log.tgz".format(logDatasetSpec.datasetName),
'outtype' : re.sub('^tmpl_','',logDatasetSpec.type),
}
self.outputTemplateMap[logDatasetSpec.outputMapKey()] = [outTemplateMap]
# set split rule to use preprocessing
self.taskSpec.setPrePro()
# set task status
self.taskSpec.status = 'topreprocess'
# return
return True,taskParamMap
# set split rule
def setSplitRule(self,taskParamMap,keyName,valName):
if taskParamMap is not None:
if keyName not in taskParamMap:
return
tmpStr = '{0}={1}'.format(valName,taskParamMap[keyName])
else:
tmpStr = '{0}={1}'.format(valName,keyName)
if self.taskSpec.splitRule in [None,'']:
self.taskSpec.splitRule = tmpStr
else:
            tmpMatch = re.search(valName + r'=(-*\d+)(,-*\d+)*', self.taskSpec.splitRule)
if tmpMatch is None:
# append
self.taskSpec.splitRule += ',{0}'.format(tmpStr)
else:
# replace
self.taskSpec.splitRule = self.taskSpec.splitRule.replace(tmpMatch.group(0), tmpStr)
return
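    # Sketch of how splitRule evolves (token strings are illustrative; the
    # real ones come from JediTaskSpec.splitRuleToken): starting from None,
    #   setSplitRule({'nFilesPerJob': 5}, 'nFilesPerJob', 'NF')  -> 'NF=5'
    #   setSplitRule(None, 2, 'ES')                              -> 'NF=5,ES=2'
    #   setSplitRule(None, 3, 'NF')                              -> 'NF=3,ES=2'
    # i.e. with taskParamMap=None the 'keyName' argument is the value itself,
    # and an already-present token is replaced rather than appended.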
# get parameters for event service merging
def getParamsForEventServiceMerging(self,taskParamMap):
# no event service
if not self.taskSpec.useEventService():
return None
# extract parameters
transPath = 'UnDefined'
jobParameters = 'UnDefined'
if 'esmergeSpec' in taskParamMap:
if 'transPath' in taskParamMap['esmergeSpec']:
transPath = taskParamMap['esmergeSpec']['transPath']
if 'jobParameters' in taskParamMap['esmergeSpec']:
jobParameters = taskParamMap['esmergeSpec']['jobParameters']
# return
return '<PANDA_ESMERGE_TRF>'+transPath+'</PANDA_ESMERGE_TRF>'+'<PANDA_ESMERGE_JOBP>'+jobParameters+'</PANDA_ESMERGE_JOBP>'
Interaction.installSC(TaskRefinerBase)
| PanDAWMS/panda-jedi | pandajedi/jedirefine/TaskRefinerBase.py | Python | apache-2.0 | 47,294 |
# -*- coding: utf-8 -*-
from etapi.app import create_app
from etapi.settings import ProdConfig, DevConfig
def test_production_config():
app = create_app(ProdConfig)
assert app.config['ENV'] == 'prod'
assert app.config['DEBUG'] is False
assert app.config['DEBUG_TB_ENABLED'] is False
assert app.config['ASSETS_DEBUG'] is False
def test_dev_config():
app = create_app(DevConfig)
assert app.config['ENV'] == 'dev'
assert app.config['DEBUG'] is True
assert app.config['ASSETS_DEBUG'] is True
| hypebeast/etapi | tests/test_config.py | Python | bsd-3-clause | 527 |
import pyximport; pyximport.install()
import math
#from numpy import *
import numpy as np
import numpy.linalg as la
from rlscore.learner.abstract_learner import AbstractLearner
from rlscore import model
from rlscore.utilities import array_tools
from rlscore.utilities import decomposition
from rlscore.utilities import sparse_kronecker_multiplication_tools
import cython_two_step_rls_cv
class TwoStepRLS(object):
def __init__(self, **kwargs):
Y = kwargs["train_labels"]
Y = array_tools.as_labelmatrix(Y)
self.Y = Y
if kwargs.has_key('kmatrix1'):
K1 = np.mat(kwargs['kmatrix1'])
K2 = np.mat(kwargs['kmatrix2'])
self.K1, self.K2 = K1, K2
self.kernelmode = True
else:
X1 = np.mat(kwargs['xmatrix1'])
X2 = np.mat(kwargs['xmatrix2'])
self.X1, self.X2 = X1, X2
self.kernelmode = False
if kwargs.has_key('regparam1'):
self.regparam1 = kwargs["regparam1"]
else:
self.regparam1 = kwargs["regparam"]
if kwargs.has_key('regparam2'):
self.regparam2 = kwargs["regparam2"]
else:
self.regparam2 = kwargs["regparam"]
self.trained = False
def createLearner(cls, **kwargs):
learner = cls(**kwargs)
return learner
createLearner = classmethod(createLearner)
def train(self):
if self.kernelmode:
self.solve_kernel(self.regparam1, self.regparam2)
else:
self.solve_linear(self.regparam1, self.regparam2)
def solve_kernel(self, regparam1, regparam2):
self.regparam1 = regparam1
self.regparam2 = regparam2
K1, K2 = self.K1, self.K2
Y = self.Y.reshape((K1.shape[0], K2.shape[0]), order='F')
#assert self.Y.shape == (self.K1.shape[0], self.K2.shape[0]), 'Y.shape!=(K1.shape[0],K2.shape[0]). Y.shape=='+str(Y.shape)+', K1.shape=='+str(self.K1.shape)+', K2.shape=='+str(self.K2.shape)
if not self.trained:
self.trained = True
evals1, V = decomposition.decomposeKernelMatrix(K1)
evals1 = np.mat(evals1).T
evals1 = np.multiply(evals1, evals1)
V = np.mat(V)
self.evals1 = evals1
self.V = V
evals2, U = decomposition.decomposeKernelMatrix(K2)
evals2 = np.mat(evals2).T
evals2 = np.multiply(evals2, evals2)
U = np.mat(U)
self.evals2 = evals2
self.U = U
            # use the locally reshaped Y (matches solve_linear below)
            self.VTYU = V.T * Y * U
#newevals = 1. / (self.evals1 * self.evals2.T + regparam)
self.newevals1 = 1. / (self.evals1 + regparam1)
self.newevals2 = 1. / (self.evals2 + regparam2)
newevals = self.newevals1 * self.newevals2.T
self.A = np.multiply(self.VTYU, newevals)
self.A = self.V * self.A * self.U.T
self.model = KernelPairwiseModel(self.A)
#self.dsikm1 = la.inv(K1 + regparam1 * (np.mat(np.eye(K1.shape[0]))))
#self.dsikm2 = la.inv(K2 + regparam2 * (np.mat(np.eye(K2.shape[0]))))
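        # Closed form computed above (explanatory note, not original code):
        # with eigendecompositions K1 = V D1 V^T and K2 = U D2 U^T, the
        # two-step RLS dual coefficients are
        #
        #   A = V [ (V^T Y U) o ( 1 / ((d1_i + r1) (d2_j + r2)) ) ] U^T
        #
        # where 'o' is the elementwise (Hadamard) product, d1_i/d2_j the
        # eigenvalues and r1/r2 the two regularization parameters; the full
        # (n1*n2) x (n1*n2) Kronecker system is never formed explicitly.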
def solve_linear(self, regparam1, regparam2):
self.regparam1 = regparam1
self.regparam2 = regparam2
X1, X2 = self.X1, self.X2
Y = self.Y.reshape((X1.shape[0], X2.shape[0]), order='F')
if not self.trained:
self.trained = True
svals1, V, rsvecs1 = decomposition.decomposeDataMatrix(X1.T)
self.svals1 = svals1.T
self.evals1 = np.multiply(self.svals1, self.svals1)
self.V = V
self.rsvecs1 = np.mat(rsvecs1)
if X1.shape == X2.shape and (X1 == X2).all():
svals2, U, rsvecs2 = svals1, V, rsvecs1
else:
svals2, U, rsvecs2 = decomposition.decomposeDataMatrix(X2.T)
self.svals2 = svals2.T
self.evals2 = np.multiply(self.svals2, self.svals2)
self.U = U
self.rsvecs2 = np.mat(rsvecs2)
self.VTYU = V.T * Y * U
self.newevals1 = 1. / (self.evals1 + regparam1)
self.newevals2 = 1. / (self.evals2 + regparam2)
newevals = self.newevals1 * self.newevals2.T
newevals = np.multiply(self.svals1, self.newevals1) * np.multiply(self.svals2, self.newevals2).T
self.W = np.multiply(self.VTYU, newevals)
self.W = self.rsvecs1.T * self.W * self.rsvecs2
self.model = LinearPairwiseModel(self.W)
def computeLOO(self):
bevals_col = np.multiply(self.evals2, self.newevals2).T
svecsm_col = np.multiply(bevals_col, self.U)
#print rightall.shape, svecsm.shape, self.Y.shape
#right = svecsm.T * self.Y - multiply(svecsm, self.Y).T
RQR_col = np.sum(np.multiply(self.U, svecsm_col), axis = 1)
#RQY = sum(multiply(self.svecs.T, right), axis = 0)
#RQY = sum(multiply(self.svecs.T, svecsm.T * self.Y), axis = 0) - sum(multiply(RQRT.T, self.Y), axis = 1).T
#RQY = self.svecs * (svecsm.T * self.Y) - sum(multiply(RQR, self.Y), axis = 1)
LOO_ek_col = (1. / (1. - RQR_col))
#LOO = multiply(LOO_ek, RQY)
#print LOO_ek.shape, (self.svecs * (svecsm.T * self.Y)).shape, RQR.shape, self.Y.shape
LOO_col = (np.multiply(LOO_ek_col, self.U * (svecsm_col.T * self.Y.T)) - np.multiply(LOO_ek_col, np.multiply(RQR_col, self.Y.T))).T
#print 'LOO_col', LOO_col
bevals_row = np.multiply(self.evals1, self.newevals1).T
svecsm_row = np.multiply(bevals_row, self.V)
#print rightall.shape, svecsm.shape, self.Y.shape
#right = svecsm.T * self.Y - multiply(svecsm, self.Y).T
RQR_row = np.sum(np.multiply(self.V, svecsm_row), axis = 1)
#RQY = sum(multiply(self.svecs.T, right), axis = 0)
#RQY = sum(multiply(self.svecs.T, svecsm.T * self.Y), axis = 0) - sum(multiply(RQRT.T, self.Y), axis = 1).T
#RQY = self.svecs * (svecsm.T * self.Y) - sum(multiply(RQR, self.Y), axis = 1)
LOO_ek_row = (1. / (1. - RQR_row))
#LOO = multiply(LOO_ek, RQY)
#print LOO_ek.shape, (self.svecs * (svecsm.T * self.Y)).shape, RQR.shape, self.Y.shape
LOO_two_step = np.multiply(LOO_ek_row, self.V * (svecsm_row.T * LOO_col)) - np.multiply(LOO_ek_row, np.multiply(RQR_row, LOO_col))
return LOO_two_step
def compute_symmetric_double_LOO(self):
#bevals_col = np.multiply(self.evals2, self.newevals2).T
#multiplyright = self.U.T * self.Y.T
#I = np.mat(np.identity(2))
G = np.multiply((self.newevals1.T-(1./self.regparam1)), self.V) * self.V.T + (1./self.regparam1) * np.mat(np.identity(self.K1.shape[0]))
#G2 = np.multiply((self.newevals2.T-(1./self.regparam)), self.U) * self.U.T + (1./self.regparam) * np.mat(np.identity(self.K2.shape[0]))
GY = G * self.Y
#YG2 = self.Y * G2
GYG = GY * G
#A2 = G2 * self.Y.T
i, j = 2, 4
inds = [i, j]
#A = self.U[inds]
#right = multiplyright - A.T * self.Y.T[inds]
#RQY = A * np.multiply(bevals_col.T, right)
#B = np.multiply(bevals_col.T, A.T)
#HO_col = (la.inv(I - A * B) * RQY).T
#HO_col = (self.Y.T[inds]-la.inv(G2[np.ix_(inds, inds)]) * A2[inds]).T
#print HO_col.shape
#bevals_row = np.multiply(self.evals1, self.newevals1).T
#multiplyright = self.V.T * HO_col
#A = self.V[inds]
#right = multiplyright - A.T * HO_col[inds]
#RQY = A * np.multiply(bevals_row.T, right)
#B = np.multiply(bevals_col.T, A.T)
#HO_row = la.inv(I - A * B) * RQY
#A1 = G1[inds] * HO_col
#HO_row = HO_col[inds]-la.inv(G1[np.ix_(inds, inds)]) * A1
#HO_rowr = self.Y[np.ix_(inds, inds)] \
# - YG2[np.ix_(inds, inds)] * la.inv(G2[np.ix_(inds, inds)]) \
# - la.inv(G1[np.ix_(inds, inds)]) * G1Y[np.ix_(inds, inds)] \
# + la.inv(G1[np.ix_(inds, inds)]) * G1YG2[np.ix_(inds, inds)] * la.inv(G2[np.ix_(inds, inds)])
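        # Editor's note: the lines below evaluate the double hold-out for the
        # symmetric index pair `inds` through a block-inverse identity.  With
        # G = (K1 + regparam1*I)^-1 as formed above, the held-out block is
        #     Y_ii - (invGii*GY_ii).T - invGii*GY_ii + invGii*GYG_ii*invGii,
        # and the Cython routine repeats this for every index pair.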
invGii = la.inv(G[np.ix_(inds, inds)])
GYii = GY[np.ix_(inds, inds)]
invGiiGYii = invGii * GYii
HO_rowr = self.Y[np.ix_(inds, inds)] \
- invGiiGYii.T \
- invGiiGYii \
+ invGii * GYG[np.ix_(inds, inds)] * invGii
#II1 = np.mat(np.identity(self.Y.shape[0]))[inds]
#II2 = np.mat(np.identity(self.Y.shape[1]))[:, inds]
#HO_rowr = (II1 - la.inv(G1[np.ix_(inds, inds)]) * G1[inds]) * self.Y * (II2 - G2[:, inds] * la.inv(G2[np.ix_(inds, inds)]))
#print HO_row.shape
results = np.zeros((self.Y.shape[0], self.Y.shape[1]))
cython_two_step_rls_cv.compute_symmetric_double_loo(G, self.Y, GY, GYG, results, self.Y.shape[0], self.Y.shape[1])
return results
def getModel(self):
return self.model
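# --- Illustrative sketch (editor's addition, not part of the original file).
# The spectral shortcut in solve_kernel above is equivalent to the direct
# closed form  A = (K1 + regparam1*I)^-1 * Y * (K2 + regparam2*I)^-1.
# The self-contained NumPy check below uses plain numpy.linalg.eigh instead
# of rlscore's decomposition helpers; all names are hypothetical.
def _two_step_rls_closed_form_demo():
    import numpy
    rng = numpy.random.RandomState(0)
    X1, X2 = rng.randn(5, 3), rng.randn(4, 3)
    K1 = numpy.dot(X1, X1.T)  # two positive semi-definite kernel matrices
    K2 = numpy.dot(X2, X2.T)
    Y = rng.randn(5, 4)
    lam1, lam2 = 0.5, 2.0
    evals1, V = numpy.linalg.eigh(K1)  # K1 == V * diag(evals1) * V.T
    evals2, U = numpy.linalg.eigh(K2)
    VTYU = numpy.dot(numpy.dot(V.T, Y), U)
    newevals = numpy.outer(1. / (evals1 + lam1), 1. / (evals2 + lam2))
    A = numpy.dot(numpy.dot(V, VTYU * newevals), U.T)  # filtered spectrum
    A_direct = numpy.dot(numpy.linalg.solve(K1 + lam1 * numpy.eye(5), Y),
                         numpy.linalg.inv(K2 + lam2 * numpy.eye(4)))
    assert numpy.allclose(A, A_direct)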
class KernelPairwiseModel(object):
def __init__(self, A, kernel = None):
"""Initializes the dual model
@param A: dual coefficient matrix
@type A: numpy matrix"""
self.A = A
self.kernel = kernel
def predictWithKernelMatrices(self, K1pred, K2pred):
"""Computes predictions for test examples.
Parameters
----------
K1pred: {array-like, sparse matrix}, shape = [n_samples1, n_basis_functions1]
the first part of the test data matrix
K2pred: {array-like, sparse matrix}, shape = [n_samples2, n_basis_functions2]
the second part of the test data matrix
Returns
----------
P: array, shape = [n_samples1, n_samples2]
predictions
"""
#print K1pred.shape, self.A.shape, K2pred.shape
P = np.array(np.dot(np.dot(K1pred, self.A), K2pred.T))
return P
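# Illustrative usage sketch (editor's addition): prediction reduces to two
# matrix products, giving one score per (sample1, sample2) pair.
def _pairwise_predict_demo():
    import numpy
    A = numpy.ones((5, 4))  # dual coefficients, shape (n_basis1, n_basis2)
    K1pred, K2pred = numpy.ones((2, 5)), numpy.ones((3, 4))
    P = KernelPairwiseModel(A).predictWithKernelMatrices(K1pred, K2pred)
    assert P.shape == (2, 3)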
class LinearPairwiseModel(object):
def __init__(self, W):
"""Initializes the linear model
@param W: primal coefficient matrix
@type W: numpy matrix"""
self.W = W
def predictWithDataMatrices(self, X1pred, X2pred):
"""Computes predictions for test examples.
Parameters
----------
X1pred: {array-like, sparse matrix}, shape = [n_samples1, n_features1]
the first part of the test data matrix
X2pred: {array-like, sparse matrix}, shape = [n_samples2, n_features2]
the second part of the test data matrix
Returns
----------
P: array, shape = [n_samples1, n_samples2]
predictions
"""
P = np.array(np.dot(np.dot(X1pred, self.W), X2pred.T))
return P
| max291/RLScore | rlscore/learner/two_step_rls.py | Python | mit | 10,917 |
from distutils.core import setup
setup(
name = "aswath",
packages = ['aswath'],
description = 'Aswath Damodaran DCF calculations',
author = 'Mark Carter'
)
| blippy/aswath | setup.py | Python | gpl-3.0 | 179 |
#!/usr/bin/env python3
###############################################################################
# pynoter -- message handler
#
# The message handler of the pynoter package. This class was designed to
# display the messages from all clients correctly. A problem encountered
# during the development of this library was that messages could get lost
# if they weren't enqueued correctly. Therefore, a worker thread was
# designed to address this problem. This thread has a message queue,
# displays the messages in the correct order, and checks that everything is
# shown the way the user wants to see it.
#
# License: GPLv3
#
# (c) Till Smejkal - [email protected]
###############################################################################
from threading import Thread, Lock, RLock, Semaphore
import logging
logger = logging.getLogger(__name__)
__all__ = ['MessageHandler']
class Item:
"""
The interface which an item of the queue must implement.
"""
def __call__(self, message_handler):
"""
Function call operator.
        Everything the item wants to do should be done here. This function
        is called from the run method of the MessageHandler class.
:param message_handler: The MessageHandler instance which wants to
execute the items tasks.
:type message_handler: MessageHandler
"""
raise NotImplementedError()
class MessageItem(Item):
"""
An item which can be put in the queue representing a message which should
be displayed.
"""
def __init__(self, handler, message):
"""
Constructor of this class.
:param handler: The client handler serving the client of this message.
:type handler: ClientHandler
:param message: The message which should be displayed.
:type message: Message
"""
self._id = handler.id + "-" + message.id
self._ref_id = handler.id + "-" + message.reference
self._handler = handler
self._message = message
def __call__(self, message_handler):
"""
Function call operator.
        Show the message and all the other messages that are part of its
        closure, and wait until the message vanishes again before continuing
        execution.
:param message_handler: The MessageHandler instance which wants to
execute the item.
:type message_handler: MessageHandler
"""
message_handler._show_with_closure(self)
message_handler._wait()
message_handler._reset_current()
@property
def id(self):
"""
Get the identifier of this message item.
:rtype: str
:return: The identifier of this message item.
"""
return self._id
@property
def message(self):
"""
Get the contained message.
:rtype: Message
:return: The message contained in this item.
"""
return self._message
@property
def ref_id(self):
"""
Get the identifier of the message item which is referenced by this one.
:rtype: str
:return: The identifier of the referenced message item.
"""
return self._ref_id
def revises(item1, item2):
"""
Check if the first given item revises the second one.
:param item1: The item which may revises the other one.
:type item1: MessageItem
:param item2: The item which may be revised by the other one.
:type item2: MessageItem
:rtype: bool
:return: Whether or not the first item revises the second one.
"""
    if item1 is None or item2 is None:
        return False
    if not isinstance(item1, MessageItem) or \
       not isinstance(item2, MessageItem):
        return False
# Check if the first item revises the second one.
if item1.message.updates:
# If the item performs an update, just the reference id is important.
return item1.ref_id == item2.id
if item1.message.appends:
# If the item performs an append, the reference id and the subjects of
# the two messages are important as the notification library only
# appends one message to another if they have the same subject.
return item1.ref_id == item2.id and \
item1.message.subject == item2.message.subject
return False
def closure(item, queue):
"""
Calculate the transitive closure for the revise relation on the
given queue.
:param item: The item for which the closure should be calculated.
:type item: MessageItem
:param queue: The queue containing the other elements.
:type queue: Queue
:rtype: list[MessageItem]
:return: The list with all items part of the closure.
"""
others = [i for i in queue if revises(i, item)]
for i in others[:]:
# Remove the item from the queue.
queue.remove(i)
# Find all items revising the found ones recursively.
others.extend(closure(i, queue))
return others
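# Illustrative sketch (editor's addition): `revises` links an update to the
# item it replaces, and `closure` drains every transitively linked item from
# a queue.  The handler/message stand-ins are hypothetical; only the `id`,
# `reference`, `updates`, `appends` and `subject` attributes are consulted.
# (Queue is defined further below, so run this only after the module has
# been fully imported.)
def _closure_demo():
    from types import SimpleNamespace
    handler = SimpleNamespace(id='client1')
    shown = MessageItem(handler, SimpleNamespace(
        id='m1', reference='m0', updates=False, appends=False, subject='s'))
    update = MessageItem(handler, SimpleNamespace(
        id='m2', reference='m1', updates=True, appends=False, subject='s'))
    assert revises(update, shown)
    queue = Queue()
    queue.enqueue(update)
    assert closure(shown, queue) == [update]  # drained from the queue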
class HandlerStopItem:
"""
    An item which, when put into the queue, causes the MessageHandler to stop
    its execution.
"""
def __call__(self, message_handler):
"""
Function call operator.
Cause the given MessageHandler instance to stop its execution.
:param message_handler: The MessageHandler instance which wants to
execute the item.
:type message_handler: MessageHandler
"""
message_handler._should_stop = True
class Queue:
"""
An asynchronous FIFO message queue working according to the producer
consumer pattern.
"""
def __init__(self):
"""
Constructor of this class. It will set up the internal data structure
as well as all synchronization variables.
"""
self._queue = [] #< The internal list of messages.
self._semaphore = Semaphore(0) #< The counting semaphore used to reach
# the producer consumer pattern without
# busy waiting.
self._lock = Lock() #< The lock to protect the internal list.
def __iter__(self):
"""
Get an iterator for the queue.
        During iteration the internal lock is held. Hence no inserts or
        removals may be performed from within the loop; otherwise a deadlock
        will occur.
"""
with self._lock:
for i in self._queue:
yield i
def enqueue(self, item):
"""
Add an item to the tail of the queue.
:param item: The item which should be added.
:type item: Item
"""
with self._lock:
self._queue.append(item)
self._semaphore.release()
def dequeue(self):
"""
        Get the next item from the queue. If there are items in the list, the
head will be returned. Otherwise, this method will block until a new
item is added.
"""
self._semaphore.acquire()
with self._lock:
return self._queue.pop(0)
def remove(self, item):
"""
Remove an item from the list at an arbitrary position.
This will also count down the internal semaphore.
:param item: The item which should be removed from the list.
:type item: Item
"""
self._semaphore.acquire()
with self._lock:
self._queue.remove(item)
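# Illustrative sketch (editor's addition): the queue implements the classic
# producer/consumer hand-off -- dequeue() blocks on the semaphore until a
# matching enqueue() releases it.  Minimal demo, standard library only:
def _queue_demo():
    received = []
    queue = Queue()
    def consume():
        for _ in range(3):
            received.append(queue.dequeue())  # blocks until an item arrives
    consumer = Thread(target=consume)
    consumer.start()
    for item in range(3):
        queue.enqueue(item)  # each enqueue wakes the consumer once
    consumer.join()
    assert received == [0, 1, 2]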
class MessageHandler(Thread):
"""
This class is the worker thread which asynchronously displays the
notification messages which are received from the clients. It has an
internal queue where messages can be enqueued and which works according
to the producer consumer pattern.
"""
def __init__(self):
"""
Constructor of the class. Here the thread will be initialized as well
as all used locks and other synchronization variables.
"""
logger.debug("Create a new message handler")
# Call the super constructor to properly setup the thread.
super(MessageHandler, self).__init__()
# Internal variables.
self._should_stop = False #< Indicates that the thread should stop
# its loop.
self._queue = Queue() #< The queue of item which must be processed
# by the consumer thread.
self._current = None #< Information about the message which is
# displayed at the moment.
self._current_lock = RLock() #< Lock for the information about the
# currently displayed message as they are
# accessed from this thread and from
# others as well.
def _reset_current(self):
"""
Reset the information about the currently shown message to its default.
"""
with self._current_lock:
self._current = None
def _show_without_closure(self, item, use_flags = True):
"""
        Display the given notification message without the other messages
        that are part of its closure.
:param item: The message queue item which should be displayed.
:type item: MessageItem
:param use_flags: Whether the message flags should be used while
displaying or not. (Defaults to True)
:type use_flags: bool
"""
if item.message.display(use_flags):
with self._current_lock:
self._current = item
def _show_with_closure(self, item):
"""
        Display the given notification message together with all the other
        messages that are part of its closure.
:param item: The message queue item which should be displayed.
:type item: MessageItem
"""
with self._current_lock:
# Calculate the closure for the item.
clo = closure(item, self._queue)
# Display the item without flags.
self._show_without_closure(item, use_flags=False)
# Display the item from the closure.
for i in clo:
self._show_without_closure(i)
def _wait(self):
"""
Wait until the message currently displayed vanishes.
"""
while True:
logger.debug("Wait until the current message vanishes.")
with self._current_lock:
cur = self._current
if cur is None or cur.message.wait_for_closed():
logger.debug("Waiting done.")
return
def enqueue(self, handler, message):
"""
Enqueue a new message from the given client handler in the message
queue so that it can be displayed soon.
This method is normally executed on the client handlers thread.
:param handler: The client handler.
:type handler: ClientHandler
:param message: The message object which should be displayed.
:type message: Message
"""
item = MessageItem(handler, message)
with self._current_lock:
if revises(item, self._current):
logger.debug("Directly show message from {}.".format(
handler.id))
# The new message will change the currently displayed one.
# Hence display it directly without adding it to the queue.
self._show_without_closure(item)
return
# Otherwise, just add it to the queue.
logger.debug("Enqueue message from {}.".format(handler.id))
self._queue.enqueue(item)
def run(self):
"""
Main execution routine of the message handler.
"""
logger.debug("Message handler started.")
while not self._should_stop:
# Get the next item from the queue. This will block until an item
# is available for processing.
item = self._queue.dequeue()
logger.debug("Dequeued item from queue.")
# Process the item.
item(self)
logger.debug("Message handler stopped.")
def stop(self):
"""
Stop the execution of this message handler.
"""
logger.debug("Stopping message handler.")
self._queue.enqueue(HandlerStopItem())
| l3nkz/pynoter | pynoter/server/message_handler.py | Python | gpl-3.0 | 12,613 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from distutils.core import setup
setup(
name="dotgen",
version="0.1.5",
description="Dotfiles generator",
author="Fabian Köhler",
author_email="[email protected]",
url="https://github.com/f-koehler/dotgen",
license="MIT",
packages=["dotgen", "dotgen.plugins"],
entry_points={"console_scripts": ["dotgen = dotgen.__main__:main"]})
| f-koehler/dotgen | setup.py | Python | mit | 425 |
# Copyright 2013 OpenStack Foundation
# Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest.mock import Mock
import webob
from trove.common import cfg
from trove.tests.unittests import trove_testtools
from trove.versions import BaseVersion
from trove.versions import Version
from trove.versions import VersionDataView
from trove.versions import VERSIONS
from trove.versions import VersionsAPI
from trove.versions import VersionsController
from trove.versions import VersionsDataView
BASE_URL = 'http://localhost'
id = VERSIONS['1.0']['id']
status = VERSIONS['1.0']['status']
base_url = BASE_URL
updated = VERSIONS['1.0']['updated']
class VersionsControllerTest(trove_testtools.TestCase):
def setUp(self):
super(VersionsControllerTest, self).setUp()
self.controller = VersionsController()
self.assertIsNotNone(self.controller,
"VersionsController instance was None")
def tearDown(self):
super(VersionsControllerTest, self).tearDown()
cfg.CONF.clear_override('public_endpoint')
def test_index_json(self):
request = Mock()
result = self.controller.index(request)
self.assertIsNotNone(result,
'Result was None')
result._data = Mock()
result._data.data_for_json = \
lambda: {'status': 'CURRENT',
'updated': '2012-08-01T00:00:00Z',
'id': 'v1.0',
'links': [{'href': 'http://localhost/v1.0/',
'rel': 'self'}]}
# can be anything but xml
json_data = result.data("application/json")
self.assertIsNotNone(json_data,
'Result json_data was None')
self.assertEqual('v1.0', json_data['id'],
'Version id is incorrect')
self.assertEqual('CURRENT', json_data['status'],
'Version status is incorrect')
self.assertEqual('2012-08-01T00:00:00Z', json_data['updated'],
'Version updated value is incorrect')
def test_index_json_with_public_endpoint(self):
cfg.CONF.set_override('public_endpoint', "https://example.com:8779")
req = webob.Request.blank('/')
resp = self.controller.index(req)
result = resp.data('application/json')['versions']
expected = [
{
'status': 'CURRENT',
'updated': '2012-08-01T00:00:00Z',
'id': 'v1.0',
'links': [{
'href': 'https://example.com:8779/v1.0/',
'rel': 'self'}]
}
]
self.assertEqual(expected, result)
def test_show_json(self):
request = Mock()
request.url_version = '1.0'
result = self.controller.show(request)
self.assertIsNotNone(result,
'Result was None')
json_data = result.data("application/json")
self.assertIsNotNone(json_data, "JSON data was None")
version = json_data.get('version', None)
self.assertIsNotNone(version, "Version was None")
self.assertEqual('CURRENT', version['status'],
"Version status was not 'CURRENT'")
self.assertEqual('2012-08-01T00:00:00Z', version['updated'],
"Version updated was not '2012-08-01T00:00:00Z'")
self.assertEqual('v1.0', version['id'], "Version id was not 'v1.0'")
def test_show_json_with_public_endpoint(self):
cfg.CONF.set_override('public_endpoint', "https://example.com:8779")
req = webob.Request.blank('/')
req.url_version = '1.0'
resp = self.controller.show(req)
result = resp.data('application/json')['version']
expected = {
'status': 'CURRENT',
'updated': '2012-08-01T00:00:00Z',
'id': 'v1.0',
'links': [{
'href': 'https://example.com:8779/',
'rel': 'self'}]
}
self.assertEqual(expected, result)
class BaseVersionTestCase(trove_testtools.TestCase):
def setUp(self):
super(BaseVersionTestCase, self).setUp()
self.base_version = BaseVersion(id, status, base_url, updated)
self.assertIsNotNone(self.base_version,
'BaseVersion instance was None')
def test_data(self):
data = self.base_version.data()
self.assertIsNotNone(data, 'Base Version data was None')
self.assertTrue(type(data) is dict,
"Base Version data is not a dict")
self.assertEqual('CURRENT', data['status'],
"Data status was not 'CURRENT'")
self.assertEqual('2012-08-01T00:00:00Z', data['updated'],
"Data updated was not '2012-08-01T00:00:00Z'")
self.assertEqual('v1.0', data['id'],
"Data status was not 'v1.0'")
def test_url(self):
url = self.base_version.url()
self.assertIsNotNone(url, 'Url was None')
self.assertEqual('http://localhost/v1.0/', url,
"Base Version url is incorrect")
class VersionTestCase(trove_testtools.TestCase):
def setUp(self):
super(VersionTestCase, self).setUp()
self.version = Version(id, status, base_url, updated)
self.assertIsNotNone(self.version,
'Version instance was None')
def test_url_no_trailing_slash(self):
url = self.version.url()
self.assertIsNotNone(url, 'Version url was None')
self.assertEqual(BASE_URL + '/', url,
'Base url value was incorrect')
def test_url_with_trailing_slash(self):
self.version.base_url = 'http://localhost/'
url = self.version.url()
self.assertEqual(BASE_URL + '/', url,
'Base url value was incorrect')
class VersionDataViewTestCase(trove_testtools.TestCase):
def setUp(self):
super(VersionDataViewTestCase, self).setUp()
# get a version object first
self.version = Version(id, status, base_url, updated)
self.assertIsNotNone(self.version,
'Version instance was None')
# then create an instance of VersionDataView
self.version_data_view = VersionDataView(self.version)
self.assertIsNotNone(self.version_data_view,
'Version Data view instance was None')
def test_data_for_json(self):
json_data = self.version_data_view.data_for_json()
self.assertIsNotNone(json_data, "JSON data was None")
self.assertTrue(type(json_data) is dict,
"JSON version data is not a dict")
self.assertIsNotNone(json_data.get('version'),
"Dict json_data has no key 'version'")
data = json_data['version']
self.assertIsNotNone(data, "JSON data version was None")
self.assertEqual('CURRENT', data['status'],
"Data status was not 'CURRENT'")
self.assertEqual('2012-08-01T00:00:00Z', data['updated'],
"Data updated was not '2012-08-01T00:00:00Z'")
self.assertEqual('v1.0', data['id'],
"Data status was not 'v1.0'")
class VersionsDataViewTestCase(trove_testtools.TestCase):
def setUp(self):
super(VersionsDataViewTestCase, self).setUp()
# get a version object, put it in a list
self.versions = []
self.version = Version(id, status, base_url, updated)
self.assertIsNotNone(self.version,
'Version instance was None')
self.versions.append(self.version)
# then create an instance of VersionsDataView
self.versions_data_view = VersionsDataView(self.versions)
self.assertIsNotNone(self.versions_data_view,
'Versions Data view instance was None')
def test_data_for_json(self):
json_data = self.versions_data_view.data_for_json()
self.assertIsNotNone(json_data, "JSON data was None")
self.assertTrue(type(json_data) is dict,
"JSON versions data is not a dict")
self.assertIsNotNone(json_data.get('versions', None),
"Dict json_data has no key 'versions'")
versions = json_data['versions']
self.assertIsNotNone(versions, "Versions was None")
self.assertEqual(1, len(versions), "Versions length != 1")
# explode the version object
versions_data = [v.data() for v in self.versions]
d1 = versions_data.pop()
d2 = versions.pop()
self.assertEqual(d1['id'], d2['id'],
"Version ids are not equal")
class VersionAPITestCase(trove_testtools.TestCase):
def setUp(self):
super(VersionAPITestCase, self).setUp()
def test_instance(self):
self.versions_api = VersionsAPI()
self.assertIsNotNone(self.versions_api,
"VersionsAPI instance was None")
| openstack/trove | trove/tests/unittests/api/test_versions.py | Python | apache-2.0 | 9,754 |
# encoding: utf-8
"""
Enable pyglet to be used interactively by setting PyOS_InputHook.
Authors
-------
* Nicolas P. Rougier
* Fernando Perez
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2008-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import os
import signal
import sys
import time
from timeit import default_timer as clock
import pyglet
#-----------------------------------------------------------------------------
# Platform-dependent imports and functions
#-----------------------------------------------------------------------------
if os.name == 'posix':
import select
def stdin_ready():
infds, outfds, erfds = select.select([sys.stdin],[],[],0)
if infds:
return True
else:
return False
elif sys.platform == 'win32':
import msvcrt
def stdin_ready():
return msvcrt.kbhit()
# On linux only, window.flip() has a bug that causes an AttributeError on
# window close. For details, see:
# http://groups.google.com/group/pyglet-users/browse_thread/thread/47c1aab9aa4a3d23/c22f9e819826799e?#c22f9e819826799e
if sys.platform.startswith('linux'):
def flip(window):
try:
window.flip()
except AttributeError:
pass
else:
def flip(window):
window.flip()
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
def inputhook_pyglet():
"""Run the pyglet event loop by processing pending events only.
This keeps processing pending events until stdin is ready. After
processing all pending events, a call to time.sleep is inserted. This is
    needed; otherwise, CPU usage sits at 100%. The sleep time should
    nevertheless be tuned for best performance.
"""
# We need to protect against a user pressing Control-C when IPython is
# idle and this is running. We trap KeyboardInterrupt and pass.
try:
t = clock()
while not stdin_ready():
pyglet.clock.tick()
for window in pyglet.app.windows:
window.switch_to()
window.dispatch_events()
window.dispatch_event('on_draw')
flip(window)
# We need to sleep at this point to keep the idle CPU load
            # low. However, if we sleep too long, GUI response is poor. As
# a compromise, we watch how often GUI events are being processed
# and switch between a short and long sleep time. Here are some
# stats useful in helping to tune this.
# time CPU load
# 0.001 13%
# 0.005 3%
# 0.01 1.5%
# 0.05 0.5%
used_time = clock() - t
if used_time > 5*60.0:
# print 'Sleep for 5 s' # dbg
time.sleep(5.0)
elif used_time > 10.0:
# print 'Sleep for 1 s' # dbg
time.sleep(1.0)
elif used_time > 0.1:
# Few GUI events coming in, so we can sleep longer
# print 'Sleep for 0.05 s' # dbg
time.sleep(0.05)
else:
# Many GUI events coming in, so sleep only very little
time.sleep(0.001)
except KeyboardInterrupt:
pass
return 0
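# Illustrative sketch (editor's addition): the same adaptive-sleep strategy,
# stripped of pyglet.  `poll_events` and `input_ready` are hypothetical
# callables standing in for the GUI tick and the stdin check above.
def _adaptive_poll(poll_events, input_ready):
    t = clock()
    while not input_ready():
        poll_events()
        used_time = clock() - t
        if used_time > 10.0:
            time.sleep(1.0)     # long idle: sleep coarsely, negligible CPU
        elif used_time > 0.1:
            time.sleep(0.05)    # few events: medium sleep
        else:
            time.sleep(0.001)   # busy: sleep very little, stay responsive
    return 0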
| cloud9ers/gurumate | environment/lib/python2.7/site-packages/IPython/lib/inputhookpyglet.py | Python | lgpl-3.0 | 3,794 |
"""
Module difflib -- helpers for computing deltas between objects.
Function get_close_matches(word, possibilities, n=3, cutoff=0.6):
Use SequenceMatcher to return list of the best "good enough" matches.
Function context_diff(a, b):
For two lists of strings, return a delta in context diff format.
Function ndiff(a, b):
Return a delta: the difference between `a` and `b` (lists of strings).
Function restore(delta, which):
Return one of the two sequences that generated an ndiff delta.
Function unified_diff(a, b):
For two lists of strings, return a delta in unified diff format.
Class SequenceMatcher:
A flexible class for comparing pairs of sequences of any type.
Class Differ:
For producing human-readable deltas from sequences of lines of text.
Class HtmlDiff:
For producing HTML side by side comparison with change highlights.
"""
__all__ = ['get_close_matches', 'ndiff', 'restore', 'SequenceMatcher',
'Differ','IS_CHARACTER_JUNK', 'IS_LINE_JUNK', 'context_diff',
'unified_diff', 'diff_bytes', 'HtmlDiff', 'Match']
from heapq import nlargest as _nlargest
from collections import namedtuple as _namedtuple
Match = _namedtuple('Match', 'a b size')
def _calculate_ratio(matches, length):
if length:
return 2.0 * matches / length
return 1.0
class SequenceMatcher:
"""
SequenceMatcher is a flexible class for comparing pairs of sequences of
any type, so long as the sequence elements are hashable. The basic
algorithm predates, and is a little fancier than, an algorithm
published in the late 1980's by Ratcliff and Obershelp under the
hyperbolic name "gestalt pattern matching". The basic idea is to find
the longest contiguous matching subsequence that contains no "junk"
elements (R-O doesn't address junk). The same idea is then applied
recursively to the pieces of the sequences to the left and to the right
of the matching subsequence. This does not yield minimal edit
sequences, but does tend to yield matches that "look right" to people.
SequenceMatcher tries to compute a "human-friendly diff" between two
sequences. Unlike e.g. UNIX(tm) diff, the fundamental notion is the
longest *contiguous* & junk-free matching subsequence. That's what
catches peoples' eyes. The Windows(tm) windiff has another interesting
notion, pairing up elements that appear uniquely in each sequence.
That, and the method here, appear to yield more intuitive difference
reports than does diff. This method appears to be the least vulnerable
to synching up on blocks of "junk lines", though (like blank lines in
ordinary text files, or maybe "<P>" lines in HTML files). That may be
because this is the only method of the 3 that has a *concept* of
"junk" <wink>.
Example, comparing two strings, and considering blanks to be "junk":
>>> s = SequenceMatcher(lambda x: x == " ",
... "private Thread currentThread;",
... "private volatile Thread currentThread;")
>>>
.ratio() returns a float in [0, 1], measuring the "similarity" of the
sequences. As a rule of thumb, a .ratio() value over 0.6 means the
sequences are close matches:
>>> print(round(s.ratio(), 3))
0.866
>>>
If you're only interested in where the sequences match,
.get_matching_blocks() is handy:
>>> for block in s.get_matching_blocks():
... print("a[%d] and b[%d] match for %d elements" % block)
a[0] and b[0] match for 8 elements
a[8] and b[17] match for 21 elements
a[29] and b[38] match for 0 elements
Note that the last tuple returned by .get_matching_blocks() is always a
dummy, (len(a), len(b), 0), and this is the only case in which the last
tuple element (number of elements matched) is 0.
If you want to know how to change the first sequence into the second,
use .get_opcodes():
>>> for opcode in s.get_opcodes():
... print("%6s a[%d:%d] b[%d:%d]" % opcode)
equal a[0:8] b[0:8]
insert a[8:8] b[8:17]
equal a[8:29] b[17:38]
See the Differ class for a fancy human-friendly file differencer, which
uses SequenceMatcher both to compare sequences of lines, and to compare
sequences of characters within similar (near-matching) lines.
See also function get_close_matches() in this module, which shows how
simple code building on SequenceMatcher can be used to do useful work.
Timing: Basic R-O is cubic time worst case and quadratic time expected
case. SequenceMatcher is quadratic time for the worst case and has
expected-case behavior dependent in a complicated way on how many
elements the sequences have in common; best case time is linear.
Methods:
__init__(isjunk=None, a='', b='')
Construct a SequenceMatcher.
set_seqs(a, b)
Set the two sequences to be compared.
set_seq1(a)
Set the first sequence to be compared.
set_seq2(b)
Set the second sequence to be compared.
find_longest_match(alo, ahi, blo, bhi)
Find longest matching block in a[alo:ahi] and b[blo:bhi].
get_matching_blocks()
Return list of triples describing matching subsequences.
get_opcodes()
Return list of 5-tuples describing how to turn a into b.
ratio()
Return a measure of the sequences' similarity (float in [0,1]).
quick_ratio()
Return an upper bound on .ratio() relatively quickly.
real_quick_ratio()
Return an upper bound on ratio() very quickly.
"""
def __init__(self, isjunk=None, a='', b='', autojunk=True):
"""Construct a SequenceMatcher.
Optional arg isjunk is None (the default), or a one-argument
function that takes a sequence element and returns true iff the
element is junk. None is equivalent to passing "lambda x: 0", i.e.
no elements are considered to be junk. For example, pass
lambda x: x in " \\t"
if you're comparing lines as sequences of characters, and don't
want to synch up on blanks or hard tabs.
Optional arg a is the first of two sequences to be compared. By
default, an empty string. The elements of a must be hashable. See
also .set_seqs() and .set_seq1().
Optional arg b is the second of two sequences to be compared. By
default, an empty string. The elements of b must be hashable. See
also .set_seqs() and .set_seq2().
Optional arg autojunk should be set to False to disable the
"automatic junk heuristic" that treats popular elements as junk
(see module documentation for more information).
"""
# Members:
# a
# first sequence
# b
# second sequence; differences are computed as "what do
# we need to do to 'a' to change it into 'b'?"
# b2j
# for x in b, b2j[x] is a list of the indices (into b)
# at which x appears; junk and popular elements do not appear
# fullbcount
# for x in b, fullbcount[x] == the number of times x
# appears in b; only materialized if really needed (used
# only for computing quick_ratio())
# matching_blocks
# a list of (i, j, k) triples, where a[i:i+k] == b[j:j+k];
# ascending & non-overlapping in i and in j; terminated by
# a dummy (len(a), len(b), 0) sentinel
# opcodes
# a list of (tag, i1, i2, j1, j2) tuples, where tag is
# one of
# 'replace' a[i1:i2] should be replaced by b[j1:j2]
# 'delete' a[i1:i2] should be deleted
# 'insert' b[j1:j2] should be inserted
# 'equal' a[i1:i2] == b[j1:j2]
# isjunk
# a user-supplied function taking a sequence element and
# returning true iff the element is "junk" -- this has
# subtle but helpful effects on the algorithm, which I'll
# get around to writing up someday <0.9 wink>.
# DON'T USE! Only __chain_b uses this. Use "in self.bjunk".
# bjunk
# the items in b for which isjunk is True.
# bpopular
# nonjunk items in b treated as junk by the heuristic (if used).
self.isjunk = isjunk
self.a = self.b = None
self.autojunk = autojunk
self.set_seqs(a, b)
def set_seqs(self, a, b):
"""Set the two sequences to be compared.
>>> s = SequenceMatcher()
>>> s.set_seqs("abcd", "bcde")
>>> s.ratio()
0.75
"""
self.set_seq1(a)
self.set_seq2(b)
def set_seq1(self, a):
"""Set the first sequence to be compared.
The second sequence to be compared is not changed.
>>> s = SequenceMatcher(None, "abcd", "bcde")
>>> s.ratio()
0.75
>>> s.set_seq1("bcde")
>>> s.ratio()
1.0
>>>
SequenceMatcher computes and caches detailed information about the
second sequence, so if you want to compare one sequence S against
many sequences, use .set_seq2(S) once and call .set_seq1(x)
repeatedly for each of the other sequences.
See also set_seqs() and set_seq2().
"""
if a is self.a:
return
self.a = a
self.matching_blocks = self.opcodes = None
def set_seq2(self, b):
"""Set the second sequence to be compared.
The first sequence to be compared is not changed.
>>> s = SequenceMatcher(None, "abcd", "bcde")
>>> s.ratio()
0.75
>>> s.set_seq2("abcd")
>>> s.ratio()
1.0
>>>
SequenceMatcher computes and caches detailed information about the
second sequence, so if you want to compare one sequence S against
many sequences, use .set_seq2(S) once and call .set_seq1(x)
repeatedly for each of the other sequences.
See also set_seqs() and set_seq1().
"""
if b is self.b:
return
self.b = b
self.matching_blocks = self.opcodes = None
self.fullbcount = None
self.__chain_b()
# For each element x in b, set b2j[x] to a list of the indices in
# b where x appears; the indices are in increasing order; note that
# the number of times x appears in b is len(b2j[x]) ...
# when self.isjunk is defined, junk elements don't show up in this
# map at all, which stops the central find_longest_match method
# from starting any matching block at a junk element ...
# b2j also does not contain entries for "popular" elements, meaning
# elements that account for more than 1 + 1% of the total elements, and
# when the sequence is reasonably large (>= 200 elements); this can
# be viewed as an adaptive notion of semi-junk, and yields an enormous
# speedup when, e.g., comparing program files with hundreds of
# instances of "return NULL;" ...
# note that this is only called when b changes; so for cross-product
# kinds of matches, it's best to call set_seq2 once, then set_seq1
# repeatedly
def __chain_b(self):
# Because isjunk is a user-defined (not C) function, and we test
# for junk a LOT, it's important to minimize the number of calls.
# Before the tricks described here, __chain_b was by far the most
# time-consuming routine in the whole module! If anyone sees
# Jim Roskind, thank him again for profile.py -- I never would
# have guessed that.
# The first trick is to build b2j ignoring the possibility
# of junk. I.e., we don't call isjunk at all yet. Throwing
# out the junk later is much cheaper than building b2j "right"
# from the start.
b = self.b
self.b2j = b2j = {}
for i, elt in enumerate(b):
indices = b2j.setdefault(elt, [])
indices.append(i)
# Purge junk elements
self.bjunk = junk = set()
isjunk = self.isjunk
if isjunk:
for elt in b2j.keys():
if isjunk(elt):
junk.add(elt)
for elt in junk: # separate loop avoids separate list of keys
del b2j[elt]
# Purge popular elements that are not junk
self.bpopular = popular = set()
n = len(b)
if self.autojunk and n >= 200:
ntest = n // 100 + 1
for elt, idxs in b2j.items():
if len(idxs) > ntest:
popular.add(elt)
for elt in popular: # ditto; as fast for 1% deletion
del b2j[elt]
def find_longest_match(self, alo, ahi, blo, bhi):
"""Find longest matching block in a[alo:ahi] and b[blo:bhi].
If isjunk is not defined:
Return (i,j,k) such that a[i:i+k] is equal to b[j:j+k], where
alo <= i <= i+k <= ahi
blo <= j <= j+k <= bhi
and for all (i',j',k') meeting those conditions,
k >= k'
i <= i'
and if i == i', j <= j'
In other words, of all maximal matching blocks, return one that
starts earliest in a, and of all those maximal matching blocks that
start earliest in a, return the one that starts earliest in b.
>>> s = SequenceMatcher(None, " abcd", "abcd abcd")
>>> s.find_longest_match(0, 5, 0, 9)
Match(a=0, b=4, size=5)
If isjunk is defined, first the longest matching block is
determined as above, but with the additional restriction that no
junk element appears in the block. Then that block is extended as
far as possible by matching (only) junk elements on both sides. So
the resulting block never matches on junk except as identical junk
happens to be adjacent to an "interesting" match.
Here's the same example as before, but considering blanks to be
junk. That prevents " abcd" from matching the " abcd" at the tail
end of the second sequence directly. Instead only the "abcd" can
match, and matches the leftmost "abcd" in the second sequence:
>>> s = SequenceMatcher(lambda x: x==" ", " abcd", "abcd abcd")
>>> s.find_longest_match(0, 5, 0, 9)
Match(a=1, b=0, size=4)
If no blocks match, return (alo, blo, 0).
>>> s = SequenceMatcher(None, "ab", "c")
>>> s.find_longest_match(0, 2, 0, 1)
Match(a=0, b=0, size=0)
"""
# CAUTION: stripping common prefix or suffix would be incorrect.
# E.g.,
# ab
# acab
# Longest matching block is "ab", but if common prefix is
# stripped, it's "a" (tied with "b"). UNIX(tm) diff does so
# strip, so ends up claiming that ab is changed to acab by
# inserting "ca" in the middle. That's minimal but unintuitive:
# "it's obvious" that someone inserted "ac" at the front.
# Windiff ends up at the same place as diff, but by pairing up
# the unique 'b's and then matching the first two 'a's.
a, b, b2j, isbjunk = self.a, self.b, self.b2j, self.bjunk.__contains__
besti, bestj, bestsize = alo, blo, 0
# find longest junk-free match
# during an iteration of the loop, j2len[j] = length of longest
# junk-free match ending with a[i-1] and b[j]
j2len = {}
nothing = []
for i in range(alo, ahi):
# look at all instances of a[i] in b; note that because
# b2j has no junk keys, the loop is skipped if a[i] is junk
j2lenget = j2len.get
newj2len = {}
for j in b2j.get(a[i], nothing):
# a[i] matches b[j]
if j < blo:
continue
if j >= bhi:
break
k = newj2len[j] = j2lenget(j-1, 0) + 1
if k > bestsize:
besti, bestj, bestsize = i-k+1, j-k+1, k
j2len = newj2len
# Extend the best by non-junk elements on each end. In particular,
# "popular" non-junk elements aren't in b2j, which greatly speeds
# the inner loop above, but also means "the best" match so far
# doesn't contain any junk *or* popular non-junk elements.
while besti > alo and bestj > blo and \
not isbjunk(b[bestj-1]) and \
a[besti-1] == b[bestj-1]:
besti, bestj, bestsize = besti-1, bestj-1, bestsize+1
while besti+bestsize < ahi and bestj+bestsize < bhi and \
not isbjunk(b[bestj+bestsize]) and \
a[besti+bestsize] == b[bestj+bestsize]:
bestsize += 1
# Now that we have a wholly interesting match (albeit possibly
# empty!), we may as well suck up the matching junk on each
# side of it too. Can't think of a good reason not to, and it
# saves post-processing the (possibly considerable) expense of
# figuring out what to do with it. In the case of an empty
# interesting match, this is clearly the right thing to do,
# because no other kind of match is possible in the regions.
while besti > alo and bestj > blo and \
isbjunk(b[bestj-1]) and \
a[besti-1] == b[bestj-1]:
besti, bestj, bestsize = besti-1, bestj-1, bestsize+1
while besti+bestsize < ahi and bestj+bestsize < bhi and \
isbjunk(b[bestj+bestsize]) and \
a[besti+bestsize] == b[bestj+bestsize]:
bestsize = bestsize + 1
return Match(besti, bestj, bestsize)
def get_matching_blocks(self):
"""Return list of triples describing matching subsequences.
Each triple is of the form (i, j, n), and means that
a[i:i+n] == b[j:j+n]. The triples are monotonically increasing in
i and in j. New in Python 2.5, it's also guaranteed that if
(i, j, n) and (i', j', n') are adjacent triples in the list, and
the second is not the last triple in the list, then i+n != i' or
j+n != j'. IOW, adjacent triples never describe adjacent equal
blocks.
The last triple is a dummy, (len(a), len(b), 0), and is the only
triple with n==0.
>>> s = SequenceMatcher(None, "abxcd", "abcd")
>>> list(s.get_matching_blocks())
[Match(a=0, b=0, size=2), Match(a=3, b=2, size=2), Match(a=5, b=4, size=0)]
"""
if self.matching_blocks is not None:
return self.matching_blocks
la, lb = len(self.a), len(self.b)
# This is most naturally expressed as a recursive algorithm, but
# at least one user bumped into extreme use cases that exceeded
# the recursion limit on their box. So, now we maintain a list
        # (`queue`) of blocks we still need to look at, and append partial
# results to `matching_blocks` in a loop; the matches are sorted
# at the end.
queue = [(0, la, 0, lb)]
matching_blocks = []
while queue:
alo, ahi, blo, bhi = queue.pop()
i, j, k = x = self.find_longest_match(alo, ahi, blo, bhi)
# a[alo:i] vs b[blo:j] unknown
# a[i:i+k] same as b[j:j+k]
# a[i+k:ahi] vs b[j+k:bhi] unknown
if k: # if k is 0, there was no matching block
matching_blocks.append(x)
if alo < i and blo < j:
queue.append((alo, i, blo, j))
if i+k < ahi and j+k < bhi:
queue.append((i+k, ahi, j+k, bhi))
matching_blocks.sort()
# It's possible that we have adjacent equal blocks in the
# matching_blocks list now. Starting with 2.5, this code was added
# to collapse them.
i1 = j1 = k1 = 0
non_adjacent = []
for i2, j2, k2 in matching_blocks:
# Is this block adjacent to i1, j1, k1?
if i1 + k1 == i2 and j1 + k1 == j2:
# Yes, so collapse them -- this just increases the length of
# the first block by the length of the second, and the first
# block so lengthened remains the block to compare against.
k1 += k2
else:
# Not adjacent. Remember the first block (k1==0 means it's
# the dummy we started with), and make the second block the
# new block to compare against.
if k1:
non_adjacent.append((i1, j1, k1))
i1, j1, k1 = i2, j2, k2
if k1:
non_adjacent.append((i1, j1, k1))
non_adjacent.append( (la, lb, 0) )
self.matching_blocks = list(map(Match._make, non_adjacent))
return self.matching_blocks
def get_opcodes(self):
"""Return list of 5-tuples describing how to turn a into b.
Each tuple is of the form (tag, i1, i2, j1, j2). The first tuple
has i1 == j1 == 0, and remaining tuples have i1 == the i2 from the
tuple preceding it, and likewise for j1 == the previous j2.
The tags are strings, with these meanings:
'replace': a[i1:i2] should be replaced by b[j1:j2]
'delete': a[i1:i2] should be deleted.
Note that j1==j2 in this case.
'insert': b[j1:j2] should be inserted at a[i1:i1].
Note that i1==i2 in this case.
'equal': a[i1:i2] == b[j1:j2]
>>> a = "qabxcd"
>>> b = "abycdf"
>>> s = SequenceMatcher(None, a, b)
>>> for tag, i1, i2, j1, j2 in s.get_opcodes():
... print(("%7s a[%d:%d] (%s) b[%d:%d] (%s)" %
... (tag, i1, i2, a[i1:i2], j1, j2, b[j1:j2])))
delete a[0:1] (q) b[0:0] ()
equal a[1:3] (ab) b[0:2] (ab)
replace a[3:4] (x) b[2:3] (y)
equal a[4:6] (cd) b[3:5] (cd)
insert a[6:6] () b[5:6] (f)
"""
if self.opcodes is not None:
return self.opcodes
i = j = 0
self.opcodes = answer = []
for ai, bj, size in self.get_matching_blocks():
# invariant: we've pumped out correct diffs to change
# a[:i] into b[:j], and the next matching block is
# a[ai:ai+size] == b[bj:bj+size]. So we need to pump
# out a diff to change a[i:ai] into b[j:bj], pump out
# the matching block, and move (i,j) beyond the match
tag = ''
if i < ai and j < bj:
tag = 'replace'
elif i < ai:
tag = 'delete'
elif j < bj:
tag = 'insert'
if tag:
answer.append( (tag, i, ai, j, bj) )
i, j = ai+size, bj+size
# the list of matching blocks is terminated by a
# sentinel with size 0
if size:
answer.append( ('equal', ai, i, bj, j) )
return answer
def get_grouped_opcodes(self, n=3):
""" Isolate change clusters by eliminating ranges with no changes.
Return a generator of groups with up to n lines of context.
Each group is in the same format as returned by get_opcodes().
>>> from pprint import pprint
>>> a = list(map(str, range(1,40)))
>>> b = a[:]
>>> b[8:8] = ['i'] # Make an insertion
>>> b[20] += 'x' # Make a replacement
>>> b[23:28] = [] # Make a deletion
>>> b[30] += 'y' # Make another replacement
>>> pprint(list(SequenceMatcher(None,a,b).get_grouped_opcodes()))
[[('equal', 5, 8, 5, 8), ('insert', 8, 8, 8, 9), ('equal', 8, 11, 9, 12)],
[('equal', 16, 19, 17, 20),
('replace', 19, 20, 20, 21),
('equal', 20, 22, 21, 23),
('delete', 22, 27, 23, 23),
('equal', 27, 30, 23, 26)],
[('equal', 31, 34, 27, 30),
('replace', 34, 35, 30, 31),
('equal', 35, 38, 31, 34)]]
"""
codes = self.get_opcodes()
if not codes:
codes = [("equal", 0, 1, 0, 1)]
# Fixup leading and trailing groups if they show no changes.
if codes[0][0] == 'equal':
tag, i1, i2, j1, j2 = codes[0]
codes[0] = tag, max(i1, i2-n), i2, max(j1, j2-n), j2
if codes[-1][0] == 'equal':
tag, i1, i2, j1, j2 = codes[-1]
codes[-1] = tag, i1, min(i2, i1+n), j1, min(j2, j1+n)
nn = n + n
group = []
for tag, i1, i2, j1, j2 in codes:
# End the current group and start a new one whenever
# there is a large range with no changes.
if tag == 'equal' and i2-i1 > nn:
group.append((tag, i1, min(i2, i1+n), j1, min(j2, j1+n)))
yield group
group = []
i1, j1 = max(i1, i2-n), max(j1, j2-n)
group.append((tag, i1, i2, j1 ,j2))
if group and not (len(group)==1 and group[0][0] == 'equal'):
yield group
def ratio(self):
"""Return a measure of the sequences' similarity (float in [0,1]).
Where T is the total number of elements in both sequences, and
M is the number of matches, this is 2.0*M / T.
Note that this is 1 if the sequences are identical, and 0 if
they have nothing in common.
.ratio() is expensive to compute if you haven't already computed
.get_matching_blocks() or .get_opcodes(), in which case you may
want to try .quick_ratio() or .real_quick_ratio() first to get an
upper bound.
>>> s = SequenceMatcher(None, "abcd", "bcde")
>>> s.ratio()
0.75
>>> s.quick_ratio()
0.75
>>> s.real_quick_ratio()
1.0
"""
matches = sum(triple[-1] for triple in self.get_matching_blocks())
return _calculate_ratio(matches, len(self.a) + len(self.b))
def quick_ratio(self):
"""Return an upper bound on ratio() relatively quickly.
This isn't defined beyond that it is an upper bound on .ratio(), and
is faster to compute.
"""
# viewing a and b as multisets, set matches to the cardinality
# of their intersection; this counts the number of matches
# without regard to order, so is clearly an upper bound
if self.fullbcount is None:
self.fullbcount = fullbcount = {}
for elt in self.b:
fullbcount[elt] = fullbcount.get(elt, 0) + 1
fullbcount = self.fullbcount
# avail[x] is the number of times x appears in 'b' less the
# number of times we've seen it in 'a' so far ... kinda
avail = {}
availhas, matches = avail.__contains__, 0
for elt in self.a:
if availhas(elt):
numb = avail[elt]
else:
numb = fullbcount.get(elt, 0)
avail[elt] = numb - 1
if numb > 0:
matches = matches + 1
return _calculate_ratio(matches, len(self.a) + len(self.b))
def real_quick_ratio(self):
"""Return an upper bound on ratio() very quickly.
This isn't defined beyond that it is an upper bound on .ratio(), and
is faster to compute than either .ratio() or .quick_ratio().
"""
la, lb = len(self.a), len(self.b)
# can't have more matches than the number of elements in the
# shorter sequence
return _calculate_ratio(min(la, lb), la + lb)
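# Illustrative sketch (editor's addition): the "popular element" heuristic
# described in __chain_b only activates for sequences of 200+ elements.  A
# line repeated 300 times is dropped from b2j, so the matcher can only
# anchor on rare lines and the similarity score can collapse:
def _autojunk_demo():
    a = ['x\n'] * 300 + ['foo\n']
    b = ['foo\n'] + ['x\n'] * 300
    low = SequenceMatcher(None, a, b).ratio()                   # ~0.003
    high = SequenceMatcher(None, a, b, autojunk=False).ratio()  # ~0.997
    assert high > low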
def get_close_matches(word, possibilities, n=3, cutoff=0.6):
"""Use SequenceMatcher to return list of the best "good enough" matches.
word is a sequence for which close matches are desired (typically a
string).
possibilities is a list of sequences against which to match word
(typically a list of strings).
Optional arg n (default 3) is the maximum number of close matches to
return. n must be > 0.
Optional arg cutoff (default 0.6) is a float in [0, 1]. Possibilities
that don't score at least that similar to word are ignored.
The best (no more than n) matches among the possibilities are returned
in a list, sorted by similarity score, most similar first.
>>> get_close_matches("appel", ["ape", "apple", "peach", "puppy"])
['apple', 'ape']
>>> import keyword as _keyword
>>> get_close_matches("wheel", _keyword.kwlist)
['while']
>>> get_close_matches("Apple", _keyword.kwlist)
[]
>>> get_close_matches("accept", _keyword.kwlist)
['except']
"""
if not n > 0:
raise ValueError("n must be > 0: %r" % (n,))
if not 0.0 <= cutoff <= 1.0:
raise ValueError("cutoff must be in [0.0, 1.0]: %r" % (cutoff,))
result = []
s = SequenceMatcher()
s.set_seq2(word)
for x in possibilities:
s.set_seq1(x)
if s.real_quick_ratio() >= cutoff and \
s.quick_ratio() >= cutoff and \
s.ratio() >= cutoff:
result.append((s.ratio(), x))
# Move the best scorers to head of list
result = _nlargest(n, result)
# Strip scores for the best n matches
return [x for score, x in result]
def _count_leading(line, ch):
"""
Return number of `ch` characters at the start of `line`.
Example:
>>> _count_leading(' abc', ' ')
3
"""
i, n = 0, len(line)
while i < n and line[i] == ch:
i += 1
return i
class Differ:
r"""
Differ is a class for comparing sequences of lines of text, and
producing human-readable differences or deltas. Differ uses
SequenceMatcher both to compare sequences of lines, and to compare
sequences of characters within similar (near-matching) lines.
Each line of a Differ delta begins with a two-letter code:
'- ' line unique to sequence 1
'+ ' line unique to sequence 2
' ' line common to both sequences
'? ' line not present in either input sequence
Lines beginning with '? ' attempt to guide the eye to intraline
differences, and were not present in either input sequence. These lines
can be confusing if the sequences contain tab characters.
Note that Differ makes no claim to produce a *minimal* diff. To the
contrary, minimal diffs are often counter-intuitive, because they synch
up anywhere possible, sometimes accidental matches 100 pages apart.
Restricting synch points to contiguous matches preserves some notion of
locality, at the occasional cost of producing a longer diff.
Example: Comparing two texts.
First we set up the texts, sequences of individual single-line strings
ending with newlines (such sequences can also be obtained from the
`readlines()` method of file-like objects):
>>> text1 = ''' 1. Beautiful is better than ugly.
... 2. Explicit is better than implicit.
... 3. Simple is better than complex.
... 4. Complex is better than complicated.
... '''.splitlines(keepends=True)
>>> len(text1)
4
>>> text1[0][-1]
'\n'
>>> text2 = ''' 1. Beautiful is better than ugly.
... 3. Simple is better than complex.
... 4. Complicated is better than complex.
... 5. Flat is better than nested.
... '''.splitlines(keepends=True)
Next we instantiate a Differ object:
>>> d = Differ()
Note that when instantiating a Differ object we may pass functions to
filter out line and character 'junk'. See Differ.__init__ for details.
Finally, we compare the two:
>>> result = list(d.compare(text1, text2))
'result' is a list of strings, so let's pretty-print it:
>>> from pprint import pprint as _pprint
>>> _pprint(result)
[' 1. Beautiful is better than ugly.\n',
'- 2. Explicit is better than implicit.\n',
'- 3. Simple is better than complex.\n',
'+ 3. Simple is better than complex.\n',
'? ++\n',
'- 4. Complex is better than complicated.\n',
'? ^ ---- ^\n',
'+ 4. Complicated is better than complex.\n',
'? ++++ ^ ^\n',
'+ 5. Flat is better than nested.\n']
As a single multi-line string it looks like this:
>>> print(''.join(result), end="")
1. Beautiful is better than ugly.
- 2. Explicit is better than implicit.
- 3. Simple is better than complex.
+ 3. Simple is better than complex.
? ++
- 4. Complex is better than complicated.
? ^ ---- ^
+ 4. Complicated is better than complex.
? ++++ ^ ^
+ 5. Flat is better than nested.
Methods:
__init__(linejunk=None, charjunk=None)
Construct a text differencer, with optional filters.
compare(a, b)
Compare two sequences of lines; generate the resulting delta.
"""
def __init__(self, linejunk=None, charjunk=None):
"""
Construct a text differencer, with optional filters.
The two optional keyword parameters are for filter functions:
- `linejunk`: A function that should accept a single string argument,
and return true iff the string is junk. The module-level function
`IS_LINE_JUNK` may be used to filter out lines without visible
characters, except for at most one splat ('#'). It is recommended
to leave linejunk None; the underlying SequenceMatcher class has
an adaptive notion of "noise" lines that's better than any static
definition the author has ever been able to craft.
- `charjunk`: A function that should accept a string of length 1. The
module-level function `IS_CHARACTER_JUNK` may be used to filter out
whitespace characters (a blank or tab; **note**: bad idea to include
newline in this!). Use of IS_CHARACTER_JUNK is recommended.
"""
self.linejunk = linejunk
self.charjunk = charjunk
def compare(self, a, b):
r"""
Compare two sequences of lines; generate the resulting delta.
Each sequence must contain individual single-line strings ending with
newlines. Such sequences can be obtained from the `readlines()` method
of file-like objects. The delta generated also consists of newline-
terminated strings, ready to be printed as-is via the writeline()
method of a file-like object.
Example:
>>> print(''.join(Differ().compare('one\ntwo\nthree\n'.splitlines(True),
... 'ore\ntree\nemu\n'.splitlines(True))),
... end="")
- one
? ^
+ ore
? ^
- two
- three
? -
+ tree
+ emu
"""
cruncher = SequenceMatcher(self.linejunk, a, b)
for tag, alo, ahi, blo, bhi in cruncher.get_opcodes():
if tag == 'replace':
g = self._fancy_replace(a, alo, ahi, b, blo, bhi)
elif tag == 'delete':
g = self._dump('-', a, alo, ahi)
elif tag == 'insert':
g = self._dump('+', b, blo, bhi)
elif tag == 'equal':
g = self._dump(' ', a, alo, ahi)
else:
raise ValueError('unknown tag %r' % (tag,))
yield from g
def _dump(self, tag, x, lo, hi):
"""Generate comparison results for a same-tagged range."""
for i in range(lo, hi):
yield '%s %s' % (tag, x[i])
def _plain_replace(self, a, alo, ahi, b, blo, bhi):
assert alo < ahi and blo < bhi
# dump the shorter block first -- reduces the burden on short-term
# memory if the blocks are of very different sizes
if bhi - blo < ahi - alo:
first = self._dump('+', b, blo, bhi)
second = self._dump('-', a, alo, ahi)
else:
first = self._dump('-', a, alo, ahi)
second = self._dump('+', b, blo, bhi)
for g in first, second:
yield from g
def _fancy_replace(self, a, alo, ahi, b, blo, bhi):
r"""
When replacing one block of lines with another, search the blocks
for *similar* lines; the best-matching pair (if any) is used as a
synch point, and intraline difference marking is done on the
similar pair. Lots of work, but often worth it.
Example:
>>> d = Differ()
>>> results = d._fancy_replace(['abcDefghiJkl\n'], 0, 1,
... ['abcdefGhijkl\n'], 0, 1)
>>> print(''.join(results), end="")
- abcDefghiJkl
? ^ ^ ^
+ abcdefGhijkl
? ^ ^ ^
"""
# don't synch up unless the lines have a similarity score of at
# least cutoff; best_ratio tracks the best score seen so far
best_ratio, cutoff = 0.74, 0.75
cruncher = SequenceMatcher(self.charjunk)
eqi, eqj = None, None # 1st indices of equal lines (if any)
# search for the pair that matches best without being identical
# (identical lines must be junk lines, & we don't want to synch up
# on junk -- unless we have to)
for j in range(blo, bhi):
bj = b[j]
cruncher.set_seq2(bj)
for i in range(alo, ahi):
ai = a[i]
if ai == bj:
if eqi is None:
eqi, eqj = i, j
continue
cruncher.set_seq1(ai)
# computing similarity is expensive, so use the quick
# upper bounds first -- have seen this speed up messy
# compares by a factor of 3.
# note that ratio() is only expensive to compute the first
# time it's called on a sequence pair; the expensive part
# of the computation is cached by cruncher
if cruncher.real_quick_ratio() > best_ratio and \
cruncher.quick_ratio() > best_ratio and \
cruncher.ratio() > best_ratio:
best_ratio, best_i, best_j = cruncher.ratio(), i, j
if best_ratio < cutoff:
# no non-identical "pretty close" pair
if eqi is None:
# no identical pair either -- treat it as a straight replace
yield from self._plain_replace(a, alo, ahi, b, blo, bhi)
return
# no close pair, but an identical pair -- synch up on that
best_i, best_j, best_ratio = eqi, eqj, 1.0
else:
# there's a close pair, so forget the identical pair (if any)
eqi = None
# a[best_i] very similar to b[best_j]; eqi is None iff they're not
# identical
# pump out diffs from before the synch point
yield from self._fancy_helper(a, alo, best_i, b, blo, best_j)
# do intraline marking on the synch pair
aelt, belt = a[best_i], b[best_j]
if eqi is None:
# pump out a '-', '?', '+', '?' quad for the synched lines
atags = btags = ""
cruncher.set_seqs(aelt, belt)
for tag, ai1, ai2, bj1, bj2 in cruncher.get_opcodes():
la, lb = ai2 - ai1, bj2 - bj1
if tag == 'replace':
atags += '^' * la
btags += '^' * lb
elif tag == 'delete':
atags += '-' * la
elif tag == 'insert':
btags += '+' * lb
elif tag == 'equal':
atags += ' ' * la
btags += ' ' * lb
else:
raise ValueError('unknown tag %r' % (tag,))
yield from self._qformat(aelt, belt, atags, btags)
else:
# the synch pair is identical
yield ' ' + aelt
# pump out diffs from after the synch point
yield from self._fancy_helper(a, best_i+1, ahi, b, best_j+1, bhi)
def _fancy_helper(self, a, alo, ahi, b, blo, bhi):
g = []
if alo < ahi:
if blo < bhi:
g = self._fancy_replace(a, alo, ahi, b, blo, bhi)
else:
g = self._dump('-', a, alo, ahi)
elif blo < bhi:
g = self._dump('+', b, blo, bhi)
yield from g
def _qformat(self, aline, bline, atags, btags):
r"""
Format "?" output and deal with leading tabs.
Example:
>>> d = Differ()
        >>> results = d._qformat('\tabcDefghiJkl\n', '\tabcdefGhijkl\n',
        ...                      '  ^ ^  ^      ', '  ^ ^  ^      ')
        >>> for line in results: print(repr(line))
        ...
        '- \tabcDefghiJkl\n'
        '? \t ^ ^  ^\n'
        '+ \tabcdefGhijkl\n'
        '? \t ^ ^  ^\n'
"""
# Can hurt, but will probably help most of the time.
common = min(_count_leading(aline, "\t"),
_count_leading(bline, "\t"))
common = min(common, _count_leading(atags[:common], " "))
common = min(common, _count_leading(btags[:common], " "))
atags = atags[common:].rstrip()
btags = btags[common:].rstrip()
yield "- " + aline
if atags:
yield "? %s%s\n" % ("\t" * common, atags)
yield "+ " + bline
if btags:
yield "? %s%s\n" % ("\t" * common, btags)
# With respect to junk, an earlier version of ndiff simply refused to
# *start* a match with a junk element. The result was cases like this:
# before: private Thread currentThread;
# after: private volatile Thread currentThread;
# If you consider whitespace to be junk, the longest contiguous match
# not starting with junk is "e Thread currentThread". So ndiff reported
# that "e volatil" was inserted between the 't' and the 'e' in "private".
# While an accurate view, to people that's absurd. The current version
# looks for matching blocks that are entirely junk-free, then extends the
# longest one of those as far as possible but only with matching junk.
# So now "currentThread" is matched, then extended to suck up the
# preceding blank; then "private" is matched, and extended to suck up the
# following blank; then "Thread" is matched; and finally ndiff reports
# that "volatile " was inserted before "Thread". The only quibble
# remaining is that perhaps it was really the case that " volatile"
# was inserted after "private". I can live with that <wink>.
import re
def IS_LINE_JUNK(line, pat=re.compile(r"\s*(?:#\s*)?$").match):
r"""
    Return True for ignorable line: iff `line` is blank or contains a single '#'.
Examples:
>>> IS_LINE_JUNK('\n')
True
>>> IS_LINE_JUNK(' # \n')
True
>>> IS_LINE_JUNK('hello\n')
False
"""
return pat(line) is not None
def IS_CHARACTER_JUNK(ch, ws=" \t"):
r"""
    Return True for ignorable character: iff `ch` is a space or tab.
Examples:
>>> IS_CHARACTER_JUNK(' ')
True
>>> IS_CHARACTER_JUNK('\t')
True
>>> IS_CHARACTER_JUNK('\n')
False
>>> IS_CHARACTER_JUNK('x')
False
"""
return ch in ws
########################################################################
### Unified Diff
########################################################################
def _format_range_unified(start, stop):
    'Convert range to the "unified" format'
# Per the diff spec at http://www.unix.org/single_unix_specification/
beginning = start + 1 # lines start numbering with one
length = stop - start
if length == 1:
return '{}'.format(beginning)
if not length:
beginning -= 1 # empty ranges begin at line just before the range
return '{},{}'.format(beginning, length)
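# Illustrative values (not part of the original module): the unified range is
# "start,length" with 1-based starts, so
#     _format_range_unified(0, 1)  ->  '1'    (a length of 1 is omitted)
#     _format_range_unified(0, 3)  ->  '1,3'
#     _format_range_unified(3, 3)  ->  '3,0'  (empty range, reported just before)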
def unified_diff(a, b, fromfile='', tofile='', fromfiledate='',
tofiledate='', n=3, lineterm='\n'):
r"""
Compare two sequences of lines; generate the delta as a unified diff.
Unified diffs are a compact way of showing line changes and a few
lines of context. The number of context lines is set by 'n' which
defaults to three.
By default, the diff control lines (those with ---, +++, or @@) are
created with a trailing newline. This is helpful so that inputs
created from file.readlines() result in diffs that are suitable for
file.writelines() since both the inputs and outputs have trailing
newlines.
For inputs that do not have trailing newlines, set the lineterm
argument to "" so that the output will be uniformly newline free.
The unidiff format normally has a header for filenames and modification
times. Any or all of these may be specified using strings for
'fromfile', 'tofile', 'fromfiledate', and 'tofiledate'.
The modification times are normally expressed in the ISO 8601 format.
Example:
    >>> for line in unified_diff('one two three four'.split(),
    ...             'zero one tree four'.split(), 'Original', 'Current',
    ...             '2005-01-26 23:30:50', '2010-04-02 10:20:52',
    ...             lineterm=''):
    ...     print(line)                 # doctest: +NORMALIZE_WHITESPACE
    --- Original        2005-01-26 23:30:50
    +++ Current         2010-04-02 10:20:52
    @@ -1,4 +1,4 @@
    +zero
     one
    -two
    -three
    +tree
     four
"""
_check_types(a, b, fromfile, tofile, fromfiledate, tofiledate, lineterm)
started = False
for group in SequenceMatcher(None,a,b).get_grouped_opcodes(n):
if not started:
started = True
fromdate = '\t{}'.format(fromfiledate) if fromfiledate else ''
todate = '\t{}'.format(tofiledate) if tofiledate else ''
yield '--- {}{}{}'.format(fromfile, fromdate, lineterm)
yield '+++ {}{}{}'.format(tofile, todate, lineterm)
first, last = group[0], group[-1]
file1_range = _format_range_unified(first[1], last[2])
file2_range = _format_range_unified(first[3], last[4])
yield '@@ -{} +{} @@{}'.format(file1_range, file2_range, lineterm)
for tag, i1, i2, j1, j2 in group:
if tag == 'equal':
for line in a[i1:i2]:
yield ' ' + line
continue
if tag in {'replace', 'delete'}:
for line in a[i1:i2]:
yield '-' + line
if tag in {'replace', 'insert'}:
for line in b[j1:j2]:
yield '+' + line
########################################################################
### Context Diff
########################################################################
def _format_range_context(start, stop):
    'Convert range to the "context" format'
# Per the diff spec at http://www.unix.org/single_unix_specification/
beginning = start + 1 # lines start numbering with one
length = stop - start
if not length:
beginning -= 1 # empty ranges begin at line just before the range
if length <= 1:
return '{}'.format(beginning)
return '{},{}'.format(beginning, beginning + length - 1)
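# Illustrative values (not part of the original module): unlike the unified
# format above, the context range is "start,end" rather than "start,length":
#     _format_range_context(0, 3)  ->  '1,3'  (lines 1 through 3)
#     _format_range_context(0, 1)  ->  '1'
#     _format_range_context(3, 3)  ->  '3'    (empty range)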
# See http://www.unix.org/single_unix_specification/
def context_diff(a, b, fromfile='', tofile='',
fromfiledate='', tofiledate='', n=3, lineterm='\n'):
r"""
Compare two sequences of lines; generate the delta as a context diff.
Context diffs are a compact way of showing line changes and a few
lines of context. The number of context lines is set by 'n' which
defaults to three.
By default, the diff control lines (those with *** or ---) are
created with a trailing newline. This is helpful so that inputs
created from file.readlines() result in diffs that are suitable for
file.writelines() since both the inputs and outputs have trailing
newlines.
For inputs that do not have trailing newlines, set the lineterm
argument to "" so that the output will be uniformly newline free.
The context diff format normally has a header for filenames and
modification times. Any or all of these may be specified using
strings for 'fromfile', 'tofile', 'fromfiledate', and 'tofiledate'.
The modification times are normally expressed in the ISO 8601 format.
If not specified, the strings default to blanks.
Example:
    >>> print(''.join(context_diff('one\ntwo\nthree\nfour\n'.splitlines(True),
    ...       'zero\none\ntree\nfour\n'.splitlines(True), 'Original', 'Current')),
    ...       end="")
    *** Original
    --- Current
    ***************
    *** 1,4 ****
      one
    ! two
    ! three
      four
    --- 1,4 ----
    + zero
      one
    ! tree
      four
"""
_check_types(a, b, fromfile, tofile, fromfiledate, tofiledate, lineterm)
prefix = dict(insert='+ ', delete='- ', replace='! ', equal=' ')
started = False
for group in SequenceMatcher(None,a,b).get_grouped_opcodes(n):
if not started:
started = True
fromdate = '\t{}'.format(fromfiledate) if fromfiledate else ''
todate = '\t{}'.format(tofiledate) if tofiledate else ''
yield '*** {}{}{}'.format(fromfile, fromdate, lineterm)
yield '--- {}{}{}'.format(tofile, todate, lineterm)
first, last = group[0], group[-1]
yield '***************' + lineterm
file1_range = _format_range_context(first[1], last[2])
yield '*** {} ****{}'.format(file1_range, lineterm)
if any(tag in {'replace', 'delete'} for tag, _, _, _, _ in group):
for tag, i1, i2, _, _ in group:
if tag != 'insert':
for line in a[i1:i2]:
yield prefix[tag] + line
file2_range = _format_range_context(first[3], last[4])
yield '--- {} ----{}'.format(file2_range, lineterm)
if any(tag in {'replace', 'insert'} for tag, _, _, _, _ in group):
for tag, _, _, j1, j2 in group:
if tag != 'delete':
for line in b[j1:j2]:
yield prefix[tag] + line
def _check_types(a, b, *args):
# Checking types is weird, but the alternative is garbled output when
# someone passes mixed bytes and str to {unified,context}_diff(). E.g.
# without this check, passing filenames as bytes results in output like
# --- b'oldfile.txt'
# +++ b'newfile.txt'
# because of how str.format() incorporates bytes objects.
if a and not isinstance(a[0], str):
raise TypeError('lines to compare must be str, not %s (%r)' %
(type(a[0]).__name__, a[0]))
if b and not isinstance(b[0], str):
raise TypeError('lines to compare must be str, not %s (%r)' %
(type(b[0]).__name__, b[0]))
for arg in args:
if not isinstance(arg, str):
raise TypeError('all arguments must be str, not: %r' % (arg,))
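# For example (illustrative, not part of the original module), passing byte
# lines such as unified_diff([b'old\n'], [b'new\n']) raises TypeError here;
# diff_bytes() below is the supported path for byte inputs.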
def diff_bytes(dfunc, a, b, fromfile=b'', tofile=b'',
fromfiledate=b'', tofiledate=b'', n=3, lineterm=b'\n'):
r"""
Compare `a` and `b`, two sequences of lines represented as bytes rather
than str. This is a wrapper for `dfunc`, which is typically either
unified_diff() or context_diff(). Inputs are losslessly converted to
strings so that `dfunc` only has to worry about strings, and encoded
back to bytes on return. This is necessary to compare files with
unknown or inconsistent encoding. All other inputs (except `n`) must be
bytes rather than str.
"""
def decode(s):
try:
return s.decode('ascii', 'surrogateescape')
except AttributeError as err:
msg = ('all arguments must be bytes, not %s (%r)' %
(type(s).__name__, s))
raise TypeError(msg) from err
a = list(map(decode, a))
b = list(map(decode, b))
fromfile = decode(fromfile)
tofile = decode(tofile)
fromfiledate = decode(fromfiledate)
tofiledate = decode(tofiledate)
lineterm = decode(lineterm)
lines = dfunc(a, b, fromfile, tofile, fromfiledate, tofiledate, n, lineterm)
for line in lines:
yield line.encode('ascii', 'surrogateescape')
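# Illustrative usage (not part of the original module): keep bytes in and
# bytes out by wrapping unified_diff:
#
#     old = [b'one\n', b'two\n']
#     new = [b'one\n', b'tree\n']
#     for line in diff_bytes(unified_diff, old, new,
#                            fromfile=b'a.txt', tofile=b'b.txt'):
#         print(line.decode('ascii', 'surrogateescape'), end='')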
def ndiff(a, b, linejunk=None, charjunk=IS_CHARACTER_JUNK):
r"""
Compare `a` and `b` (lists of strings); return a `Differ`-style delta.
Optional keyword parameters `linejunk` and `charjunk` are for filter
functions, or can be None:
- linejunk: A function that should accept a single string argument and
return true iff the string is junk. The default is None, and is
recommended; the underlying SequenceMatcher class has an adaptive
notion of "noise" lines.
- charjunk: A function that accepts a character (string of length
1), and returns true iff the character is junk. The default is
the module-level function IS_CHARACTER_JUNK, which filters out
whitespace characters (a blank or tab; note: it's a bad idea to
include newline in this!).
Tools/scripts/ndiff.py is a command-line front-end to this function.
Example:
    >>> diff = ndiff('one\ntwo\nthree\n'.splitlines(keepends=True),
    ...              'ore\ntree\nemu\n'.splitlines(keepends=True))
    >>> print(''.join(diff), end="")
    - one
    ?  ^
    + ore
    ?  ^
    - two
    - three
    ?  -
    + tree
    + emu
"""
return Differ(linejunk, charjunk).compare(a, b)
def _mdiff(fromlines, tolines, context=None, linejunk=None,
charjunk=IS_CHARACTER_JUNK):
r"""Returns generator yielding marked up from/to side by side differences.
Arguments:
    fromlines -- list of text lines to be compared to tolines
tolines -- list of text lines to be compared to fromlines
context -- number of context lines to display on each side of difference,
if None, all from/to text lines will be generated.
linejunk -- passed on to ndiff (see ndiff documentation)
charjunk -- passed on to ndiff (see ndiff documentation)
This function returns an iterator which returns a tuple:
(from line tuple, to line tuple, boolean flag)
from/to line tuple -- (line num, line text)
line num -- integer or None (to indicate a context separation)
line text -- original line text with following markers inserted:
'\0+' -- marks start of added text
'\0-' -- marks start of deleted text
'\0^' -- marks start of changed text
'\1' -- marks end of added/deleted/changed text
boolean flag -- None indicates context separation, True indicates
either "from" or "to" line contains a change, otherwise False.
This function/iterator was originally developed to generate side by side
file difference for making HTML pages (see HtmlDiff class for example
usage).
Note, this function utilizes the ndiff function to generate the side by
side difference markup. Optional ndiff arguments may be passed to this
function and they in turn will be passed to ndiff.
"""
import re
# regular expression for finding intraline change indices
change_re = re.compile(r'(\++|\-+|\^+)')
# create the difference iterator to generate the differences
diff_lines_iterator = ndiff(fromlines,tolines,linejunk,charjunk)
def _make_line(lines, format_key, side, num_lines=[0,0]):
"""Returns line of text with user's change markup and line formatting.
lines -- list of lines from the ndiff generator to produce a line of
text from. When producing the line of text to return, the
lines used are removed from this list.
format_key -- '+' return first line in list with "add" markup around
the entire line.
'-' return first line in list with "delete" markup around
the entire line.
'?' return first line in list with add/delete/change
intraline markup (indices obtained from second line)
None return first line in list with no markup
        side -- index into the num_lines list (0=from, 1=to)
num_lines -- from/to current line number. This is NOT intended to be a
passed parameter. It is present as a keyword argument to
maintain memory of the current line numbers between calls
of this function.
Note, this function is purposefully not defined at the module scope so
that data it needs from its parent function (within whose context it
is defined) does not need to be of module scope.
"""
num_lines[side] += 1
# Handle case where no user markup is to be added, just return line of
# text with user's line format to allow for usage of the line number.
if format_key is None:
return (num_lines[side],lines.pop(0)[2:])
# Handle case of intraline changes
if format_key == '?':
text, markers = lines.pop(0), lines.pop(0)
# find intraline changes (store change type and indices in tuples)
sub_info = []
def record_sub_info(match_object,sub_info=sub_info):
sub_info.append([match_object.group(1)[0],match_object.span()])
return match_object.group(1)
change_re.sub(record_sub_info,markers)
# process each tuple inserting our special marks that won't be
# noticed by an xml/html escaper.
for key,(begin,end) in reversed(sub_info):
text = text[0:begin]+'\0'+key+text[begin:end]+'\1'+text[end:]
text = text[2:]
# Handle case of add/delete entire line
else:
text = lines.pop(0)[2:]
# if line of text is just a newline, insert a space so there is
# something for the user to highlight and see.
if not text:
text = ' '
# insert marks that won't be noticed by an xml/html escaper.
text = '\0' + format_key + text + '\1'
# Return line of text, first allow user's line formatter to do its
# thing (such as adding the line number) then replace the special
        # marks with the user's change markup.
return (num_lines[side],text)
def _line_iterator():
"""Yields from/to lines of text with a change indication.
This function is an iterator. It itself pulls lines from a
differencing iterator, processes them and yields them. When it can
it yields both a "from" and a "to" line, otherwise it will yield one
or the other. In addition to yielding the lines of from/to text, a
boolean flag is yielded to indicate if the text line(s) have
differences in them.
Note, this function is purposefully not defined at the module scope so
that data it needs from its parent function (within whose context it
is defined) does not need to be of module scope.
"""
lines = []
num_blanks_pending, num_blanks_to_yield = 0, 0
while True:
# Load up next 4 lines so we can look ahead, create strings which
# are a concatenation of the first character of each of the 4 lines
# so we can do some very readable comparisons.
while len(lines) < 4:
lines.append(next(diff_lines_iterator, 'X'))
s = ''.join([line[0] for line in lines])
if s.startswith('X'):
# When no more lines, pump out any remaining blank lines so the
# corresponding add/delete lines get a matching blank line so
# all line pairs get yielded at the next level.
num_blanks_to_yield = num_blanks_pending
elif s.startswith('-?+?'):
# simple intraline change
yield _make_line(lines,'?',0), _make_line(lines,'?',1), True
continue
elif s.startswith('--++'):
# in delete block, add block coming: we do NOT want to get
# caught up on blank lines yet, just process the delete line
num_blanks_pending -= 1
yield _make_line(lines,'-',0), None, True
continue
elif s.startswith(('--?+', '--+', '- ')):
# in delete block and see an intraline change or unchanged line
# coming: yield the delete line and then blanks
from_line,to_line = _make_line(lines,'-',0), None
num_blanks_to_yield,num_blanks_pending = num_blanks_pending-1,0
elif s.startswith('-+?'):
# intraline change
yield _make_line(lines,None,0), _make_line(lines,'?',1), True
continue
elif s.startswith('-?+'):
# intraline change
yield _make_line(lines,'?',0), _make_line(lines,None,1), True
continue
elif s.startswith('-'):
# delete FROM line
num_blanks_pending -= 1
yield _make_line(lines,'-',0), None, True
continue
elif s.startswith('+--'):
# in add block, delete block coming: we do NOT want to get
# caught up on blank lines yet, just process the add line
num_blanks_pending += 1
yield None, _make_line(lines,'+',1), True
continue
elif s.startswith(('+ ', '+-')):
# will be leaving an add block: yield blanks then add line
from_line, to_line = None, _make_line(lines,'+',1)
num_blanks_to_yield,num_blanks_pending = num_blanks_pending+1,0
elif s.startswith('+'):
# inside an add block, yield the add line
num_blanks_pending += 1
yield None, _make_line(lines,'+',1), True
continue
elif s.startswith(' '):
# unchanged text, yield it to both sides
yield _make_line(lines[:],None,0),_make_line(lines,None,1),False
continue
# Catch up on the blank lines so when we yield the next from/to
# pair, they are lined up.
while(num_blanks_to_yield < 0):
num_blanks_to_yield += 1
yield None,('','\n'),True
while(num_blanks_to_yield > 0):
num_blanks_to_yield -= 1
yield ('','\n'),None,True
if s.startswith('X'):
return
else:
yield from_line,to_line,True
def _line_pair_iterator():
"""Yields from/to lines of text with a change indication.
This function is an iterator. It itself pulls lines from the line
iterator. Its difference from that iterator is that this function
always yields a pair of from/to text lines (with the change
indication). If necessary it will collect single from/to lines
        until it has a matching from/to pair to yield.
Note, this function is purposefully not defined at the module scope so
that data it needs from its parent function (within whose context it
is defined) does not need to be of module scope.
"""
line_iterator = _line_iterator()
fromlines,tolines=[],[]
while True:
# Collecting lines of text until we have a from/to pair
while (len(fromlines)==0 or len(tolines)==0):
try:
from_line, to_line, found_diff = next(line_iterator)
except StopIteration:
return
if from_line is not None:
fromlines.append((from_line,found_diff))
if to_line is not None:
tolines.append((to_line,found_diff))
# Once we have a pair, remove them from the collection and yield it
from_line, fromDiff = fromlines.pop(0)
to_line, to_diff = tolines.pop(0)
yield (from_line,to_line,fromDiff or to_diff)
# Handle case where user does not want context differencing, just yield
# them up without doing anything else with them.
line_pair_iterator = _line_pair_iterator()
if context is None:
yield from line_pair_iterator
# Handle case where user wants context differencing. We must do some
# storage of lines until we know for sure that they are to be yielded.
else:
context += 1
lines_to_write = 0
while True:
# Store lines up until we find a difference, note use of a
# circular queue because we only need to keep around what
# we need for context.
index, contextLines = 0, [None]*(context)
found_diff = False
while(found_diff is False):
try:
from_line, to_line, found_diff = next(line_pair_iterator)
except StopIteration:
return
i = index % context
contextLines[i] = (from_line, to_line, found_diff)
index += 1
# Yield lines that we have collected so far, but first yield
# the user's separator.
if index > context:
yield None, None, None
lines_to_write = context
else:
lines_to_write = index
index = 0
while(lines_to_write):
i = index % context
index += 1
yield contextLines[i]
lines_to_write -= 1
# Now yield the context lines after the change
lines_to_write = context-1
try:
while(lines_to_write):
from_line, to_line, found_diff = next(line_pair_iterator)
# If another change within the context, extend the context
if found_diff:
lines_to_write = context-1
else:
lines_to_write -= 1
yield from_line, to_line, found_diff
except StopIteration:
# Catch exception from next() and return normally
return
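# Illustrative output (derived from the docstring contract above, not part of
# the original module): for fromlines=['one\n'] and tolines=['ore\n'], the
# first tuple yielded by _mdiff is
#     ((1, 'o\x00^n\x01e\n'), (1, 'o\x00^r\x01e\n'), True)
# i.e. a (line number, marked-up text) pair for each side plus a change flag.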
_file_template = """
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html>
<head>
<meta http-equiv="Content-Type"
content="text/html; charset=%(charset)s" />
<title></title>
<style type="text/css">%(styles)s
</style>
</head>
<body>
%(table)s%(legend)s
</body>
</html>"""
_styles = """
table.diff {font-family:Courier; border:medium;}
.diff_header {background-color:#e0e0e0}
td.diff_header {text-align:right}
.diff_next {background-color:#c0c0c0}
.diff_add {background-color:#aaffaa}
.diff_chg {background-color:#ffff77}
.diff_sub {background-color:#ffaaaa}"""
_table_template = """
<table class="diff" id="difflib_chg_%(prefix)s_top"
cellspacing="0" cellpadding="0" rules="groups" >
<colgroup></colgroup> <colgroup></colgroup> <colgroup></colgroup>
<colgroup></colgroup> <colgroup></colgroup> <colgroup></colgroup>
%(header_row)s
<tbody>
%(data_rows)s </tbody>
</table>"""
_legend = """
<table class="diff" summary="Legends">
<tr> <th colspan="2"> Legends </th> </tr>
<tr> <td> <table border="" summary="Colors">
<tr><th> Colors </th> </tr>
<tr><td class="diff_add"> Added </td></tr>
<tr><td class="diff_chg">Changed</td> </tr>
<tr><td class="diff_sub">Deleted</td> </tr>
</table></td>
<td> <table border="" summary="Links">
<tr><th colspan="2"> Links </th> </tr>
<tr><td>(f)irst change</td> </tr>
<tr><td>(n)ext change</td> </tr>
<tr><td>(t)op</td> </tr>
</table></td> </tr>
</table>"""
class HtmlDiff(object):
"""For producing HTML side by side comparison with change highlights.
This class can be used to create an HTML table (or a complete HTML file
containing the table) showing a side by side, line by line comparison
of text with inter-line and intra-line change highlights. The table can
be generated in either full or contextual difference mode.
The following methods are provided for HTML generation:
make_table -- generates HTML for a single side by side table
make_file -- generates complete HTML file with a single side by side table
    See Tools/scripts/diff.py for an example usage of this class.
"""
_file_template = _file_template
_styles = _styles
_table_template = _table_template
_legend = _legend
_default_prefix = 0
def __init__(self,tabsize=8,wrapcolumn=None,linejunk=None,
charjunk=IS_CHARACTER_JUNK):
"""HtmlDiff instance initializer
Arguments:
tabsize -- tab stop spacing, defaults to 8.
wrapcolumn -- column number where lines are broken and wrapped,
defaults to None where lines are not wrapped.
linejunk,charjunk -- keyword arguments passed into ndiff() (used by
HtmlDiff() to generate the side by side HTML differences). See
ndiff() documentation for argument default values and descriptions.
"""
self._tabsize = tabsize
self._wrapcolumn = wrapcolumn
self._linejunk = linejunk
self._charjunk = charjunk
def make_file(self, fromlines, tolines, fromdesc='', todesc='',
context=False, numlines=5, *, charset='utf-8'):
"""Returns HTML file of side by side comparison with change highlights
Arguments:
fromlines -- list of "from" lines
tolines -- list of "to" lines
fromdesc -- "from" file column header string
todesc -- "to" file column header string
context -- set to True for contextual differences (defaults to False
which shows full differences).
numlines -- number of context lines. When context is set True,
controls number of lines displayed before and after the change.
When context is False, controls the number of lines to place
the "next" link anchors before the next change (so click of
"next" link jumps to just before the change).
charset -- charset of the HTML document
"""
return (self._file_template % dict(
styles=self._styles,
legend=self._legend,
table=self.make_table(fromlines, tolines, fromdesc, todesc,
context=context, numlines=numlines),
charset=charset
)).encode(charset, 'xmlcharrefreplace').decode(charset)
def _tab_newline_replace(self,fromlines,tolines):
"""Returns from/to line lists with tabs expanded and newlines removed.
Instead of tab characters being replaced by the number of spaces
needed to fill in to the next tab stop, this function will fill
the space with tab characters. This is done so that the difference
algorithms can identify changes in a file when tabs are replaced by
spaces and vice versa. At the end of the HTML generation, the tab
        characters will be replaced with a non-breaking space.
"""
def expand_tabs(line):
# hide real spaces
line = line.replace(' ','\0')
# expand tabs into spaces
line = line.expandtabs(self._tabsize)
# replace spaces from expanded tabs back into tab characters
# (we'll replace them with markup after we do differencing)
line = line.replace(' ','\t')
return line.replace('\0',' ').rstrip('\n')
fromlines = [expand_tabs(line) for line in fromlines]
tolines = [expand_tabs(line) for line in tolines]
return fromlines,tolines
def _split_line(self,data_list,line_num,text):
"""Builds list of text lines by splitting text lines at wrap point
This function will determine if the input text line needs to be
wrapped (split) into separate lines. If so, the first wrap point
will be determined and the first line appended to the output
text line list. This function is used recursively to handle
the second part of the split line to further split it.
"""
# if blank line or context separator, just add it to the output list
if not line_num:
data_list.append((line_num,text))
return
# if line text doesn't need wrapping, just add it to the output list
size = len(text)
max = self._wrapcolumn
if (size <= max) or ((size -(text.count('\0')*3)) <= max):
data_list.append((line_num,text))
return
# scan text looking for the wrap point, keeping track if the wrap
# point is inside markers
i = 0
n = 0
mark = ''
while n < max and i < size:
if text[i] == '\0':
i += 1
mark = text[i]
i += 1
elif text[i] == '\1':
i += 1
mark = ''
else:
i += 1
n += 1
# wrap point is inside text, break it up into separate lines
line1 = text[:i]
line2 = text[i:]
# if wrap point is inside markers, place end marker at end of first
# line and start marker at beginning of second line because each
# line will have its own table tag markup around it.
if mark:
line1 = line1 + '\1'
line2 = '\0' + mark + line2
# tack on first line onto the output list
data_list.append((line_num,line1))
# use this routine again to wrap the remaining text
self._split_line(data_list,'>',line2)
def _line_wrapper(self,diffs):
"""Returns iterator that splits (wraps) mdiff text lines"""
# pull from/to data and flags from mdiff iterator
for fromdata,todata,flag in diffs:
# check for context separators and pass them through
if flag is None:
yield fromdata,todata,flag
continue
(fromline,fromtext),(toline,totext) = fromdata,todata
# for each from/to line split it at the wrap column to form
# list of text lines.
fromlist,tolist = [],[]
self._split_line(fromlist,fromline,fromtext)
self._split_line(tolist,toline,totext)
# yield from/to line in pairs inserting blank lines as
# necessary when one side has more wrapped lines
while fromlist or tolist:
if fromlist:
fromdata = fromlist.pop(0)
else:
fromdata = ('',' ')
if tolist:
todata = tolist.pop(0)
else:
todata = ('',' ')
yield fromdata,todata,flag
def _collect_lines(self,diffs):
"""Collects mdiff output into separate lists
Before storing the mdiff from/to data into a list, it is converted
into a single line of text with HTML markup.
"""
fromlist,tolist,flaglist = [],[],[]
# pull from/to data and flags from mdiff style iterator
for fromdata,todata,flag in diffs:
try:
# store HTML markup of the lines into the lists
fromlist.append(self._format_line(0,flag,*fromdata))
tolist.append(self._format_line(1,flag,*todata))
except TypeError:
# exceptions occur for lines where context separators go
fromlist.append(None)
tolist.append(None)
flaglist.append(flag)
return fromlist,tolist,flaglist
def _format_line(self,side,flag,linenum,text):
"""Returns HTML markup of "from" / "to" text lines
side -- 0 or 1 indicating "from" or "to" text
flag -- indicates if difference on line
linenum -- line number (used for line number column)
text -- line text to be marked up
"""
try:
linenum = '%d' % linenum
id = ' id="%s%s"' % (self._prefix[side],linenum)
except TypeError:
# handle blank lines where linenum is '>' or ''
id = ''
# replace those things that would get confused with HTML symbols
        text=text.replace("&","&amp;").replace(">","&gt;").replace("<","&lt;")
        # make spaces non-breaking so they don't get compressed or line wrapped
        text = text.replace(' ','&nbsp;').rstrip()
return '<td class="diff_header"%s>%s</td><td nowrap="nowrap">%s</td>' \
% (id,linenum,text)
def _make_prefix(self):
"""Create unique anchor prefixes"""
# Generate a unique anchor prefix so multiple tables
# can exist on the same HTML page without conflicts.
fromprefix = "from%d_" % HtmlDiff._default_prefix
toprefix = "to%d_" % HtmlDiff._default_prefix
HtmlDiff._default_prefix += 1
# store prefixes so line format method has access
self._prefix = [fromprefix,toprefix]
def _convert_flags(self,fromlist,tolist,flaglist,context,numlines):
"""Makes list of "next" links"""
# all anchor names will be generated using the unique "to" prefix
toprefix = self._prefix[1]
# process change flags, generating middle column of next anchors/links
next_id = ['']*len(flaglist)
next_href = ['']*len(flaglist)
num_chg, in_change = 0, False
last = 0
for i,flag in enumerate(flaglist):
if flag:
if not in_change:
in_change = True
last = i
# at the beginning of a change, drop an anchor a few lines
# (the context lines) before the change for the previous
# link
i = max([0,i-numlines])
next_id[i] = ' id="difflib_chg_%s_%d"' % (toprefix,num_chg)
# at the beginning of a change, drop a link to the next
# change
num_chg += 1
next_href[last] = '<a href="#difflib_chg_%s_%d">n</a>' % (
toprefix,num_chg)
else:
in_change = False
# check for cases where there is no content to avoid exceptions
if not flaglist:
flaglist = [False]
next_id = ['']
next_href = ['']
last = 0
if context:
                fromlist = ['<td></td><td>&nbsp;No Differences Found&nbsp;</td>']
tolist = fromlist
else:
                fromlist = tolist = ['<td></td><td>&nbsp;Empty File&nbsp;</td>']
# if not a change on first line, drop a link
if not flaglist[0]:
next_href[0] = '<a href="#difflib_chg_%s_0">f</a>' % toprefix
# redo the last link to link to the top
next_href[last] = '<a href="#difflib_chg_%s_top">t</a>' % (toprefix)
return fromlist,tolist,flaglist,next_href,next_id
def make_table(self,fromlines,tolines,fromdesc='',todesc='',context=False,
numlines=5):
"""Returns HTML table of side by side comparison with change highlights
Arguments:
fromlines -- list of "from" lines
tolines -- list of "to" lines
fromdesc -- "from" file column header string
todesc -- "to" file column header string
context -- set to True for contextual differences (defaults to False
which shows full differences).
numlines -- number of context lines. When context is set True,
controls number of lines displayed before and after the change.
When context is False, controls the number of lines to place
the "next" link anchors before the next change (so click of
"next" link jumps to just before the change).
"""
# make unique anchor prefixes so that multiple tables may exist
# on the same page without conflict.
self._make_prefix()
# change tabs to spaces before it gets more difficult after we insert
# markup
fromlines,tolines = self._tab_newline_replace(fromlines,tolines)
# create diffs iterator which generates side by side from/to data
if context:
context_lines = numlines
else:
context_lines = None
diffs = _mdiff(fromlines,tolines,context_lines,linejunk=self._linejunk,
charjunk=self._charjunk)
# set up iterator to wrap lines that exceed desired width
if self._wrapcolumn:
diffs = self._line_wrapper(diffs)
# collect up from/to lines and flags into lists (also format the lines)
fromlist,tolist,flaglist = self._collect_lines(diffs)
# process change flags, generating middle column of next anchors/links
fromlist,tolist,flaglist,next_href,next_id = self._convert_flags(
fromlist,tolist,flaglist,context,numlines)
s = []
fmt = ' <tr><td class="diff_next"%s>%s</td>%s' + \
'<td class="diff_next">%s</td>%s</tr>\n'
for i in range(len(flaglist)):
if flaglist[i] is None:
                # mdiff yields None on separator lines; skip the bogus ones
# generated for the first line
if i > 0:
s.append(' </tbody> \n <tbody>\n')
else:
s.append( fmt % (next_id[i],next_href[i],fromlist[i],
next_href[i],tolist[i]))
if fromdesc or todesc:
header_row = '<thead><tr>%s%s%s%s</tr></thead>' % (
'<th class="diff_next"><br /></th>',
'<th colspan="2" class="diff_header">%s</th>' % fromdesc,
'<th class="diff_next"><br /></th>',
'<th colspan="2" class="diff_header">%s</th>' % todesc)
else:
header_row = ''
table = self._table_template % dict(
data_rows=''.join(s),
header_row=header_row,
prefix=self._prefix[1])
        return table.replace('\0+','<span class="diff_add">'). \
                     replace('\0-','<span class="diff_sub">'). \
                     replace('\0^','<span class="diff_chg">'). \
                     replace('\1','</span>'). \
                     replace('\t','&nbsp;')
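# Illustrative usage of HtmlDiff (not part of the original module):
#
#     diff = HtmlDiff(wrapcolumn=70)
#     html = diff.make_file(['one\n', 'two\n'], ['ore\n', 'two\n'],
#                           'before', 'after', context=True, numlines=2)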
del re
def restore(delta, which):
r"""
Generate one of the two sequences that generated a delta.
Given a `delta` produced by `Differ.compare()` or `ndiff()`, extract
lines originating from file 1 or 2 (parameter `which`), stripping off line
prefixes.
Examples:
>>> diff = ndiff('one\ntwo\nthree\n'.splitlines(keepends=True),
... 'ore\ntree\nemu\n'.splitlines(keepends=True))
>>> diff = list(diff)
>>> print(''.join(restore(diff, 1)), end="")
one
two
three
>>> print(''.join(restore(diff, 2)), end="")
ore
tree
emu
"""
try:
tag = {1: "- ", 2: "+ "}[int(which)]
except KeyError:
raise ValueError('unknown delta choice (must be 1 or 2): %r'
% which)
prefixes = (" ", tag)
for line in delta:
if line[:2] in prefixes:
yield line[2:]
def _test():
import doctest, difflib
return doctest.testmod(difflib)
if __name__ == "__main__":
_test()
| prefetchnta/questlab | bin/x64bin/python/36/Lib/difflib.py | Python | lgpl-2.1 | 86,474 |
import json
import os
import uuid
folder, fname = os.path.split(__file__)
def contents_of(f):
with open(f, 'r') as ins_file:
contents = ' '.join(ins_file.readlines())
return contents
DEBUG = bool(int(os.environ.get('SOWING_DEBUG', 1))) # True
CSRF_SECRET = 'mysecretsRsaf3' if DEBUG else uuid.uuid4().hex
LOCAL_SETTINGS = os.environ.get('SOWING_SETTINGS', None)
if LOCAL_SETTINGS is None:
LOCAL_SETTINGS = os.path.join(folder, 'local-settings.json')
if not os.path.exists(LOCAL_SETTINGS):
    raise EnvironmentError('no configuration settings file `local-settings.json`: %s' % LOCAL_SETTINGS)
APP_CONFIG = {
'port': 8888,
'host': '127.0.0.1',
'domain': 'sowingseasons.com',
    'protocol': 'http' if DEBUG else 'https',  # plain HTTP in local debug only; we don't support HTTP on the WildWildWeb
'media': r'/home/blake/temp/sowing-seasons-media',
'private_settings': json.load(open(LOCAL_SETTINGS, 'r')),
'logging': {
'version': 1,
'incremental': False,
'disable_existing_loggers': False,
'loggers': {
'summer': {
'level': 'DEBUG',
'handlers': ['console', 'file'],
'qualname': 'sowing',
'propagate': 0
}
},
'formatters': {
"default": {
"format": "%(asctime)s %(ip)-15s %(levelname)-5s %(name)-40s: %(message)s",
"datefmt": "%Y-%m-%d %H:%M:%S"
}
},
'filters': {
'traffic': {
'()': 'summer.ext.logs.IPFilter'
}
},
'handlers': {
'file': {
'class': 'logging.handlers.RotatingFileHandler',
'formatter': 'default',
'level': 'DEBUG',
'filename': r'/home/blake/temp/sowing-seasons-logs/server.log',
'maxBytes': 10000000,
'backupCount': 20,
'mode': 'a',
'filters': ['traffic']
},
'console': {
'class': 'logging.StreamHandler',
'formatter': 'default',
'level': 'DEBUG',
'stream': 'ext://sys.stdout',
'filters': ['traffic']
}
}
}
}
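# Illustrative note (assumption, not from this file): the 'logging' block
# above follows the stdlib dictConfig schema and would typically be applied
# elsewhere with:
#
#     import logging.config
#     logging.config.dictConfig(APP_CONFIG['logging'])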
SEO_VALUES = {
'title': 'SowingSeasons - takes awhile to grow anything.',
'keywords': 'technology,programming,python',
'description': contents_of(r'DESCRIPTION'),
'author': 'Blake VandeMerwe <[email protected]>',
'author_name': 'Blake VandeMerwe',
'author_email': '[email protected]',
'google': {
'author_id': '+BlakeVandeMerwe'
},
'img': r'/static/img/profile.jpg',
'run_analytics': not DEBUG
}
TORNADO_CONFIG = {
'debug': DEBUG,
'compress_response': True,
'cookie_secret': CSRF_SECRET,
'login_url': '/login',
'xsrf_cookies': True,
# static files
'static_hash_cache': not DEBUG,
}
WHOOSH = {
'index_name': 'sowing-seasons',
'location': r'/home/blake/temp/sowing-seasons-index'
} | blakev/sowing-seasons | summer/settings.py | Python | mit | 3,040 |
#
# Copyright (c) 2012-2014 Jonathan Topf
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
import sys
import maya.OpenMaya as OpenMaya
import maya.OpenMayaMPx as OpenMayaMPx
import maya.OpenMayaRender as OpenMayaRender
import maya.OpenMayaUI as OpenMayaUI
import maya.cmds as cmds
import inspect
import os
import os.path
ROOT_DIRECTORY = os.path.split((os.path.dirname(inspect.getfile(inspect.currentframe()))))[0]
sys.path.append(os.path.join(ROOT_DIRECTORY, 'nodes'))
import ms_menu
import ms_shelf
import ms_commands
#--------------------------------------------------------------------------------------------------
# mayaseed plugin.
#--------------------------------------------------------------------------------------------------
def initializePlugin(obj):
import ms_appleseed_material
ms_appleseed_material.initializePlugin(obj)
import ms_appleseed_shading_node
ms_appleseed_shading_node.initializePlugin(obj)
import ms_environment
ms_environment.initializePlugin(obj)
import ms_physical_environment
ms_physical_environment.initializePlugin(obj)
import ms_render_settings
ms_render_settings.initializePlugin(obj)
import ms_appleseed_scene
ms_appleseed_scene.initializePlugin(obj)
ms_menu.createMenu()
ms_menu.buildMenu()
ms_shelf.create_if_absent()
import AEms_renderSettingsTemplate
appleseed_version_notice = 'This version of mayaseed is designed to work with {0}. Other versions of appleseed may work but have not been tested.'.format(ms_commands.RECCOMENDED_APPLESEED_VERSION)
ms_commands.info(appleseed_version_notice)
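# Illustrative note (assumption, not from this file): Maya would load this
# plug-in via the Plug-in Manager or from a script, e.g.
#
#     import maya.cmds as cmds
#     cmds.loadPlugin('mayaseed.py')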
def uninitializePlugin(obj):
import ms_appleseed_material
ms_appleseed_material.uninitializePlugin(obj)
import ms_appleseed_shading_node
ms_appleseed_shading_node.uninitializePlugin(obj)
import ms_environment
ms_environment.uninitializePlugin(obj)
import ms_physical_environment
ms_physical_environment.uninitializePlugin(obj)
import ms_render_settings
ms_render_settings.uninitializePlugin(obj)
import ms_appleseed_scene
ms_appleseed_scene.uninitializePlugin(obj)
ms_menu.deleteMenu()
| ckod3/mayaseed | plug-ins/mayaseed.py | Python | mit | 3,201 |
"""
Apiary is a modern data-center management tool that includes provisioning,
configuration management and inventory control.
"""
__version__ = '0.0.1'
UUID_RE = "(?P<id>[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12})"
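# Illustrative usage (not part of the original module): the pattern captures
# the UUID as a named group:
#
#     import re
#     m = re.match(UUID_RE, '123e4567-e89b-12d3-a456-426614174000')
#     if m:
#         resource_id = m.group('id')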
| gmr/apiary | apiary/__init__.py | Python | bsd-3-clause | 235 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import unittest
from pathlib import Path
from onto2nx import OWLParser, parse_owl
from tests.contants import pizza_iri, test_owl_ado, test_owl_pizza, test_owl_wine
from tests.mocks import mock_parse_owl_rdf, mock_parse_owl_xml
EXPECTED_PIZZA_NODES = {
'Pizza',
'Topping',
'CheeseTopping',
'FishTopping',
'MeatTopping',
'TomatoTopping'
}
EXPECTED_PIZZA_EDGES = {
('CheeseTopping', 'Topping'),
('FishTopping', 'Topping'),
('MeatTopping', 'Topping'),
('TomatoTopping', 'Topping')
}
wine_prefixes = {
'owl': "http://www.w3.org/2002/07/owl#",
'rdf': "http://www.w3.org/1999/02/22-rdf-syntax-ns#"
}
wine_classes = {
'Region',
'Vintage',
'VintageYear',
'Wine',
'WineDescriptor',
'WineColor',
'WineTaste',
'WineBody',
'WineFlavor',
'WineSugar',
'Winery'
}
wine_individuals = {
'Red',
'Rose',
'White',
'Full',
'Light',
'Medium',
'Delicate',
'Moderate',
'Strong',
'Dry',
'OffDry',
'Sweet',
# Wineries
'Bancroft',
'Beringer',
'ChateauChevalBlanc',
'ChateauDeMeursault',
'ChateauDYchem',
'ChateauLafiteRothschild',
'ChateauMargauxWinery',
'ChateauMorgon',
'ClosDeLaPoussie',
'ClosDeVougeot',
'CongressSprings',
'Corbans',
'CortonMontrachet',
'Cotturi',
'DAnjou',
'Elyse',
'Forman',
'Foxen',
'GaryFarrell',
'Handley',
'KalinCellars',
'KathrynKennedy',
'LaneTanner',
'Longridge',
'Marietta',
'McGuinnesso',
'Mountadam',
'MountEdenVineyard',
'PageMillWinery',
'PeterMccoy',
'PulignyMontrachet',
'SantaCruzMountainVineyard',
'SaucelitoCanyon',
'SchlossRothermel',
'SchlossVolrad',
'SeanThackrey',
'Selaks',
'SevreEtMaine',
'StGenevieve',
'Stonleigh',
'Taylor',
'Ventana',
'WhitehallLane',
# Wines
}
wine_nodes = wine_classes | wine_individuals
wine_subclasses = {
('WineSugar', 'WineTaste'),
('WineTaste', 'WineDescriptor'),
('WineColor', 'WineDescriptor')
}
wine_membership = {
('Red', 'WineColor'),
('Rose', 'WineColor'),
('White', 'WineColor'),
('Full', 'WineBody'),
('Light', 'WineBody'),
('Medium', 'WineBody'),
('Delicate', 'WineFlavor'),
('Moderate', 'WineFlavor'),
('Strong', 'WineFlavor'),
('Dry', 'WineSugar'),
('OffDry', 'WineSugar'),
('Sweet', 'WineSugar'),
# Winery Membership
('Bancroft', 'Winery'),
('Beringer', 'Winery'),
('ChateauChevalBlanc', 'Winery'),
('ChateauDeMeursault', 'Winery'),
('ChateauDYchem', 'Winery'),
('ChateauLafiteRothschild', 'Winery'),
('ChateauMargauxWinery', 'Winery'),
('ChateauMorgon', 'Winery'),
('ClosDeLaPoussie', 'Winery'),
('ClosDeVougeot', 'Winery'),
('CongressSprings', 'Winery'),
('Corbans', 'Winery'),
('CortonMontrachet', 'Winery'),
('Cotturi', 'Winery'),
('DAnjou', 'Winery'),
('Elyse', 'Winery'),
('Forman', 'Winery'),
('Foxen', 'Winery'),
('GaryFarrell', 'Winery'),
('Handley', 'Winery'),
('KalinCellars', 'Winery'),
('KathrynKennedy', 'Winery'),
('LaneTanner', 'Winery'),
('Longridge', 'Winery'),
('Marietta', 'Winery'),
('McGuinnesso', 'Winery'),
('Mountadam', 'Winery'),
('MountEdenVineyard', 'Winery'),
('PageMillWinery', 'Winery'),
('PeterMccoy', 'Winery'),
('PulignyMontrachet', 'Winery'),
('SantaCruzMountainVineyard', 'Winery'),
('SaucelitoCanyon', 'Winery'),
('SchlossRothermel', 'Winery'),
('SchlossVolrad', 'Winery'),
('SeanThackrey', 'Winery'),
('Selaks', 'Winery'),
('SevreEtMaine', 'Winery'),
('StGenevieve', 'Winery'),
('Stonleigh', 'Winery'),
('Taylor', 'Winery'),
('Ventana', 'Winery'),
('WhitehallLane', 'Winery'),
}
wine_edges = wine_subclasses | wine_membership
ado_expected_nodes_subset = {
'immunotherapy',
'In_vitro_models',
'white',
'ProcessualEntity'
}
ado_expected_edges_subset = {
('control_trials_study_arm', 'Study_arm'),
('copper', 'MaterialEntity'),
('curcumin_plant', 'plant'),
('cytokine', 'cell_signalling') # Line 12389 of ado.owl
}
expected_prefixes = {
"rdf": "http://www.w3.org/1999/02/22-rdf-syntax-ns#",
"rdfs": "http://www.w3.org/2000/01/rdf-schema#",
"xsd": "http://www.w3.org/2001/XMLSchema#",
"owl": "http://www.w3.org/2002/07/owl#"
}
class TestOwlUtils(unittest.TestCase):
def test_value_error(self):
with self.assertRaises(ValueError):
OWLParser()
def test_invalid_owl(self):
with self.assertRaises(Exception):
parse_owl('http://example.com/not_owl')
class TestParse(unittest.TestCase):
"""This class tests the parsing of OWL documents and doesn't need a connection"""
def test_parse_pizza_file(self):
owl = parse_owl(Path(test_owl_pizza).as_uri())
self.assertEqual(EXPECTED_PIZZA_NODES, set(owl.nodes()))
self.assertEqual(EXPECTED_PIZZA_EDGES, set(owl.edges()))
@mock_parse_owl_rdf
@mock_parse_owl_xml
def test_parse_pizza_url(self, m1, m2):
owl = parse_owl(pizza_iri)
self.assertEqual(pizza_iri, owl.graph['IRI'])
self.assertEqual(EXPECTED_PIZZA_NODES, set(owl.nodes()))
self.assertEqual(EXPECTED_PIZZA_EDGES, set(owl.edges()))
def test_parse_wine_file(self):
owl = parse_owl(Path(test_owl_wine).as_uri())
for node in sorted(wine_classes):
self.assertIn(node, owl)
for node in sorted(wine_individuals):
self.assertIn(node, owl)
for u, v in sorted(wine_subclasses):
self.assertIn(u, owl)
self.assertIn(v, owl.edge[u])
for u, v in sorted(wine_membership):
self.assertIn(u, owl)
self.assertIn(v, owl.edge[u])
@mock_parse_owl_rdf
@mock_parse_owl_xml
def test_ado_local(self, mock1, mock2):
ado_path = Path(test_owl_ado).as_uri()
owl = parse_owl(ado_path)
self.assertLessEqual(ado_expected_nodes_subset, set(owl.nodes_iter()))
self.assertLessEqual(ado_expected_edges_subset, set(owl.edges_iter()))
if __name__ == '__main__':
unittest.main()
| cthoyt/onto2nx | tests/test_owl.py | Python | gpl-3.0 | 6,315 |
from django import forms
from django.contrib.auth.models import User
from crispy_forms.helper import FormHelper
from crispy_forms.layout import *
from inventory.user.models import Experimenter, Reader, Subject
class UserForm(forms.Form):
'''Form for creating a new User.
'''
USER_TYPE_CHOICES = (
('admin', 'Admin'),
('experimenter', 'Experimenter'),
('reader', 'Reader')
)
user_type = forms.ChoiceField(widget=forms.RadioSelect,
choices=USER_TYPE_CHOICES,
required=True)
first_name = forms.CharField(required=True)
last_name = forms.CharField(required=True)
email = forms.EmailField(required=True)
password1 = forms.CharField(widget=forms.PasswordInput, label='Password',
max_length=30, required=True)
password2 = forms.CharField(widget=forms.PasswordInput, label='Password (again)',
max_length=30, required=True)
    def clean_email(self):
        '''Ensure no user with this email already exists.
        '''
        try:
            User.objects.get(email=self.cleaned_data['email'])
        except User.DoesNotExist:
            return self.cleaned_data['email']
        raise forms.ValidationError('A user with this email address already exists.')
def clean(self):
'''Check that password1 and password2 match.
'''
# Check initial validation
        if 'password1' in self.cleaned_data and 'password2' in self.cleaned_data:
if self.cleaned_data['password1'] != self.cleaned_data['password2']:
raise forms.ValidationError("Passwords don't match.")
return self.cleaned_data
def save(self):
'''Create the user of the specified user type.
'''
# Create the new user. Note: The username is the email address
new_user = User.objects.create_user(username=self.cleaned_data['email'],
email=self.cleaned_data['email'],
password=self.cleaned_data['password1'])
new_user.first_name = self.cleaned_data['first_name']
new_user.last_name = self.cleaned_data['last_name']
# Create the user object of the specified type
user_type = self.cleaned_data['user_type']
new_user.save()
if user_type == 'admin':
new_user.is_superuser = True
new_user.save()
elif user_type == 'experimenter':
return Experimenter.objects.create(user=new_user)
elif user_type == 'reader':
return Reader.objects.create(user=new_user)
return new_user
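    # Illustrative usage (not part of the original module):
    #
    #     form = UserForm({'user_type': 'reader', 'first_name': 'Ada',
    #                      'last_name': 'Lovelace', 'email': 'ada@example.com',
    #                      'password1': 'secret', 'password2': 'secret'})
    #     if form.is_valid():
    #         reader = form.save()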
def __init__(self, *args, **kwargs):
self.helper = FormHelper()
self.helper.form_id = 'user_form'
self.helper.form_class = 'form-widget'
self.helper.form_method = 'post'
self.helper.layout = Layout(
Fieldset(
'Create User',
'user_type',
'first_name',
'last_name',
'email',
'password1',
'password2',
),
ButtonHolder(
Submit('submit','Submit')
)
)
super(UserForm, self).__init__(*args, **kwargs)
class SubjectForm(forms.ModelForm):
class Meta:
model = Subject
def __init__(self, *args, **kwargs):
self.helper = FormHelper()
self.helper.form_id = 'subject_form'
self.helper.form_method = 'post'
self.helper.add_input(Submit('submit', 'Submit'))
super(SubjectForm, self).__init__(*args, **kwargs) | sloria/device-inventory | inventory/user/forms.py | Python | bsd-3-clause | 3,771 |
# Copyright 2012 Michael Still and Canonical Inc
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Config Drive v2 helper."""
import os
import shutil
import tempfile
from oslo.config import cfg
from nova import exception
from nova.openstack.common import fileutils
from nova.openstack.common import log as logging
from nova.openstack.common import strutils
from nova.openstack.common import units
from nova import utils
from nova import version
LOG = logging.getLogger(__name__)
configdrive_opts = [
cfg.StrOpt('config_drive_format',
default='iso9660',
help='Config drive format. One of iso9660 (default) or vfat'),
cfg.StrOpt('config_drive_tempdir',
default=tempfile.tempdir,
help=('DEPRECATED (not needed any more): '
' Where to put temporary files associated with '
'config drive creation')),
# force_config_drive is a string option, to allow for future behaviors
# (e.g. use config_drive based on image properties)
cfg.StrOpt('force_config_drive',
help='Set to force injection to take place on a config drive '
'(if set, valid options are: always)'),
cfg.StrOpt('mkisofs_cmd',
default='genisoimage',
help='Name and optionally path of the tool used for '
'ISO image creation')
]
CONF = cfg.CONF
CONF.register_opts(configdrive_opts)
# Config drives are 64 MB if we can't size them to the exact size of the data
CONFIGDRIVESIZE_BYTES = 64 * units.Mi
class ConfigDriveBuilder(object):
"""Build config drives, optionally as a context manager."""
def __init__(self, instance_md=None):
self.imagefile = None
self.mdfiles = []
if instance_md is not None:
self.add_instance_metadata(instance_md)
def __enter__(self):
return self
def __exit__(self, exctype, excval, exctb):
if exctype is not None:
# NOTE(mikal): this means we're being cleaned up because an
# exception was thrown. All bets are off now, and we should not
# swallow the exception
return False
self.cleanup()
def _add_file(self, basedir, path, data):
filepath = os.path.join(basedir, path)
dirname = os.path.dirname(filepath)
fileutils.ensure_tree(dirname)
with open(filepath, 'wb') as f:
f.write(data)
def add_instance_metadata(self, instance_md):
for (path, data) in instance_md.metadata_for_config_drive():
self.mdfiles.append((path, data))
def _write_md_files(self, basedir):
for data in self.mdfiles:
self._add_file(basedir, data[0], data[1])
def _make_iso9660(self, path, tmpdir):
publisher = "%(product)s %(version)s" % {
'product': version.product_string(),
'version': version.version_string_with_package()
}
utils.execute(CONF.mkisofs_cmd,
'-o', path,
'-ldots',
'-allow-lowercase',
'-allow-multidot',
'-l',
'-publisher',
publisher,
'-quiet',
'-J',
'-r',
'-V', 'config-2',
tmpdir,
attempts=1,
run_as_root=False)
def _make_vfat(self, path, tmpdir):
# NOTE(mikal): This is a little horrible, but I couldn't find an
# equivalent to genisoimage for vfat filesystems.
with open(path, 'wb') as f:
f.truncate(CONFIGDRIVESIZE_BYTES)
utils.mkfs('vfat', path, label='config-2')
with utils.tempdir() as mountdir:
mounted = False
try:
_, err = utils.trycmd(
'mount', '-o', 'loop,uid=%d,gid=%d' % (os.getuid(),
os.getgid()),
path,
mountdir,
run_as_root=True)
if err:
raise exception.ConfigDriveMountFailed(operation='mount',
error=err)
mounted = True
                # NOTE(mikal): I can't just use shutil.copytree here,
# because the destination directory already
# exists. This is annoying.
for ent in os.listdir(tmpdir):
shutil.copytree(os.path.join(tmpdir, ent),
os.path.join(mountdir, ent))
finally:
if mounted:
utils.execute('umount', mountdir, run_as_root=True)
def make_drive(self, path):
"""Make the config drive.
:param path: the path to place the config drive image at
        :raises ProcessExecutionError if a helper process has failed.
"""
with utils.tempdir() as tmpdir:
self._write_md_files(tmpdir)
if CONF.config_drive_format == 'iso9660':
self._make_iso9660(path, tmpdir)
elif CONF.config_drive_format == 'vfat':
self._make_vfat(path, tmpdir)
else:
raise exception.ConfigDriveUnknownFormat(
format=CONF.config_drive_format)
def cleanup(self):
if self.imagefile:
fileutils.delete_if_exists(self.imagefile)
def __repr__(self):
return "<ConfigDriveBuilder: " + str(self.mdfiles) + ">"
def required_by(instance):
return (instance.get('config_drive') or
'always' == CONF.force_config_drive or
strutils.bool_from_string(CONF.force_config_drive))
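# Illustrative usage (not part of the original module): build a drive for an
# instance, cleaning up temporary state automatically:
#
#     with ConfigDriveBuilder(instance_md=inst_md) as cdb:
#         cdb.make_drive('/var/lib/nova/instances/<uuid>/disk.config')
#
# where `inst_md` is an object exposing metadata_for_config_drive(), such as
# nova.api.metadata.base.InstanceMetadata (`inst_md` is a hypothetical name).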
| viggates/nova | nova/virt/configdrive.py | Python | apache-2.0 | 6,402 |
# -*- encoding: utf-8 -*-
# © 2014 Elico Corp (https://www.elico-corp.com)
# Licence AGPL-3.0 or later(http://www.gnu.org/licenses/agpl.html)
from datetime import datetime
import time
from osv import fields, osv
from tools.translate import _
from tools import ustr
#import tools
class gap_analysis_effort(osv.Model):
_name = "gap_analysis.effort"
_description = "Gap Analysis Efforts"
_columns = {
'name': fields.char('Effort', size=4, required=True,),
'unknown': fields.boolean('Undefined duration ?', help='If checked, when this effort is used, the user would have to specify the duration manually.'),
'duration': fields.float('Duration (hour)', help='Duration in hour for this effort.', required=True,),
}
def onchange_unknown(self, cr, uid, ids, unknown):
val = {}
val['unknown'] = unknown
if not unknown:
val['duration'] = 0.0
return {'value': val}
_order = 'name'
class gap_analysis_workload_type(osv.Model):
_name = "gap_analysis.workload.type"
_description = "Gap Analysis Workload Type"
_columns = {
'name': fields.char('Name', size=64, required=True, translate=True),
'category': fields.selection([('Functional Analysis','Functional'), ('Technical Analysis','Technical')], 'Analysis', required=True,),
'code': fields.char('Code for Report', size=8, required=True, translate=True, help="Set the code if name is too long (eg: in reports)."),
'sequence': fields.integer('Sequence', help="Gives the sequence order when displaying a list of workload type."),
'duration': fields.float('Duration (hour)', help='Default duration in hour for this type of workload.', required=True,),
}
_defaults = {
'sequence': 10,
'category': 'Functional Analysis',
'duration': 4,
}
_order = 'sequence'
class gap_analysis_workload(osv.Model):
_name = "gap_analysis.workload"
_description = "Gap Analysis Workload"
_columns = {
'gap_line_id': fields.many2one('gap_analysis.line', 'Gap-analysis Line', ondelete='cascade', select=True, readonly=True),
'fct_id': fields.many2one('gap_analysis.functionality', 'Gap-analysis Functionality Template', ondelete='cascade', select=True, readonly=True),
'type': fields.many2one('gap_analysis.workload.type', 'Type', required=True, select=True),
'duration': fields.float('Duration (hour)', help='Duration in hour for this task.', required=True,),
}
def onchange_type_id(self, cr, uid, ids, type_id):
val = {}
my_type = self.pool.get('gap_analysis.workload.type').browse(cr, uid, type_id)
val['duration'] = my_type.duration
return {'value': val}
class gap_analysis_functionality_category(osv.Model):
_inherit = "product.category"
_name = "gap_analysis.functionality.category"
_description = "Gap Analysis Functionality Categories"
def _category_to_update(self, cr, uid, ids, fields=None, arg=None, context=None):
        if not isinstance(ids, list):
            ids = [ids]
return self.pool.get('gap_analysis.functionality.category').search(cr, uid, [], order='parent_left') or []
def _name_get_full_path(self, cursor, uid, ids, fields, arg, context=None):
result = {}
for category in self.browse(cursor, uid, ids):
full_path = ''
current_category = category
while current_category:
if full_path=='':
full_path = ustr(current_category.name)
else:
full_path = ustr(current_category.name) + ' / ' + full_path
current_category = current_category.parent_id or False
result[category.id] = full_path
return result
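    # e.g. a category "Reports" under "Invoicing" under "Sales" yields the
    # full_path "Sales / Invoicing / Reports" (illustrative names).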
_columns = {
'parent_id': fields.many2one('gap_analysis.functionality.category','Parent Category', select=True, ondelete='cascade'),
'child_id': fields.one2many('gap_analysis.functionality.category', 'parent_id', string='Child Categories'),
'code': fields.char('Code', size=8, required=True, help="Use for functionality sequencing."),
'full_path': fields.function(_name_get_full_path, type="char", method=True, size=2048, store={'gap_analysis.functionality.category': (_category_to_update, ['name','parent_id'], 10)}, string='Name'),
}
def _check_recursion(self, cr, uid, ids, context=None):
level = 100
while len(ids):
cr.execute('select distinct parent_id from gap_analysis_functionality_category where id IN %s',(tuple(ids),))
ids = filter(None, map(lambda x:x[0], cr.fetchall()))
if not level:
return False
level -= 1
return True
_constraints = [
(_check_recursion, 'Error ! You cannot create recursive categories.', ['parent_id'])
]
_parent_name = "parent_id"
_parent_store = True
_parent_order = 'sequence, name'
_order = 'parent_left'
class gap_analysis_functionality(osv.Model):
_name = "gap_analysis.functionality"
_description = "Gap Analysis Functionalities"
_columns = {
'name': fields.char('Functionality', size=256, required=True, translate=True),
'description': fields.text('Description'),
'category': fields.many2one('gap_analysis.functionality.category', 'Category', required=True, select=True),
'is_tmpl': fields.boolean('Template ?', help='This Functionality is a Template ?'),
'proposed': fields.boolean('Propose as template ?'),
#### Default values (Templating) ####
'workloads': fields.one2many('gap_analysis.workload', 'fct_id', 'Default Workloads'),
'openerp_fct': fields.many2one('gap_analysis.openerp', 'Default OpenERP feature', select=True),
'critical': fields.integer('Default Critical Level', help='Indicator to specify the importance of this functionality in the project.'),
'testing': fields.float('Test (hour)'),
'effort': fields.many2one('gap_analysis.effort', 'Default Effort', help="Development Effort for this functionality."),
'duration_wk': fields.float('Default Duration (hour)', help='Since this effort has no pre-defined duration, you must set one.'),
'unknown_wk': fields.boolean('Must set the duration manually ? (Default)',),
}
def onchange_effort_id(self, cr, uid, ids, effort_id, unknown_wk):
val = {}
my_effort = self.pool.get('gap_analysis.effort').browse(cr, uid, effort_id)
val['unknown_wk'] = my_effort.unknown
return {'value': val}
def write(self, cr, uid, ids, vals, context=None):
if 'is_tmpl' in vals and vals['is_tmpl'] == True:
vals['proposed'] = False
return super(gap_analysis_functionality, self).write(cr, uid, ids, vals, context=context)
class gap_analysis_openerp(osv.Model):
_name = "gap_analysis.openerp"
_description = "Gap Analysis OpenERP features"
_columns = {
'name': fields.char('OpenERP feature', size=256, required=True, translate=True),
}
class gap_analysis(osv.Model):
_name = "gap_analysis"
_description = "Gap Analysis"
def _estimated_time_cost(self, cursor, uid, ids, fields, arg, context=None):
result = {}
for gap in self.browse(cursor, uid, ids):
res = {}
res['estimated_time'] = 0.0
res['estimated_cost'] = 0.0
for gap_line in gap.gap_lines:
if gap_line.keep:
res['estimated_time'] += gap_line.total_time
res['estimated_cost'] += gap_line.total_cost
result[gap.id] = res
return result
    def _sorted_distinct_workloads(self, cursor, uid, ids, arg, context=None):
        types = []
        for gap in self.browse(cursor, uid, ids):
            line_ids = [l.id for l in gap.gap_lines]
            if line_ids:
                cursor.execute("SELECT id, code FROM gap_analysis_workload_type T WHERE id in (SELECT DISTINCT(W.type) FROM gap_analysis_workload W WHERE W.gap_line_id IN %s) ORDER BY T.sequence ASC",(tuple(line_ids),))
                types = cursor.fetchall()
        return types
def button_dummy(self, cr, uid, ids, context=None):
gapline_pool = self.pool.get('gap_analysis.line')
gap_cat_pool = self.pool.get('gap_analysis.functionality.category')
        if not isinstance(ids, list):
            ids = [ids]
for gap_id in ids:
cr.execute("SELECT DISTINCT c.code FROM gap_analysis_line l, gap_analysis_functionality_category c WHERE l.category=c.id AND l.gap_id = %s",(gap_id,))
categ_codes = map(lambda x: x[0], cr.fetchall()) or []
for code in categ_codes:
idx = 1
seq = 999
cr.execute("SELECT id FROM gap_analysis_functionality_category WHERE id IN (SELECT DISTINCT c.id FROM gap_analysis_line l, gap_analysis_functionality_category c WHERE l.category=c.id AND c.code = %s AND l.gap_id = %s) ORDER BY parent_left",(code, gap_id,))
categ_ids = map(lambda x: x[0], cr.fetchall()) or []
for categ in gap_cat_pool.browse(cr, uid, categ_ids):
current_categ = categ
seq = ''
while current_categ:
seq = str(current_categ.sequence) + seq
current_categ = current_categ.parent_id or False
line_ids = gapline_pool.search(cr, uid, [('category','=',categ.id),('gap_id','=',gap_id)], order='critical desc, effort asc') or []
for line_id in line_ids:
code_line = code
code_line += str(idx).rjust(3, '0')
gapline_pool.write(cr, uid, [line_id], {'code':code_line,'seq':seq})
idx += 1
return True
def import_from_tmpl(self, cr, uid, ids, context=None):
return {
'name': _('Import from Template'),
'view_type': 'form',
'view_mode': 'form',
'view_id': False,
'res_model': 'gap_analysis.import_from_tmpl',
'context': context,
'type': 'ir.actions.act_window',
'target': 'new',
'res_id': False,
}
def _get_lines(self, cr, uid, ids, context=None):
result = {}
for line in self.pool.get('gap_analysis.line').browse(cr, uid, ids, context=context):
result[line.gap_id.id] = True
return result.keys()
def action_change(self, cr, uid, ids, context=None):
for o in self.browse(cr, uid, ids):
self.write(cr, uid, [o.id], {'state':'draft', 'date_confirm': False})
return True
def action_done(self, cr, uid, ids, context=None):
for o in self.browse(cr, uid, ids):
self.write(cr, uid, [o.id], {'state': 'done', 'date_confirm': fields.date.context_today(self, cr, uid, context=context)})
return True
def action_cancel(self, cr, uid, ids, context=None):
for o in self.browse(cr, uid, ids):
self.write(cr, uid, [o.id], {'state': 'cancel'})
return True
def copy(self, cr, uid, id, default=None, context=None):
raise osv.except_osv(_('Warning'), _("Copying a Gap Analysis is currently not allowed."))
return False
def onchange_project_id(self, cr, uid, ids, project_id):
val = {}
my_project = self.pool.get('project.project').browse(cr, uid, project_id)
if my_project.partner_id:
val['partner_id'] = my_project.partner_id.id
return {'value': val}
_columns = {
'reference': fields.char('Reference', size=64, required=True, readonly=True, states={'draft': [('readonly', False)]}, select=True),
'name': fields.char('Name', size=256, required=True, readonly=True, states={'draft': [('readonly', False)]}),
'state': fields.selection([('draft', 'Draft'), ('done', 'Done'), ('cancel', 'Cancelled')], 'State', readonly=True, help="Gives the state of the gap-analysis.", select=True),
'note': fields.text('Note'),
'date_create': fields.datetime('Creation Date', readonly=True, select=True, help="Date on which the gap-analysis is created."),
'date_confirm': fields.date('Confirmation Date', readonly=True, select=True, help="Date on which the gap-analysis is confirmed."),
'user_id': fields.many2one('res.users', 'Analyst', readonly=True, states={'draft': [('readonly', False)]}, select=True),
'partner_id': fields.many2one('res.partner', 'Customer', select=True, readonly=True, states={'draft': [('readonly', False)]}, ),
'gap_lines': fields.one2many('gap_analysis.line', 'gap_id', 'Functionalities', readonly=True, states={'draft': [('readonly', False)]}),
'estimated_time': fields.function(_estimated_time_cost, type='float', multi="gapsums", string='Estimated Time', store = False),
'estimated_cost': fields.function(_estimated_time_cost, type='float', multi="gapsums", string='Estimated Selling Price', store = False),
        'project_id': fields.many2one('project.project', 'Project'),
'is_tmpl': fields.boolean('Template ?', help='This Gap Analysis is a Template ?'),
'tech_cost': fields.float('Technical Analysis Price', help='Default Price per hour for Technical Analysis.'),
'func_cost': fields.float('Functional Analysis Price', help='Default Price per hour for Functional Analysis.'),
'dev_cost': fields.float('Effort Price', help='Price per hour for Effort.'),
'user_functional': fields.many2one('res.users', 'Default Functional Analyst'),
'user_technical': fields.many2one('res.users', 'Default Technical Analyst'),
'user_dev': fields.many2one('res.users', 'Default Developer'),
'user_test': fields.many2one('res.users', 'Default Tester'),
}
_defaults = {
'state': 'draft',
'user_id': lambda obj, cr, uid, context: uid,
'user_functional': lambda obj, cr, uid, context: uid,
'reference': lambda obj, cr, uid, context: obj.pool.get('ir.sequence').get(cr, uid, 'gap_analysis'),
'date_create': fields.date.context_today,
'tech_cost': 500.0,
'func_cost': 500.0,
'dev_cost': 250.0,
}
_sql_constraints = [
('reference_uniq', 'unique(reference)', 'Reference must be unique !'),
]
_order = 'name desc'
class gap_analysis_line(osv.Model):
_name = "gap_analysis.line"
_description = "Gap-analysis Lines"
def _estimated_line_time_cost(self, cursor, uid, ids, fields, arg, context=None):
result = {}
gap = False
for gap_line in self.browse(cursor, uid, ids):
res = {}
res['total_time'] = 0
res['total_cost'] = 0
if not gap:
gap = self.pool.get("gap_analysis").browse(cursor, uid, gap_line.gap_id.id)
if gap_line.effort:
if gap_line.effort.unknown:
thistime = gap_line.duration_wk
else:
thistime = gap_line.effort.duration
res['total_time'] = thistime
res['total_cost'] = (gap.dev_cost * thistime)
for workload in gap_line.workloads:
if workload.type.category == "Technical Analysis":
workload_cost = gap.tech_cost
else:
workload_cost = gap.func_cost
res['total_time'] += workload.duration
res['total_cost'] += (workload.duration * workload_cost)
if gap_line.testing:
res['total_time'] += gap_line.testing
res['total_cost'] += (gap_line.testing * gap.tech_cost)
result[gap_line.id] = res
return result
def _get_lines_from_workload(self, cr, uid, ids, context=None):
result = {}
for workload in self.pool.get('gap_analysis.workload').browse(cr, uid, ids, context=context):
result[workload.gap_line_id.id] = True
return result.keys()
    def _total_workloads(self, cursor, uid, ids, arg, context=None):
        result = {}
        for line in self.browse(cursor, uid, ids):
            amount = 0
            for w in line.workloads:
                if w.type.id == arg:
                    amount += w.duration
            result[line.id] = amount
        return result
def onchange_functionality_id(self, cr, uid, ids, functionality_id, gap_line_id):
val = {}
functionality_tmpl = self.pool.get('gap_analysis.functionality').browse(cr, uid, functionality_id)
if functionality_tmpl.effort:
val['effort'] = functionality_tmpl.effort.id
if functionality_tmpl.category:
val['category'] = functionality_tmpl.category.id
if functionality_tmpl.testing:
val['testing'] = functionality_tmpl.testing
if functionality_tmpl.unknown_wk:
val['unknown_wk'] = functionality_tmpl.unknown_wk
if functionality_tmpl.duration_wk:
val['duration_wk'] = functionality_tmpl.duration_wk
if functionality_tmpl.critical:
val['critical'] = functionality_tmpl.critical
if functionality_tmpl.openerp_fct:
val['openerp_fct'] = functionality_tmpl.openerp_fct.id
if functionality_tmpl.workloads:
workload_pool = self.pool.get('gap_analysis.workload')
my_workloads = []
for workload in functionality_tmpl.workloads:
workload_vals = {'type':workload.type.id,'duration':workload.duration,}
if gap_line_id:
workload_vals['gap_line_id'] = gap_line_id
workload_id = workload_pool.create(cr, uid, workload_vals)
if workload_id:
my_workloads.append(workload_id)
if my_workloads:
val['workloads'] = my_workloads
return {'value': val}
def onchange_effort_id(self, cr, uid, ids, effort_id, unknown_wk):
val = {}
my_effort = self.pool.get('gap_analysis.effort').browse(cr, uid, effort_id)
val['unknown_wk'] = my_effort.unknown
return {'value': val}
_columns = {
'gap_id': fields.many2one('gap_analysis', 'Gap-analysis', required=True, ondelete='cascade', select=True, readonly=True),
'seq': fields.char('Sequence', size=48),
'code': fields.char('Code', size=6),
'functionality': fields.many2one('gap_analysis.functionality', 'Functionality', required=True, select=True),
'category': fields.many2one('gap_analysis.functionality.category', 'Category', required=True, select=True),
'workloads': fields.one2many('gap_analysis.workload', 'gap_line_id', 'Workloads'),
'total_time': fields.function(_estimated_line_time_cost, method=True, type='float', multi=True, string='Estimated Time', store = {'gap_analysis.line': (lambda self, cr, uid, ids, c={}: ids, ['testing','workloads','duration_wk','effort','unknown_wk'], 10),'gap_analysis.workload': (_get_lines_from_workload, ['workload', 'duration'], 10),}),
'total_cost': fields.function(_estimated_line_time_cost, method=True, type='float', multi=True, string='Estimated Selling Price', store = {'gap_analysis.line': (lambda self, cr, uid, ids, c={}: ids, ['testing','workloads','duration_wk','effort','unknown_wk'], 10),'gap_analysis.workload': (_get_lines_from_workload, ['workload', 'duration'], 10),}),
'openerp_fct': fields.many2one('gap_analysis.openerp', 'OpenERP feature', select=True),
'contributors': fields.char('Contributor', size=256, help='Who is/are your main contact(s) to define this functionality.'),
        'keep': fields.boolean('Keep ?', help='Keep the functionality in the Gap Analysis. If unchecked, the functionality will be printed in the report but not used for the price calculation.'),
'critical': fields.integer('Critical Level', help='Indicator to specify the importance of this functionality in the project.'),
'testing': fields.float('Test (hour)'),
'effort': fields.many2one('gap_analysis.effort', 'Effort', help="Development Effort for this functionality."),
'duration_wk': fields.float('Duration (hour)', help='Since this effort has no pre-defined duration, you must set one.'),
'unknown_wk': fields.boolean('Must set the duration manually ?',),
}
_defaults = {
'unknown_wk': False,
'keep': True,
'critical': 1,
}
_order = 'seq asc, code asc'
_rec_name = 'code'
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: | Elico-Corp/openerp-7.0 | gap_analysis/gap_analysis.py | Python | agpl-3.0 | 21,590 |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of The Linux Foundation nor
# the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Invoke gcc, looking for warnings, and causing a failure if there are
# non-whitelisted warnings.
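# Usage sketch (integration is build-system specific; this invocation is
# illustrative only):
#
#     scripts/gcc-wrapper.py gcc -c foo.c -o foo.o
#
# i.e. pass the real compiler followed by its full argument list; the wrapper
# runs it, scans stderr for warnings, and deletes the output object (and
# exits non-zero) when a non-whitelisted warning appears.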
import errno
import re
import os
import sys
import subprocess
# Note that gcc uses unicode, which may depend on the locale. TODO:
# force LANG to be set to en_US.UTF-8 to get consistent warnings.
allowed_warnings = set([
"return_address.c:62",
"workqueue.c:480",
])
# Capture the name of the object file so we can remove it if a warning is found.
ofile = None
warning_re = re.compile(r'''(.*/|)([^/]+\.[a-z]+:\d+):(\d+:)? warning:''')
def interpret_warning(line):
"""Decode the message from gcc. The messages we care about have a filename, and a warning"""
line = line.rstrip('\n')
m = warning_re.match(line)
if m and m.group(2) not in allowed_warnings:
print "error, forbidden warning:", m.group(2)
# If there is a warning, remove any object if it exists.
if ofile:
try:
os.remove(ofile)
except OSError:
pass
sys.exit(1)
def run_gcc():
args = sys.argv[1:]
# Look for -o
try:
i = args.index('-o')
global ofile
ofile = args[i+1]
except (ValueError, IndexError):
pass
compiler = sys.argv[0]
try:
proc = subprocess.Popen(args, stderr=subprocess.PIPE)
for line in proc.stderr:
print line,
interpret_warning(line)
result = proc.wait()
except OSError as e:
result = e.errno
if result == errno.ENOENT:
print args[0] + ':',e.strerror
print 'Is your PATH set correctly?'
else:
print ' '.join(args), str(e)
return result
if __name__ == '__main__':
status = run_gcc()
sys.exit(status)
| cristianomatos/android_kernel_motorola_msm8226 | scripts/gcc-wrapper.py | Python | gpl-2.0 | 3,405 |
# Copyright 2001 by Tarjei Mikkelsen. All rights reserved.
# Revisions copyright 2007 by Michiel de Hoon. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Tests the basic functionality of the KEGG parsers."""
from __future__ import print_function
import os
from Bio.KEGG import Enzyme
from Bio.KEGG import Compound
from Bio.KEGG import Map
from Bio.Pathway import System
# TODO - use unittest instead of print-and-compare testing
test_KEGG_Enzyme_files = ["enzyme.sample", "enzyme.irregular", "enzyme.new"]
test_KEGG_Compound_files = ["compound.sample", "compound.irregular"]
test_KEGG_Map_files = ["map00950.rea"]
def t_KEGG_Enzyme(testfiles):
"""Tests Bio.KEGG.Enzyme functionality."""
for file in testfiles:
fh = open(os.path.join("KEGG", file))
print("Testing Bio.KEGG.Enzyme on " + file + "\n\n")
records = Enzyme.parse(fh)
for record in records:
print(record)
print("\n")
fh.close()
def t_KEGG_Compound(testfiles):
"""Tests Bio.KEGG.Compound functionality."""
for file in testfiles:
fh = open(os.path.join("KEGG", file))
print("Testing Bio.KEGG.Compound on " + file + "\n\n")
records = Compound.parse(fh)
for record in records:
print(record)
print("\n")
fh.close()
def t_KEGG_Map(testfiles):
"""Tests Bio.KEGG.Map functionality."""
for file in testfiles:
fh = open(os.path.join("KEGG", file))
print("Testing Bio.KEGG.Map on " + file + "\n\n")
reactions = Map.parse(fh)
system = System()
for reaction in reactions:
system.add_reaction(reaction)
        # Sort the reactions by their string representation so that the
        # output is consistent between Python versions (a key function is
        # used instead of a comparison function for Python 3 support).
        rxs = system.reactions()
rxs.sort(key=lambda x: str(x))
for x in rxs:
print(str(x))
fh.close()
t_KEGG_Enzyme(test_KEGG_Enzyme_files)
t_KEGG_Compound(test_KEGG_Compound_files)
t_KEGG_Map(test_KEGG_Map_files)
| updownlife/multipleK | dependencies/biopython-1.65/Tests/test_KEGG.py | Python | gpl-2.0 | 2,438 |
import os
from fabric import task
from django.utils.termcolors import colorize
# 1. Local: chmod 400 ~/.ssh/aws.pem
# 2. Local: ssh-add ~/.ssh/aws.pem OR ~/.ssh/config: Append to Host: IdentityFile ~/.ssh/aws.pem
# 3. Local: Edit hosts, repo_name, pythonpath (if necessary)
# 4. Remote: Copy .env to {code_dir}/.env
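# 5. Local: run the deploy, passing the server address via HOST
#    (illustrative invocation, assuming the Fabric 2.x CLI):
#    HOST=your-server.example.com fab deploy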
hosts = [{
'host': os.environ.get('HOST', ''),
'user': 'ubuntu',
}]
repo_name = 'emojiweather'
pythonpath = f'{repo_name}/'
service_name = repo_name
code_dir = f'/home/ubuntu/{repo_name}/'
@task
def update(c):
print(colorize('\nUpdating code...', fg='white'))
c.run(f'cd {code_dir} && git pull origin master')
@task
def install(c):
print(colorize('\nInstalling dependencies...', fg='white'))
c.run(f'cd {code_dir} && source env/bin/activate && pip install -r requirements.txt')
@task
def migrate(c):
print(colorize('\nMigrating database...', fg='white'))
c.inline_ssh_env = True
c.run(f'source {code_dir}.env && cd {code_dir} && source env/bin/activate && python {pythonpath}manage.py migrate --noinput', env={'DEBUG': '$DEBUG', 'DATABASE_PASSWORD': '$DATABASE_PASSWORD'})
@task
def collect(c):
print(colorize('\nCopying static files...', fg='white'))
c.run(f'cd {code_dir} && source env/bin/activate && python {pythonpath}manage.py collectstatic --noinput')
@task
def clear(c):
print(colorize('\nDeleting sessions...', fg='white'))
c.inline_ssh_env = True
c.run(f'source {code_dir}.env && cd {code_dir} && source env/bin/activate && python {pythonpath}manage.py clearsessions', env={'DEBUG': '$DEBUG', 'DATABASE_PASSWORD': '$DATABASE_PASSWORD'})
@task
def restart(c):
print(colorize('\nRestarting web server...\n', fg='white'))
c.run(f'sudo systemctl restart {service_name}')
c.run(f'sudo systemctl status {service_name}')
print('')
c.run('sudo systemctl restart nginx')
c.run('sudo systemctl status nginx')
@task(hosts=hosts)
def deploy(c):
print(colorize('\nStarting deploy... \U0001F44C', fg='green'))
try:
update(c)
install(c)
migrate(c)
collect(c)
# clear(c)
restart(c)
print(colorize('\nDeploy succeeded \U0001F389', fg='green'))
    except Exception:
print(colorize('\nDeploy failed \u274C', fg='red'))
| richardcornish/smsweather | fabfile.py | Python | bsd-3-clause | 2,296 |
from django.conf.urls import url, include
from database.views import work_database
from rest_framework import routers
from database import views
router = routers.DefaultRouter(trailing_slash=False)
router.register(r'member', views.MemberViewSet)
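# With the 'rest/' prefix applied below, the router exposes the usual
# list/detail routes for MemberViewSet, e.g. /rest/member and
# /rest/member/{pk} (no trailing slash, per trailing_slash=False).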
urlpatterns = [
url(r'^rest/', include(router.urls)),
url(r'^$', work_database),
# url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework'))
] | sergey-egv/DJandExtJSTest | database/urls.py | Python | gpl-3.0 | 423 |
#!/bin/env python
"""
# Automatically translated python version of
# OpenSceneGraph example program "osganimate"
# Translated from file 'osganimate.cpp'
# OpenSceneGraph example, osganimate.
#*
#* Permission is hereby granted, free of charge, to any person obtaining a copy
#* of this software and associated documentation files (the "Software"), to deal
#* in the Software without restriction, including without limitation the rights
#* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#* copies of the Software, and to permit persons to whom the Software is
#* furnished to do so, subject to the following conditions:
#*
#* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
#* THE SOFTWARE.
"""
import sys
import math
from osgpypp import osg
from osgpypp import osgDB
from osgpypp import osgGA
from osgpypp import osgSim
from osgpypp import osgUtil
from osgpypp import osgViewer
def createAnimationPath(center, radius, looptime):
# set up the animation path
animationPath = osg.AnimationPath()
animationPath.setLoopMode(osg.AnimationPath.LOOP)
numSamples = 40
yaw = 0.0
yaw_delta = 2.0*osg.PI/(numSamples-1.0)
roll = osg.inDegrees(30.0)
time = 0.0
time_delta = looptime/float(numSamples)
for i in range(numSamples):
position = center+osg.Vec3(math.sin(yaw)*radius,math.cos(yaw)*radius,0.0)
rotation = osg.Quat(roll,osg.Vec3(0.0,1.0,0.0))*osg.Quat(-(yaw+osg.inDegrees(90.0)),osg.Vec3(0.0,0.0,1.0))
animationPath.insert(time,osg.AnimationPath.ControlPoint(position,rotation))
yaw += yaw_delta
time += time_delta
return animationPath
def createBase(center, radius):
numTilesX = 10
numTilesY = 10
width = 2*radius
height = 2*radius
v000 = center - osg.Vec3(width*0.5,height*0.5,0.0)
dx = osg.Vec3(width/(float(numTilesX)),0.0,0.0)
dy = osg.Vec3(0.0,height/(float(numTilesY)),0.0)
# fill in vertices for grid, note numTilesX+1 * numTilesY+1...
coords = osg.Vec3Array()
    for iy in range(numTilesY+1):
        for ix in range(numTilesX+1):
            coords.append(v000+dx*float(ix)+dy*float(iy))
#Just two colours - black and white.
colors = osg.Vec4Array()
colors.append(osg.Vec4(1.0,1.0,1.0,1.0)) # white
colors.append(osg.Vec4(0.0,0.0,0.0,1.0)) # black
whitePrimitives = osg.DrawElementsUShort(osg.GL_QUADS)
blackPrimitives = osg.DrawElementsUShort(osg.GL_QUADS)
numIndicesPerRow = numTilesX+1
for iy in range(numTilesY):
for ix in range(numTilesX):
primitives = whitePrimitives if (((iy+ix)%2==0)) else blackPrimitives
primitives.append(ix +(iy+1)*numIndicesPerRow)
primitives.append(ix +iy*numIndicesPerRow)
primitives.append((ix+1)+iy*numIndicesPerRow)
primitives.append((ix+1)+(iy+1)*numIndicesPerRow)
# set up a single normal
normals = osg.Vec3Array()
normals.append(osg.Vec3(0.0,0.0,1.0))
geom = osg.Geometry()
geom.setVertexArray(coords)
geom.setColorArray(colors, osg.Array.BIND_PER_PRIMITIVE_SET)
geom.setNormalArray(normals, osg.Array.BIND_OVERALL)
geom.addPrimitiveSet(whitePrimitives)
geom.addPrimitiveSet(blackPrimitives)
geode = osg.Geode()
geode.addDrawable(geom)
return geode
def createMovingModel(center, radius):
animationLength = 10.0
animationPath = createAnimationPath(center,radius,animationLength)
model = osg.Group()
glider = osgDB.readNodeFile("glider.osgt")
if glider :
bs = glider.getBound()
size = radius/bs.radius()*0.3
positioned = osg.MatrixTransform()
positioned.setDataVariance(osg.Object.STATIC)
positioned.setMatrix(osg.Matrix.translate(-bs.center())*
osg.Matrix.scale(size,size,size)*
osg.Matrix.rotate(osg.inDegrees(-90.0),0.0,0.0,1.0))
positioned.addChild(glider)
xform = osg.PositionAttitudeTransform()
xform.setUpdateCallback(osg.AnimationPathCallback(animationPath,0.0,1.0))
xform.addChild(positioned)
model.addChild(xform)
cessna = osgDB.readNodeFile("cessna.osgt")
if cessna :
bs = cessna.getBound()
size = radius/bs.radius()*0.3
positioned = osg.MatrixTransform()
positioned.setDataVariance(osg.Object.STATIC)
positioned.setMatrix(osg.Matrix.translate(-bs.center())*
osg.Matrix.scale(size,size,size)*
osg.Matrix.rotate(osg.inDegrees(180.0),0.0,0.0,1.0))
positioned.addChild(cessna)
xform = osg.MatrixTransform()
xform.setUpdateCallback(osg.AnimationPathCallback(animationPath,0.0,2.0))
xform.addChild(positioned)
model.addChild(xform)
return model
def createModel(overlay, technique):
center = osg.Vec3(0.0,0.0,0.0)
radius = 100.0
root = osg.Group()
baseHeight = center.z - radius*0.5
baseModel = createBase(osg.Vec3(center.x, center.y, baseHeight),radius)
movingModel = createMovingModel(center,radius*0.8)
if overlay :
overlayNode = osgSim.OverlayNode(technique)
overlayNode.setContinuousUpdate(True)
overlayNode.setOverlaySubgraph(movingModel)
overlayNode.setOverlayBaseHeight(baseHeight-0.01)
overlayNode.addChild(baseModel)
root.addChild(overlayNode)
else:
root.addChild(baseModel)
root.addChild(movingModel)
return root
def main(argv):
overlay = False
arguments = osg.ArgumentParser(argv)
while arguments.read("--overlay") :
overlay = True
technique = osgSim.OverlayNode.OBJECT_DEPENDENT_WITH_ORTHOGRAPHIC_OVERLAY
while arguments.read("--object") :
technique = osgSim.OverlayNode.OBJECT_DEPENDENT_WITH_ORTHOGRAPHIC_OVERLAY
overlay=True
while arguments.read("--ortho") or arguments.read("--orthographic") :
technique = osgSim.OverlayNode.VIEW_DEPENDENT_WITH_ORTHOGRAPHIC_OVERLAY
overlay=True
while arguments.read("--persp") or arguments.read("--perspective") :
technique = osgSim.OverlayNode.VIEW_DEPENDENT_WITH_PERSPECTIVE_OVERLAY
overlay=True
# initialize the viewer.
viewer = osgViewer.Viewer()
# load the nodes from the commandline arguments.
model = createModel(overlay, technique)
if not model:
return 1
# tilt the scene so the default eye position is looking down on the model.
rootnode = osg.MatrixTransform()
rootnode.setMatrix(osg.Matrix.rotate(osg.inDegrees(30.0),1.0,0.0,0.0))
rootnode.addChild(model)
# run optimization over the scene graph
    optimizer = osgUtil.Optimizer()
    optimizer.optimize(rootnode)
# set the scene to render
viewer.setSceneData(rootnode)
viewer.setCameraManipulator(osgGA.TrackballManipulator())
# viewer.setUpViewOnSingleScreen(1)
# normal viewer usage.
return viewer.run()
if __name__ == "__main__":
retval = main(sys.argv)
| JaneliaSciComp/osgpyplusplus | examples/debugging2/osganimate.py | Python | bsd-3-clause | 7,471 |
from django.conf import settings
from django.db.models import Count, ExpressionWrapper, F, FloatField, Sum
from django.db.models.functions import Cast
from django.utils.decorators import method_decorator
from django.views.decorators.cache import cache_page
from django_filters.rest_framework import DjangoFilterBackend
from api.activity.serializers import CodelistSerializer
from api.aggregation.views import Aggregation, AggregationView, GroupBy
from api.budget import filters
from api.budget.filters import RelatedOrderingFilter
from api.budget.serializers import BudgetSerializer
from api.country.serializers import CountrySerializer
from api.generics.filters import SearchFilter
from api.generics.views import DynamicListView
from api.organisation.serializers import OrganisationAggregationSerializer
from api.region.serializers import RegionSerializer
from api.sector.serializers import SectorSerializer
from geodata.models import Country, Region
from iati.models import (
ActivityParticipatingOrganisation, ActivityStatus, Budget,
CollaborationType, DocumentCategory, Organisation, OrganisationType,
Sector
)
from iati_codelists.models import BudgetType
# These are the accepted currencies
currencies = [
'xdr',
'usd',
'eur',
'gbp',
'jpy',
'cad'
]
def annotate_currency(query_params, groupings):
"""
Choose the right currency field,
and aggregate differently based on group_by
"""
currency = query_params.get('convert_to')
if currency:
currency = currency.lower()
if currency is None or currency not in currencies:
currency_field = 'value'
else:
currency_field = currency + '_value'
annotation_components = F(currency_field)
param_additions = []
for param in query_params:
if param == 'sector':
param_additions.append('budgetsector__percentage')
grouping_additions = []
for grouping in groupings:
if grouping.query_param == 'sector':
grouping_additions.append('budgetsector__percentage')
additions = list(set(param_additions).union(grouping_additions))
for percentage_field in additions:
percentage_expression = Cast(percentage_field,
output_field=FloatField()) / 100.0
annotation_components = annotation_components * percentage_expression
return ExpressionWrapper(Sum(annotation_components),
output_field=FloatField())
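# Illustrative sketch of what the annotation above expands to when grouping
# by sector with convert_to=usd (for explanation only, not used by the view):
#
#     ExpressionWrapper(
#         Sum(F('usd_value')
#             * Cast('budgetsector__percentage', output_field=FloatField())
#             / 100.0),
#         output_field=FloatField(),
#     )
#
# i.e. each budget value is weighted by its sector percentage before summing.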
class BudgetAggregations(AggregationView):
"""
Returns aggregations based on the item grouped by,
and the selected aggregation.
## Group by options
API request has to include `group_by` parameter.
    This parameter controls how the results are grouped and
can be one or more (comma separated values) of:
- `recipient_country` Non percentage weighted
- `recipient_region` Non percentage weighted
- `sector` Percentage weighted
- `related_activity`
- `reporting_organisation`
- `participating_organisation`
- `participating_organisation_type`
- `document_link_category`
- `activity_status`
- `collaboration_type`
- `budget_period_start_year`
- `budget_period_end_year`
- `budget_period_start_quarter`
- `budget_period_end_quarter`
- `budget_period_start_month`
- `budget_period_end_month`
- `budget_type`
## Aggregation options
API request has to include `aggregations` parameter.
This parameter controls result aggregations and
can be one or more (comma separated values) of:
- `count` Count of budgets
- `activity_count` Count of activities
- `value` Sum of budget value (in the selected currency)
## Request parameters
    All filters available on the Activity List can be used on aggregations.
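    ## Example
    An illustrative request combining a grouping with aggregations (the URL
    prefix depends on how the API is deployed):
    `?group_by=recipient_country&aggregations=count,value&convert_to=usd`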
"""
queryset = Budget.objects.all()
filter_backends = (SearchFilter, DjangoFilterBackend,)
filter_class = filters.BudgetFilter
allowed_aggregations = (
Aggregation(
query_param='count',
field='count',
annotate=Count('id'),
),
Aggregation(
query_param='activity_count',
field='activity_count',
annotate=Count('activity', distinct=True),
),
Aggregation(
query_param='value',
field='value',
annotate=annotate_currency,
),
)
allowed_groupings = (
GroupBy(
query_param="recipient_country",
fields="activity__recipient_country",
renamed_fields="recipient_country",
queryset=Country.objects.all(),
serializer=CountrySerializer,
serializer_fields=('url', 'code', 'name', 'location', 'region'),
name_search_field='activity__recipient_country__name',
renamed_name_search_field='recipient_country_name',
),
GroupBy(
query_param="recipient_region",
fields="activity__recipient_region__code",
renamed_fields="recipient_region",
queryset=Region.objects.all(),
serializer=RegionSerializer,
serializer_fk='code',
serializer_fields=('url', 'code', 'name',),
name_search_field="activity__recipient_region__name",
renamed_name_search_field="recipient_region_name",
),
GroupBy(
query_param="sector",
fields="budgetsector__sector__code",
renamed_fields="sector",
queryset=Sector.objects.all(),
serializer=SectorSerializer,
# though code is not fk, it is used in searching sector code
# in Sector model.
serializer_fk='code',
serializer_fields=('url', 'code', 'name'),
name_search_field="budgetsector__sector__name",
renamed_name_search_field="sector_name",
),
GroupBy(
query_param="related_activity",
fields=(
"activity__relatedactivity__ref_activity__iati_identifier"),
renamed_fields="related_activity",
),
GroupBy(
query_param="reporting_organisation",
fields="activity__reporting_organisations__organisation__id",
renamed_fields="reporting_organisation",
queryset=Organisation.objects.all(),
serializer=OrganisationAggregationSerializer,
serializer_main_field='id',
name_search_field= # NOQA: E251
"activity__reporting_organisations__organisation__primary_name",
renamed_name_search_field="reporting_organisation_name"
),
GroupBy(
query_param="participating_organisation",
fields="activity__participating_organisations__ref",
renamed_fields="participating_organisation",
queryset=ActivityParticipatingOrganisation.objects.all(),
name_search_field= # NOQA: E251
"activity__participating_organisations__ref",
renamed_name_search_field="participating_organisation_name"
),
GroupBy(
query_param="participating_organisation_type",
fields="activity__participating_organisations__type",
renamed_fields="participating_organisation_type",
queryset=OrganisationType.objects.all(),
serializer=CodelistSerializer,
name_search_field= # NOQA: E251
"activity__participating_organisations__type__name",
renamed_name_search_field="participating_organisations_type_name"
),
GroupBy(
query_param="document_link_category",
fields="activity__documentlink__categories__code",
renamed_fields="document_link_category",
queryset=DocumentCategory.objects.all(),
serializer=CodelistSerializer,
name_search_field="activity__documentlink__categories__name",
renamed_name_search_field="document_link_category_name"
),
GroupBy(
query_param="activity_status",
fields="activity__activity_status",
renamed_fields="activity_status",
queryset=ActivityStatus.objects.all(),
serializer=CodelistSerializer,
name_search_field="activity__activity_status__name",
renamed_name_search_field="activity_status_name"
),
GroupBy(
query_param="collaboration_type",
fields="activity__collaboration_type",
renamed_fields="collaboration_type",
queryset=CollaborationType.objects.all(),
serializer=CodelistSerializer,
name_search_field="activity__collaboration_type__name",
renamed_name_search_field="collaboration_type_name"
),
GroupBy(
query_param="budget_type",
fields=("type"),
queryset=BudgetType.objects.all(),
serializer=CodelistSerializer,
),
GroupBy(
query_param="budget_period_start_year",
extra={
'select': {
'budget_period_start_year':
'EXTRACT(YEAR FROM "period_start")::integer',
},
'where': [
'EXTRACT(YEAR FROM "period_start")::integer IS NOT NULL',
],
},
fields="budget_period_start_year",
),
GroupBy(
query_param="budget_period_end_year",
extra={
'select': {
'budget_period_end_year':
'EXTRACT(YEAR FROM "period_end")::integer',
},
'where': [
'EXTRACT(YEAR FROM "period_end")::integer IS NOT NULL',
],
},
fields="budget_period_end_year",
),
GroupBy(
query_param="budget_period_start_quarter",
extra={
'select': {
'budget_period_start_year':
'EXTRACT(YEAR FROM "period_start")::integer',
'budget_period_start_quarter':
'EXTRACT(QUARTER FROM "period_start")::integer',
},
'where': [
'EXTRACT(YEAR FROM "period_start")::integer IS NOT NULL',
'EXTRACT(QUARTER FROM "period_start")::integer IS NOT NULL'
],
},
fields=("budget_period_start_year", "budget_period_start_quarter")
),
GroupBy(
query_param="budget_period_end_quarter",
extra={
'select': {
'budget_period_end_year':
'EXTRACT(YEAR FROM "period_end")::integer',
'budget_period_end_quarter':
'EXTRACT(QUARTER FROM "period_end")::integer',
},
'where': [
'EXTRACT(YEAR FROM "period_end")::integer IS NOT NULL',
'EXTRACT(QUARTER FROM "period_end")::integer IS NOT NULL',
],
},
fields=("budget_period_end_year", "budget_period_end_quarter")
),
GroupBy(
query_param="budget_period_start_month",
extra={
'select': {
'budget_period_start_year':
'EXTRACT(YEAR FROM "period_start")::integer',
'budget_period_start_month':
'EXTRACT(MONTH FROM "period_start")::integer',
},
'where': [
'EXTRACT(YEAR FROM "period_start")::integer IS NOT NULL',
'EXTRACT(MONTH FROM "period_start")::integer IS NOT NULL',
],
},
fields=("budget_period_start_year", "budget_period_start_month")
),
GroupBy(
query_param="budget_period_end_month",
extra={
'select': {
                'budget_period_end_year':
'EXTRACT(YEAR FROM "period_end")::integer',
'budget_period_end_month':
'EXTRACT(MONTH FROM "period_end")::integer',
},
'where': [
'EXTRACT(YEAR FROM "period_end")::integer IS NOT NULL',
'EXTRACT(MONTH FROM "period_end")::integer IS NOT NULL',
],
},
fields=("budget_period_end_year", "budget_period_end_month")
),
)
@method_decorator(
cache_page(settings.CACHES.get('default').get('TIMEOUT'))
)
def dispatch(self, *args, **kwargs):
return super(BudgetAggregations, self).dispatch(*args, **kwargs)
class BudgetList(DynamicListView):
"""
    Returns a list of IATI Budgets stored in OIPA.
    ## Filter parameters
    - `activity_id` (*optional*): Comma separated list of activity id's.
    - `type` (*optional*): Comma separated list of budget type codes.
"""
queryset = Budget.objects.all()
filter_backends = (
SearchFilter,
DjangoFilterBackend,
RelatedOrderingFilter,
)
filter_class = filters.BudgetFilter
serializer_class = BudgetSerializer
    # Make sure we always have info about the selectable fields, stored in a
    # dict. This dict is populated in the DynamicListView class using the
    # _get_query_fields method.
selectable_fields = ()
break_down_by = 'sectors'
# Required fields for the serialisation defined by the
# specification document
fields = (
'iati_identifier',
'sectors',
'recipient_regions',
'recipient_countries',
'budgets'
)
    # Column headers with paths to the JSON property value. The field name is
    # referenced by the first term in the path.
    # Example: for the path recipient_countries.country.code, the referenced
    # field name is the first term, i.e. recipient_countries.
csv_headers = \
{
'iati_identifier': {'header': 'activity_id'},
'sectors.sector.code': {'header': 'sector_code'},
'sectors.percentage': {'header': 'sectors_percentage'},
'recipient_countries.country.code': {'header': 'country'},
'recipient_regions.region.code': {'header': 'region'},
}
exceptional_fields = [{'budgets': []}] # NOQA: E501
'''
# Required fields for the serialisation defined by the
# specification document
fields = (
'activity_id',
'type',
'status',
'period_start',
'period_end',
'value',
'iati_identifier',
'sectors',
'recipient_countries',
'recipient_regions'
)
'''
| zimmerman-zimmerman/OIPA | OIPA/api/budget/views.py | Python | agpl-3.0 | 14,887 |
"""Support for Epson projector."""
from __future__ import annotations
import logging
from epson_projector.const import (
BACK,
BUSY,
CMODE,
CMODE_LIST,
CMODE_LIST_SET,
DEFAULT_SOURCES,
EPSON_CODES,
FAST,
INV_SOURCES,
MUTE,
PAUSE,
PLAY,
POWER,
SOURCE,
SOURCE_LIST,
STATE_UNAVAILABLE as EPSON_STATE_UNAVAILABLE,
TURN_OFF,
TURN_ON,
VOL_DOWN,
VOL_UP,
VOLUME,
)
import voluptuous as vol
from homeassistant.components.media_player import MediaPlayerEntity
from homeassistant.components.media_player.const import (
SUPPORT_NEXT_TRACK,
SUPPORT_PREVIOUS_TRACK,
SUPPORT_SELECT_SOURCE,
SUPPORT_TURN_OFF,
SUPPORT_TURN_ON,
SUPPORT_VOLUME_MUTE,
SUPPORT_VOLUME_STEP,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import STATE_OFF, STATE_ON
from homeassistant.core import HomeAssistant
from homeassistant.helpers import entity_platform
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import DeviceInfo
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.entity_registry import async_get as async_get_entity_registry
from .const import ATTR_CMODE, DOMAIN, SERVICE_SELECT_CMODE
_LOGGER = logging.getLogger(__name__)
SUPPORT_EPSON = (
SUPPORT_TURN_ON
| SUPPORT_TURN_OFF
| SUPPORT_SELECT_SOURCE
| SUPPORT_VOLUME_MUTE
| SUPPORT_VOLUME_STEP
| SUPPORT_NEXT_TRACK
| SUPPORT_PREVIOUS_TRACK
)
async def async_setup_entry(
hass: HomeAssistant,
config_entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Set up the Epson projector from a config entry."""
entry_id = config_entry.entry_id
unique_id = config_entry.unique_id
projector = hass.data[DOMAIN][entry_id]
projector_entity = EpsonProjectorMediaPlayer(
projector=projector,
name=config_entry.title,
unique_id=unique_id,
entry=config_entry,
)
async_add_entities([projector_entity], True)
platform = entity_platform.async_get_current_platform()
platform.async_register_entity_service(
SERVICE_SELECT_CMODE,
{vol.Required(ATTR_CMODE): vol.All(cv.string, vol.Any(*CMODE_LIST_SET))},
SERVICE_SELECT_CMODE,
)
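# Illustrative YAML service call for the entity service registered above
# (assumes SERVICE_SELECT_CMODE resolves to "select_cmode"; entity_id and
# cmode are example values, and cmode must be a key of CMODE_LIST_SET):
#
#     service: epson.select_cmode
#     data:
#       entity_id: media_player.epson_projector
#       cmode: cinema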
class EpsonProjectorMediaPlayer(MediaPlayerEntity):
"""Representation of Epson Projector Device."""
def __init__(self, projector, name, unique_id, entry):
"""Initialize entity to control Epson projector."""
self._projector = projector
self._entry = entry
self._name = name
self._available = False
self._cmode = None
self._source_list = list(DEFAULT_SOURCES.values())
self._source = None
self._volume = None
self._state = None
self._unique_id = unique_id
async def set_unique_id(self):
"""Set unique id for projector config entry."""
_LOGGER.debug("Setting unique_id for projector")
if self._unique_id:
return False
if uid := await self._projector.get_serial_number():
self.hass.config_entries.async_update_entry(self._entry, unique_id=uid)
registry = async_get_entity_registry(self.hass)
old_entity_id = registry.async_get_entity_id(
"media_player", DOMAIN, self._entry.entry_id
)
if old_entity_id is not None:
registry.async_update_entity(old_entity_id, new_unique_id=uid)
self.hass.async_create_task(
self.hass.config_entries.async_reload(self._entry.entry_id)
)
return True
async def async_update(self):
"""Update state of device."""
power_state = await self._projector.get_power()
_LOGGER.debug("Projector status: %s", power_state)
if not power_state or power_state == EPSON_STATE_UNAVAILABLE:
self._available = False
return
self._available = True
if power_state == EPSON_CODES[POWER]:
self._state = STATE_ON
if await self.set_unique_id():
return
self._source_list = list(DEFAULT_SOURCES.values())
cmode = await self._projector.get_property(CMODE)
self._cmode = CMODE_LIST.get(cmode, self._cmode)
source = await self._projector.get_property(SOURCE)
self._source = SOURCE_LIST.get(source, self._source)
volume = await self._projector.get_property(VOLUME)
if volume:
self._volume = volume
elif power_state == BUSY:
self._state = STATE_ON
else:
self._state = STATE_OFF
@property
def device_info(self) -> DeviceInfo | None:
"""Get attributes about the device."""
if not self._unique_id:
return None
return DeviceInfo(
identifiers={(DOMAIN, self._unique_id)},
manufacturer="Epson",
model="Epson",
name="Epson projector",
via_device=(DOMAIN, self._unique_id),
)
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def unique_id(self):
"""Return unique ID."""
return self._unique_id
@property
def state(self):
"""Return the state of the device."""
return self._state
@property
def available(self):
"""Return if projector is available."""
return self._available
@property
def supported_features(self):
"""Flag media player features that are supported."""
return SUPPORT_EPSON
async def async_turn_on(self):
"""Turn on epson."""
if self._state == STATE_OFF:
await self._projector.send_command(TURN_ON)
async def async_turn_off(self):
"""Turn off epson."""
if self._state == STATE_ON:
await self._projector.send_command(TURN_OFF)
@property
def source_list(self):
"""List of available input sources."""
return self._source_list
@property
def source(self):
"""Get current input sources."""
return self._source
@property
def volume_level(self):
"""Return the volume level of the media player (0..1)."""
return self._volume
async def select_cmode(self, cmode):
"""Set color mode in Epson."""
await self._projector.send_command(CMODE_LIST_SET[cmode])
async def async_select_source(self, source):
"""Select input source."""
selected_source = INV_SOURCES[source]
await self._projector.send_command(selected_source)
async def async_mute_volume(self, mute):
"""Mute (true) or unmute (false) sound."""
await self._projector.send_command(MUTE)
async def async_volume_up(self):
"""Increase volume."""
await self._projector.send_command(VOL_UP)
async def async_volume_down(self):
"""Decrease volume."""
await self._projector.send_command(VOL_DOWN)
async def async_media_play(self):
"""Play media via Epson."""
await self._projector.send_command(PLAY)
async def async_media_pause(self):
"""Pause media via Epson."""
await self._projector.send_command(PAUSE)
async def async_media_next_track(self):
"""Skip to next."""
await self._projector.send_command(FAST)
async def async_media_previous_track(self):
"""Skip to previous."""
await self._projector.send_command(BACK)
@property
def extra_state_attributes(self):
"""Return device specific state attributes."""
if self._cmode is None:
return {}
return {ATTR_CMODE: self._cmode}
| rohitranjan1991/home-assistant | homeassistant/components/epson/media_player.py | Python | mit | 7,843 |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2019
#
# Distributed under terms of the MIT license.
from fnapy.utils import Message
from fnapy.fnapy_manager import FnapyManager
from fnapy.connection import FnapyConnection
from tests import make_requests_get_mock, fake_manager
from lxml import etree
try:
from unittest import mock
except ImportError:
import mock
def do_nothing(*args, **kwargs):
pass
def test_query_messages(fake_manager):
"""
We should be able to decode properly the messages we receive
"""
# This example contains the subject 'Nouvelle réclamation'
with mock.patch(
'fnapy.fnapy_manager.FnapyManager._get_response',
make_requests_get_mock('query_messages_response.xml')
):
response = fake_manager.query_messages()
# Once decoded in UTF-8, the xml string should contain the correct
# characters.
assert 'Nouvelle réclamation' in response.content.decode('utf8')
def test_update_messages(fake_manager):
"""
The message we send should have the correct encoding
"""
message1 = Message(
action='mark_as_read',
id=u'some_id',
subject=u'order_information',
description='Chère Valérià...'
)
with mock.patch(
'fnapy.fnapy_manager.FnapyManager._get_response',
side_effect=do_nothing
) as m:
fake_manager.update_messages([message1])
# The xml string passed to _get_response (called by update_messages) should
# be encoded in UTF-8. Decoding it should give us the original message
assert message1.description in m.call_args[0][1].decode('utf8')
def test_get_response_with_badly_encoded_bytes(fake_manager):
"""
FnapyManager._get_response should return a correct `utils.Response` even if the raw
bytes are not properly encoded in UTF8
"""
xml_request = b"""
<?xml version='1.0' encoding='utf-8'?>
<messages_query xmlns="http://www.fnac.com/schemas/mp-dialog.xsd"
partner_id="X" shop_id="X" token="X"
results_count="100">
<paging>1</paging>
</messages_query>
"""
# This example contains the subject 'Nouvelle réclamation'
with mock.patch('fnapy.fnapy_manager.requests.post') as post:
resp = make_requests_get_mock('query_messages_response.xml')()
m = mock.Mock()
# We purposely encode the body in ISO-8859-1 instead of UTF8
m.content = resp.text.encode('ISO-8859-1')
m.text = resp.text
post.return_value = m
response = fake_manager._get_response(
etree.Element('messages_query'), xml_request
)
# Once decoded in UTF-8, the xml string should contain the correct
# characters.
assert 'Nouvelle réclamation' in response.xml.decode('utf8')
| alexandriagroup/fnapy | tests/offline/test_encoding.py | Python | mit | 2,864 |
class YamaleError(ValueError):
    def __init__(self, results):
        failed = [str(result) for result in results if not result.isValid()]
        super(YamaleError, self).__init__('\n'.join(failed))
        self.message = self.args[0]
        self.results = results
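# Usage sketch (illustrative; assumes yamale.validate() raises YamaleError
# on validation failure, as in yamale 2.x and later):
#
#     import yamale
#     schema = yamale.make_schema('schema.yaml')
#     data = yamale.make_data('data.yaml')
#     try:
#         yamale.validate(schema, data)
#     except YamaleError as e:
#         print(e.message)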
| 23andMe/Yamale | yamale/yamale_error.py | Python | mit | 252 |
# encoding: utf-8
from __future__ import print_function
import objc, time, math, sys, os, re, traceback, copy, datetime
from Foundation import NSObject, NSString, NSArray, NSMutableArray, NSMutableDictionary, NSDictionary, NSNumber, NSConcreteValue, \
NSClassFromString, NSUserDefaults, NSURL, NSNotificationCenter, NSMakePoint, NSNotFound, NSAttributedString, \
NSMutableAttributedString, NSLog, NSBundle, NSAffineTransform, NSPoint, NSRect, NSRange, NSUserNotification, \
NSUserNotificationCenter, NSDate, NSIndexSet
from AppKit import NSApp, NSDocumentController, NSOpenPanel, NSSavePanel, NSOKButton, NSWorkspace, \
NSMenuItem, NSOnState, NSOffState, NSMixedState, NSColor
GSAlignmentZone = objc.lookUpClass("GSAlignmentZone")
GSAnchor = objc.lookUpClass("GSAnchor")
GSAnnotation = objc.lookUpClass("GSAnnotation")
GSApplication = objc.lookUpClass("GSApplication")
GSBackgroundImage = objc.lookUpClass("GSBackgroundImage")
GSBackgroundLayer = objc.lookUpClass("GSBackgroundLayer")
GSClass = objc.lookUpClass("GSClass")
GSComponent = objc.lookUpClass("GSComponent")
GSControlLayer = objc.lookUpClass("GSControlLayer")
GSCustomParameter = objc.lookUpClass("GSCustomParameter")
GSDocument = objc.lookUpClass("GSDocument")
GSProjectDocument = objc.lookUpClass("GSProjectDocument")
GSEditViewController = objc.lookUpClass("GSEditViewController")
GSElement = objc.lookUpClass("GSElement")
GSFeature = objc.lookUpClass("GSFeature")
GSFeaturePrefix = objc.lookUpClass("GSFeaturePrefix")
GSFont = objc.lookUpClass("GSFont")
GSFontMaster = objc.lookUpClass("GSFontMaster")
GSGlyph = objc.lookUpClass("GSGlyph")
GSGlyphInfo = objc.lookUpClass("GSGlyphInfo")
GSGlyphsInfo = objc.lookUpClass("GSGlyphsInfo")
GSGuideLine = objc.lookUpClass("GSGuideLine")
GSHint = objc.lookUpClass("GSHint")
GSInstance = objc.lookUpClass("GSInstance")
GSLayer = objc.lookUpClass("GSLayer")
GSNode = objc.lookUpClass("GSNode")
GSPath = objc.lookUpClass("GSPath")
GSSubstitution = objc.lookUpClass("GSSubstitution")
GSPartProperty = objc.lookUpClass("GSPartProperty")
MGOrderedDictionary = objc.lookUpClass("MGOrderedDictionary")
GSNotifyingDictionary = objc.lookUpClass("GSNotifyingDictionary")
GSPathFinder = objc.lookUpClass("GSPathOperator")
GSPathPen = objc.lookUpClass("GSPathPen")
GSCallbackHandler = objc.lookUpClass("GSCallbackHandler")
GSInterpolationFontProxy = objc.lookUpClass("GSInterpolationFontProxy")
GSFeatureGenerator = objc.lookUpClass("GSFeatureGenerator")
GSTTStem = objc.lookUpClass("GSTTStem")
__all__ = [
"Glyphs", "GetFile",
"wrapperVersion",
"GSAlignmentZone", "GSAnchor", "GSAnnotation", "GSApplication", "GSBackgroundImage", "GSBackgroundLayer", "GSClass", "GSComponent", "GSControlLayer", "GSCustomParameter", "GSDocument", "GSProjectDocument", "GSEditViewController", "GSElement", "GSFeature", "GSFeaturePrefix", "GSFont", "GSFontMaster", "GSGlyph", "GSGlyphInfo", "GSGlyphsInfo", "GSGuideLine", "GSHint", "GSInstance", "GSLayer", "GSNode", "GSPath", "GSSubstitution", "GSPartProperty", "GSNotifyingDictionary", "GSPathFinder", "GSPathPen", "GSCallbackHandler", "GSFeatureGenerator", "GSTTStem",
# Constants
"MOVE", "LINE", "CURVE", "OFFCURVE", "QCURVE", "GSMOVE", "GSLINE", "GSCURVE", "GSOFFCURVE", "GSSHARP", "GSSMOOTH",
"TAG", "TOPGHOST", "STEM", "BOTTOMGHOST", "FLEX", "TTANCHOR", "TTSTEM", "TTALIGN", "TTINTERPOLATE", "TTDIAGONAL", "TTDELTA", "CORNER", "CAP", "TTDONTROUND", "TTROUND", "TTROUNDUP", "TTROUNDDOWN", "TRIPLE",
"TEXT", "ARROW", "CIRCLE", "PLUS", "MINUS",
"LTR", "RTL", "LTRTTB", "RTLTTB", "GSTopLeft", "GSTopCenter", "GSTopRight", "GSCenterLeft", "GSCenterCenter", "GSCenterRight", "GSBottomLeft", "GSBottomCenter", "GSBottomRight",
"OTF", "TTF", "VARIABLE", "UFO", "WOFF", "WOFF2", "PLAIN", "EOT",
# Methods
"divideCurve", "distance", "addPoints", "subtractPoints", "GetFolder", "GetSaveFile", "GetOpenFile", "Message", "LogToConsole", "LogError", "removeOverlap", "subtractPaths", "intersectPaths", "scalePoint",
# Classes
"GSSmartComponentAxis",
# Menus
"APP_MENU", "FILE_MENU", "EDIT_MENU", "GLYPH_MENU", "PATH_MENU", "FILTER_MENU", "VIEW_MENU", "SCRIPT_MENU", "WINDOW_MENU", "HELP_MENU",
"ONSTATE", "OFFSTATE", "MIXEDSTATE",
# Callbacks:
"DRAWFOREGROUND", "DRAWBACKGROUND", "DRAWINACTIVE", "DOCUMENTOPENED", "DOCUMENTACTIVATED", "DOCUMENTWASSAVED", "DOCUMENTEXPORTED", "DOCUMENTCLOSED", "TABDIDOPEN", "TABWILLCLOSE", "UPDATEINTERFACE", "MOUSEMOVED",
]
wrapperVersion = "2.5"
# This should help keep plugins backward compatible once they are already prepared for Python 3.
try:
from objc import python_method
except ImportError:
def python_method(arg):
return arg
objc.python_method = python_method
def _______________________(): pass
def ____CONSTANTS____(): pass
GSMOVE_ = 17
GSLINE_ = 1
GSCURVE_ = 35
GSQCURVE_ = 36
GSOFFCURVE_ = 65
GSSHARP = 0
GSSMOOTH = 100
GSMOVE = "move"
GSLINE = "line"
GSCURVE = "curve"
GSQCURVE = "qcurve"
GSOFFCURVE = "offcurve"
MOVE = "move"
LINE = "line"
CURVE = "curve"
QCURVE = "qcurve"
OFFCURVE = "offcurve"
TAG = -2
TOPGHOST = -1
STEM = 0
BOTTOMGHOST = 1
FLEX = 2
TTANCHOR = 3
TTSTEM = 4
TTALIGN = 5
TTINTERPOLATE = 6
TTDIAGONAL = 8
TTDELTA = 9
CORNER = 16
CAP = 17
TTDONTROUND = 4
TTROUND = 0
TTROUNDUP = 1
TTROUNDDOWN = 2
TRIPLE = 128
# annotations:
TEXT = 1
ARROW = 2
CIRCLE = 3
PLUS = 4
MINUS = 5
OTF = "OTF"
TTF = "TTF"
VARIABLE = "variable"
UFO = "UFO"
WOFF = "WOFF"
WOFF2 = "WOFF2"
PLAIN = "plain"
EOT = "EOT"
# Reverse lookup for __repr__ (aligned with the hint type constants defined above)
hintConstants = {
-2: 'Tag',
-1: 'TopGhost',
0: 'Stem',
1: 'BottomGhost',
2: 'Flex',
3: 'TTAnchor',
4: 'TTStem',
5: 'TTAlign',
6: 'TTInterpolate',
8: 'TTDiagonal',
9: 'TTDelta',
16: 'Corner',
17: 'Cap',
}
GSTopLeft = 6
GSTopCenter = 7
GSTopRight = 8
GSCenterLeft = 3
GSCenterCenter = 4
GSCenterRight = 5
GSBottomLeft = 0
GSBottomCenter = 1
GSBottomRight = 2
# Writing direction
LTR = 0
RTL = 1
LTRTTB = 3
RTLTTB = 2
# Callbacks
DRAWFOREGROUND = "DrawForeground"
DRAWBACKGROUND = "DrawBackground"
DRAWINACTIVE = "DrawInactive"
DOCUMENTOPENED = "GSDocumentWasOpenedNotification"
DOCUMENTACTIVATED = "GSDocumentActivateNotification"
DOCUMENTWASSAVED = "GSDocumentWasSavedSuccessfully"
DOCUMENTEXPORTED = "GSDocumentWasExportedNotification"
DOCUMENTCLOSED = "GSDocumentCloseNotification"
TABDIDOPEN = "TabDidOpenNotification"
TABWILLCLOSE = "TabWillCloseNotification"
UPDATEINTERFACE = "GSUpdateInterface"
MOUSEMOVED = "mouseMovedNotification"
MOUSEDOWN = "mouseDownNotification"
MOUSEUP = "mouseUpNotification"
# Menus
APP_MENU = "APP_MENU"
FILE_MENU = "FILE_MENU"
EDIT_MENU = "EDIT_MENU"
GLYPH_MENU = "GLYPH_MENU"
PATH_MENU = "PATH_MENU"
FILTER_MENU = "FILTER_MENU"
VIEW_MENU = "VIEW_MENU"
SCRIPT_MENU = "SCRIPT_MENU"
WINDOW_MENU = "WINDOW_MENU"
HELP_MENU = "HELP_MENU"
ONSTATE = NSOnState
OFFSTATE = NSOffState
MIXEDSTATE = NSMixedState
'''
Changes in the API
==================
These changes could possibly break your code, so you need to keep track of them. Please see :attr:`GSApplication.versionNumber` for how to check for the app version in your code. Really, read it. There’s a catch.
--------------------
Major Changes in 2.5
--------------------
- Add pointPen capabilities to `GSPathPen`
- Add setter for :meth:`GSFont.selection`
- Add :attr:`GSFont.axes`, :attr:`GSFontMaster.axes` and :attr:`GSInstance.axes`
- Add :attr:`GSLayer.componentLayer`
The :class:`GSLayer` the component is pointing to. This is read-only. In order to change the referenced base glyph, set :attr:`GSComponent.componentName` to the new glyph name.
For Smart Components, the `componentLayer` contains the interpolated result.
- Add :meth:`GSInstance.generate(UFO)`
- Add :meth:`GSFont.export()`
This allows exporting variable fonts
- Add :attr:`GSGlyph.unicodes`
--------------------
Major Changes in 2.3
--------------------
.. attribute:: *.bezierPath
We've created a distinct ``.bezierPath`` attribute for various objects (paths, components, etc.) for drawing in plug-ins. It overwrites the previous (and never documented) ``.bezierPath()`` method of the same name (from the PyObjC bridge) that returned an ``NSBezierPath`` object.
Old: ``.bezierPath()``
New: ``.bezierPath``
--------------------
Major Changes in 2.2
--------------------
.. attribute:: GSLayer.selection
We've created a distinct ``.selection`` attribute for the layer object that contains all items (paths, components, etc.) selected by the user in the UI, overwriting the previous ``.selection()`` method (from the PyObjC bridge).
Old: ``.selection()``
New: ``.selection``
'''
def GSObject__copy__(self, memo=None):
return self.copy()
def GSObject__new__(typ, *args, **kwargs):
return typ.alloc().init()
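# Proxy is the base class for the list- and dict-like wrappers below: each
# subclass exposes an ObjC collection (glyphs, masters, layers, ...) through
# Python container syntax by implementing values() and, where the collection
# is writable, setterMethod().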
class Proxy(object):
def __init__(self, owner):
self._owner = owner
def __repr__(self):
"""Return list-lookalike of representation string of objects"""
strings = []
for currItem in self:
strings.append("%s" % (currItem))
return "(%s)" % (',\n'.join(strings))
def __len__(self):
Values = self.values()
if Values is not None:
return len(Values)
return 0
def pop(self, i):
if type(i) == int:
node = self[i]
del self[i]
return node
else:
raise(KeyError)
def __iter__(self):
Values = self.values()
if Values is not None:
for element in Values:
yield element
def index(self, Value):
return self.values().index(Value)
def __copy__(self):
return list(self)
def __deepcopy__(self, memo):
return [x.copy() for x in self.values()]
def setter(self, values):
method = self.setterMethod()
if isinstance(values, (list, NSArray)):
method(NSMutableArray.arrayWithArray_(values))
elif isinstance(values, (tuple, type(self))):
method(NSMutableArray.arrayWithArray_(list(values)))
elif values is None:
method(NSMutableArray.array())
else:
raise TypeError
##################################################################################
#
#
#
# GSApplication
#
#
#
##################################################################################
def _______________________(): pass
def ____GSApplication____(): pass
def _______________________(): pass
Glyphs = NSApp()
'''
:mod:`GSApplication`
===============================================================================
The mothership. Everything starts here.
.. code-block:: python
print(Glyphs)
.. code-block:: python
<Glyphs.app>
.. class:: GSApplication()
Properties
.. autosummary::
font
fonts
reporters
activeReporters
filters
defaults
scriptAbbreviations
scriptSuffixes
languageScripts
languageData
unicodeRanges
editViewWidth
handleSize
versionString
versionNumber
buildNumber
menu
Functions
.. autosummary::
open()
showMacroWindow()
clearLog()
showGlyphInfoPanelWithSearchString()
glyphInfoForName()
glyphInfoForUnicode()
niceGlyphName()
productionGlyphName()
ligatureComponents()
addCallback()
removeCallback()
redraw()
showNotification()
localize()
activateReporter()
deactivateReporter()
**Properties**
'''
GSApplication.currentDocument = property(lambda self: NSApp().currentFontDocument())
GSApplication.documents = property(lambda self: AppDocumentProxy(self))
def Glyphs__repr__(self):
return '<Glyphs.app>'
GSApplication.__repr__ = python_method(Glyphs__repr__)
def currentFont():
try:
doc = NSApp().currentFontDocument()
return doc.font
except AttributeError:
pass
return None
# by Yanone
GSApplication.font = property(lambda self: currentFont())
'''
.. attribute:: font
:return: The active :class:`GSFont` object or None.
:rtype: :class:`GSFont`
.. code-block:: python
# topmost open font
font = Glyphs.font
'''
GSApplication.fonts = property(lambda self: AppFontProxy(self))
'''
.. attribute:: fonts
:return: A list of all open :class:`GSFont` objects.
.. code-block:: python
# access all open fonts
for font in Glyphs.fonts:
print(font.familyName)
# add a font
font = GSFont()
font.familyName = "My New Fonts"
Glyphs.fonts.append(font)
'''
GSApplication.reporters = property(lambda self: GSCallbackHandler.sharedHandler().reporterInstances().allValues())
'''
.. attribute:: reporters
List of available reporter plug-ins (same as bottom section in the 'View' menu). These are the actual objects. You can get hold of their names using `object.__class__.__name__`.
Also see :meth:`GSApplication.activateReporter()` and :meth:`GSApplication.deactivateReporter()` methods below to activate/deactivate them.
.. code-block:: python
# List of all reporter plug-ins
print(Glyphs.reporters)
# Individual plug-in class names
for reporter in Glyphs.reporters:
print(reporter.__class__.__name__)
# Activate a plugin
Glyphs.activateReporter(Glyphs.reporters[0]) # by object
Glyphs.activateReporter('GlyphsMasterCompatibility') # by class name
.. versionadded:: 2.3
'''
GSApplication.activeReporters = property(lambda self: GSCallbackHandler.activeReporters())
'''
.. attribute:: activeReporters
List of activated reporter plug-ins.
.. versionadded:: 2.3
'''
GSApplication.filters = property(lambda self: NSApp.delegate().filterInstances())
'''
.. attribute:: filters
List of available filters (same as 'Filter' menu). These are the actual objects.
The sample code below shows how to get hold of a particular filter and use it. You invoke it using the `processFont_withArguments_()` function for old plugins, or the `filter()` function for newer plugins.
As arguments, use the string obtained via the 'Copy Custom Parameter' button in the filter’s dialog (gear icon), converted to a list.
In the `include` option you can supply a comma-separated list of glyph names.
Here's a catch: old plugins will only run on the first layer of a glyph, because the function `processFont_withArguments_()` was designed to run on instances upon export that have already been reduced to one layer. You can work around that by changing the order of the layers, then changing them back (not shown in the sample code).
.. code-block:: python
# Helper function to get filter by its class name
def filter(name):
for filter in Glyphs.filters:
if filter.__class__.__name__ == name:
return filter
# Get the filter
offsetCurveFilter = filter('GlyphsFilterOffsetCurve')
# Run the filter (old plugins)
# The arguments came from the 'Copy Custom Parameter' as:
# Filter = "GlyphsFilterOffsetCurve;10;10;1;0.5;"
offsetCurveFilter.processFont_withArguments_(font, ['GlyphsFilterOffsetCurve', '10', '10', '1', '0.5', 'include:%s' % glyph.name])
# If the plugin were a new filter, the same call would look like this:
# (run on a specific layer, not the first layer glyphs in the include-list)
# The arguments list is a dictionary with either incrementing integers as keys or names (as per 'Copy Custom Parameter' list)
offsetCurveFilter.filter(layer, False, {0: 10, 1: 10, 2: 1, 3: 0.5})
.. versionadded:: After 2.4.2
'''
if sys.version_info[0] == 2:
STR_TYPES = (str, unicode, objc.pyobjc_unicode)
else:
STR_TYPES = (str, objc.pyobjc_unicode)
def isString(string):
return isinstance(string, STR_TYPES)
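# Converts plain Python values (str, int, float, list, dict) to their
# Foundation counterparts, recursing into lists and dicts; anything else is
# returned unchanged.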
def objcObject(pyObject):
if isString(pyObject):  # version-safe string check (Python 3 has no 'unicode')
return NSString.stringWithString_(pyObject)
if isinstance(pyObject, int):
return NSNumber.numberWithInt_(pyObject)
if isinstance(pyObject, float):
return NSNumber.numberWithFloat_(pyObject)
if isinstance(pyObject, list):
array = NSMutableArray.array()
for value in pyObject:
array.addObject_(objcObject(value))
return array
if isinstance(pyObject, dict):
dictionary = NSMutableDictionary.dictionary()
for key, value in pyObject.items():  # viewitems() exists only in Python 2
dictionary.setObject_forKey_(objcObject(value), objcObject(key))
return dictionary
return pyObject
class DefaultsProxy(Proxy):
def __getitem__(self, Key):
return NSUserDefaults.standardUserDefaults().objectForKey_(Key)
def __setitem__(self, Key, Value):
if Value is not None:
NSUserDefaults.standardUserDefaults().setObject_forKey_(Value, Key)
else:
NSUserDefaults.standardUserDefaults().removeObjectForKey_(Key)
def __delitem__(self, Key):
NSUserDefaults.standardUserDefaults().removeObjectForKey_(Key)
def __repr__(self):
return "<Userdefaults>"
GSApplication.defaults = property(lambda self: DefaultsProxy(self))
def __registerDefault__(self, defaults, values=None):
if defaults is not None and values is not None and len(defaults) > 2:
NSUserDefaults.standardUserDefaults().registerDefaults_({defaults: values})
elif defaults and not values:
NSUserDefaults.standardUserDefaults().registerDefaults_(defaults)
else:
raise KeyError
GSApplication.registerDefault = __registerDefault__
def __registerDefaults__(self, defaults):
if defaults is not None:
NSUserDefaults.standardUserDefaults().registerDefaults_(defaults)
else:
raise ValueError
GSApplication.registerDefaults = __registerDefaults__
# TODO: docu for registerDefaults
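'''
.. function:: registerDefaults(defaults)
Registers fallback values for preferences (the implementation above hands the dictionary to NSUserDefaults). A registered value is what :attr:`defaults` returns until the user sets an explicit value, and what deleting a key falls back to. A minimal usage sketch:
.. code-block:: python
# register a fallback, then read it back
Glyphs.registerDefaults({"com.MyName.foo.bar": 15})
print(Glyphs.defaults["com.MyName.foo.bar"])
'''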
'''
.. attribute:: defaults
A dict-like object for storing preferences. You can get and set key-value pairs.
Please be careful with your keys. Use a reverse-domain-name prefix, e.g. "com.MyName.foo.bar".
.. code-block:: python
# Check whether a preference exists, because has_key() doesn't work in this PyObjC bridge
if Glyphs.defaults["com.MyName.foo.bar"] is None:
# do stuff
# Get and set values
value = Glyphs.defaults["com.MyName.foo.bar"]
Glyphs.defaults["com.MyName.foo.bar"] = newValue
# Remove value
# This will restore the default value
del(Glyphs.defaults["com.MyName.foo.bar"])
'''
class BoolDefaultsProxy(DefaultsProxy):
def __getitem__(self, Key):
return NSUserDefaults.standardUserDefaults().boolForKey_(Key)
def __setitem__(self, Key, Value):
if Value is not None:
NSUserDefaults.standardUserDefaults().setBool_forKey_(Value, Key)
else:
NSUserDefaults.standardUserDefaults().removeObjectForKey_(Key)
GSApplication.boolDefaults = property(lambda self: BoolDefaultsProxy(self))
class IntDefaultsProxy(DefaultsProxy):
def __getitem__(self, Key):
return NSUserDefaults.standardUserDefaults().integerForKey_(Key)
def __setitem__(self, Key, Value):
if Value is not None:
NSUserDefaults.standardUserDefaults().setInteger_forKey_(Value, Key)
else:
NSUserDefaults.standardUserDefaults().removeObjectForKey_(Key)
GSApplication.intDefaults = property(lambda self: IntDefaultsProxy(self))
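'''
.. attribute:: boolDefaults
.. attribute:: intDefaults
Typed variants of :attr:`defaults` that read and write preferences through NSUserDefaults' boolForKey_/integerForKey_ accessors (see the proxies above). A minimal usage sketch:
.. code-block:: python
# read an integer preference without manual casting
print(Glyphs.intDefaults["GSHandleSize"])
'''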
GSApplication.scriptAbbreviations = property(lambda self: GSGlyphsInfo.scriptAbbreviations())
# Fixed a typo in the property name. Kept the old spelling for compatibility.
def _old_scriptAbbreviations():
print("The method name has changed. Please use new syntax: Glyphs.scriptAbbreviations")
return GSGlyphsInfo.scriptAbbreviations()
GSApplication.scriptAbbrevations = property(lambda self: _old_scriptAbbreviations())
'''
.. attribute:: scriptAbbreviations
A dictionary with script name to abbreviation mapping, e.g., 'arabic': 'arab'
:rtype: dict
'''
GSApplication.scriptSuffixes = property(lambda self: GSGlyphsInfo.scriptSuffixes())
'''
.. attribute:: scriptSuffixes
A dictionary with glyph name suffixes for scripts and their respective script names, e.g., 'cy': 'cyrillic'
:rtype: dict
'''
GSApplication.languageScripts = property(lambda self: GSGlyphsInfo.languageScripts())
'''
.. attribute:: languageScripts
A dictionary with language tag to script tag mapping, e.g., 'ENG': 'latn'
:rtype: dict
'''
GSApplication.languageData = property(lambda self: GSGlyphsInfo.languageData())
'''
.. attribute:: languageData
A list of dictionaries with more detailed language information.
:rtype: list
'''
GSApplication.unicodeRanges = property(lambda self: GSGlyphsInfo.unicodeRanges())
'''
.. attribute:: unicodeRanges
Names of Unicode ranges.
:rtype: list
'''
def Glyphs_setUserDefaults(self, key, value):
self.defaults[key] = value
def NSStr(string):
if string:
return NSString.stringWithString_(string)
else:
return None
GSApplication.editViewWidth = property(lambda self: self.intDefaults["GSFontViewWidth"], lambda self, value: Glyphs_setUserDefaults(self, "GSFontViewWidth", int(value)))
'''
.. attribute:: editViewWidth
.. versionadded:: 2.3
Width of glyph Edit view. Corresponds to the "Width of editor" setting from the Preferences.
:type: int
'''
GSApplication.handleSize = property(lambda self: self.intDefaults["GSHandleSize"], lambda self, value: Glyphs_setUserDefaults(self, "GSHandleSize", int(value)))
'''
.. attribute:: handleSize
.. versionadded:: 2.3
Size of Bezier handles in the Glyph Edit view. Possible values are 0, 1, and 2. Corresponds to the "Handle size" setting from the Preferences.
To use the handle size for drawing in reporter plugins, you need to convert the handle size to a point size, and divide by the view's scale factor. See example below.
.. code-block:: python
# Calculate handle size
handleSizeInPoints = 5 + Glyphs.handleSize * 2.5 # (= 5.0 or 7.5 or 10.0)
scaleCorrectedHandleSize = handleSizeInPoints / Glyphs.font.currentTab.scale
# Draw point in size of handles
point = NSPoint(100, 100)
NSColor.redColor().set()
rect = NSRect((point.x - scaleCorrectedHandleSize * 0.5, point.y - scaleCorrectedHandleSize * 0.5), (scaleCorrectedHandleSize, scaleCorrectedHandleSize))
bezierPath = NSBezierPath.bezierPathWithOvalInRect_(rect)
bezierPath.fill()
:type: int
'''
GSApplication.versionString = NSBundle.mainBundle().infoDictionary()["CFBundleShortVersionString"]
'''
.. attribute:: versionString
.. versionadded:: 2.3
String containing Glyphs.app's version number. It may also contain letters, like '2.3b'. To check for a specific version, use .versionNumber below.
:type: string
'''
def Glyphs_FloatVersion(self):
m = re.match(r"(\d+)\.(\d+)", self.versionString)
return float(str(m.group(1)) + '.' + str(m.group(2)))
GSApplication.versionNumber = property(lambda self: Glyphs_FloatVersion(self))
'''
.. attribute:: versionNumber
.. versionadded:: 2.3
Glyphs.app's version number. Use this to check for the app version in your code.
Here’s the catch: Since we only added this `versionNumber` attribute in Glyphs v2.3, it is not possible to use this attribute to check for versions of Glyphs older than 2.3. We’re deeply sorry for this inconvenience. Development is a slow and painful process.
So you must first check for the existence of the `versionNumber` attribute like so:
.. code-block:: python
# Code valid for Glyphs.app v2.3 and above:
if hasattr(Glyphs, 'versionNumber') and Glyphs.versionNumber >= 2.3:
# do stuff
# Code for older versions
else:
# do other stuff
:type: float
'''
GSApplication.buildNumber = int(NSBundle.mainBundle().infoDictionary()["CFBundleVersion"])
'''
.. attribute:: buildNumber
.. versionadded:: 2.3
Glyphs.app's build number.
Especially if you're using preview builds, this number may be more important to you than the version number. The build number increases with every released build and is the most reliable indicator of a new Glyphs version, while the version number is set arbitrarily and stays the same until the next stable release.
:type: int
'''
menuTagLookup = {
APP_MENU: 1,
FILE_MENU: 3,
EDIT_MENU: 5,
GLYPH_MENU: 7,
PATH_MENU: 9,
FILTER_MENU: 11,
VIEW_MENU: 13,
SCRIPT_MENU: 15,
WINDOW_MENU: 17,
HELP_MENU: 19,
}
class AppMenuProxy (Proxy):
"""Access the main menu."""
def __getitem__(self, Key):
if isinstance(Key, int):
return self._owner.mainMenu().itemAtIndex_(Key)
elif isString(Key):
Tag = menuTagLookup[Key]
return self._owner.mainMenu().itemWithTag_(Tag)
def values(self):
return self._owner.mainMenu().itemArray()
GSApplication.menu = property(lambda self: AppMenuProxy(self))
'''
.. attribute:: menu
.. versionadded:: 2.3.1-910
Add menu items to Glyphs’ main menus.
Following constants for accessing the menus are defined:
:const:`APP_MENU`, :const:`FILE_MENU`, :const:`EDIT_MENU`, :const:`GLYPH_MENU`, :const:`PATH_MENU`, :const:`FILTER_MENU`, :const:`VIEW_MENU`, :const:`SCRIPT_MENU`, :const:`WINDOW_MENU`, :const:`HELP_MENU`
.. code-block:: python
def doStuff(sender):
# do stuff
newMenuItem = NSMenuItem('My menu title', doStuff)
Glyphs.menu[EDIT_MENU].append(newMenuItem)
'''
NSMenuItem.__new__ = staticmethod(GSObject__new__)
def NSMenuItem__init__(self, title, callback, keyboard="", modifier=0):
self.setTitle_(title)
callbackTargets = None
try:
callbackTargets = callbackOperationTargets["NSMenuItem"]
except KeyError:
callbackTargets = []
callbackOperationTargets["NSMenuItem"] = callbackTargets
helper = callbackHelperClass(callback, None)
callbackTargets.append(helper)
selector = objc.selector(helper.callback, signature="v@:@")
self.setAction_(selector)
self.setTarget_(helper)
if keyboard != "":
self.setKeyEquivalent_(keyboard)
self.setKeyEquivalentModifierMask_(modifier)
NSMenuItem.__init__ = NSMenuItem__init__
def __NSMenuItem__append__(self, item):
self.submenu().addItem_(item)
NSMenuItem.append = __NSMenuItem__append__
def __NSMenuItem__insert__(self, index, item):
self.submenu().insertItem_atIndex_(item, index)
NSMenuItem.insert = __NSMenuItem__insert__
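'''
The `append()` and `insert()` helpers above add an item to a menu item's submenu. A minimal sketch, reusing the `doStuff` callback from the menu example above:
.. code-block:: python
newMenuItem = NSMenuItem('My menu title', doStuff)
# put the item at the top of the Edit menu instead of appending it
Glyphs.menu[EDIT_MENU].insert(0, newMenuItem)
'''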
'''
**Functions**
'''
def OpenFont(self, Path, showInterface=True):
URL = NSURL.fileURLWithPath_(Path)
Doc = self.openDocumentWithContentsOfURL_display_(URL, showInterface)
if Doc is not None:
return Doc.font
return None
GSApplication.open = OpenFont
'''
.. function:: open(Path, [showInterface=True])
Opens a document
:param Path: The path where the document is located.
:type Path: str
:param showInterface: If a document window should be opened. Default: True
:type showInterface: bool
:return: The opened document object or None.
:rtype: :class:`GSFont`
'''
def __ShowMacroWindow(self):
Glyphs.delegate().showMacroWindow()
GSApplication.showMacroWindow = __ShowMacroWindow
'''
.. function:: showMacroWindow
Opens the macro window
.. function:: clearLog()
Deletes the content of the console in the macro window
'''
def __showGlyphInfoPanelWithSearchString__(self, String):
Glyphs.delegate().showGlyphInfoPanelWithSearchString_(String)
GSApplication.showGlyphInfoPanelWithSearchString = __showGlyphInfoPanelWithSearchString__
'''
.. function:: showGlyphInfoPanelWithSearchString(String)
Shows the Glyph Info window with a preset search string
:param String: The search term
'''
def _glyphInfoForName(self, String, font=None):
if type(String) is int:
return self.glyphInfoForUnicode(String)
if font is not None:
return font.glyphsInfo().glyphInfoForName_(String)
return GSGlyphsInfo.sharedManager().glyphInfoForName_(String)
GSApplication.glyphInfoForName = _glyphInfoForName
'''
.. function:: glyphInfoForName(String)
Generates a :class:`GSGlyphInfo` object for a given glyph name.
:param String: Glyph name
:param font: if you supply a font and it has local glyph info, that will be used instead of the global info data.
:return: :class:`GSGlyphInfo`
'''
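'''
A minimal usage sketch (looking up the global glyph info database):
.. code-block:: python
info = Glyphs.glyphInfoForName('adieresis')
print(info.name, info.unicode)
'''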
def _glyphInfoForUnicode(self, String, font=None):
if type(String) is int:
String = "%04X" % String
if font is not None:
return font.glyphsInfo().glyphInfoForUnicode_(String)
return GSGlyphsInfo.sharedManager().glyphInfoForUnicode_(String)
GSApplication.glyphInfoForUnicode = _glyphInfoForUnicode
'''
.. function:: glyphInfoForUnicode(Unicode)
Generates a :class:`GSGlyphInfo` object for a given hex Unicode value.
:param String: Hex Unicode value
:param font: if you supply a font and it has local glyph info, that will be used instead of the global info data.
:return: :class:`GSGlyphInfo`
'''
def _niceGlyphName(self, String, font=None):
if font is not None:
return font.glyphsInfo().niceGlyphNameForName_(String)
return GSGlyphsInfo.sharedManager().niceGlyphNameForName_(String)
GSApplication.niceGlyphName = _niceGlyphName
'''
.. function:: niceGlyphName(Name)
Converts a glyph name to a nice, human-readable glyph name (e.g. afii10017 or uni0410 to A-cy)
:param string: glyph name
:param font: if you supply a font and it has local glyph info, that will be used instead of the global info data.
:return: string
'''
def _productionGlyphName(self, String, font=None):
if font is not None:
return font.glyphsInfo().productionGlyphNameForName_(String)
return GSGlyphsInfo.sharedManager().productionGlyphNameForName_(String)
GSApplication.productionGlyphName = _productionGlyphName
'''
.. function:: productionGlyphName(Name)
Converts a glyph name to its production glyph name (e.g. afii10017 or A-cy to uni0410)
:param string: glyph name
:param font: if you supply a font and it has local glyph info, that will be used instead of the global info data.
:return: string
'''
def _ligatureComponents(self, String, font=None):
if font is not None:
return font.glyphsInfo().componentsForLigaName_font_(String, font)
return GSGlyphsInfo.sharedManager().componentsForLigaName_font_(String, None)
GSApplication.ligatureComponents = _ligatureComponents
'''
.. function:: ligatureComponents(String)
If defined as a ligature in the glyph database, this function returns a list of glyph names that this ligature could be composed of.
:param string: glyph name
:param font: if you supply a font and it has local glyph info, that will be used instead of the global info data.
:return: list
.. code-block:: python
print(Glyphs.ligatureComponents('allah-ar'))
(
"alef-ar",
"lam-ar.init",
"lam-ar.medi",
"heh-ar.fina"
)
'''
##########################################################################################################
#
#
# Callback section
#
#
DrawLayerCallbacks = (DRAWFOREGROUND, DRAWBACKGROUND, DRAWINACTIVE)
Observers = (DOCUMENTOPENED, DOCUMENTACTIVATED, DOCUMENTWASSAVED, DOCUMENTEXPORTED, DOCUMENTCLOSED, TABDIDOPEN, TABWILLCLOSE, UPDATEINTERFACE, MOUSEMOVED)
callbackOperationTargets = {}
class callbackHelperClass(NSObject):
def __init__(self, func, operation):
self.func = func
self.operation = operation
def __new__(typ, *args, **kwargs):
self = callbackHelperClass.alloc().init()
if len(args) > 1:
self.func = args[0]
self.operation = args[1]
return self
def drawForegroundForLayer_options_(self, Layer, options):
try:
if self.func:
self.func(Layer, options)
except:
LogError(traceback.format_exc())
def drawBackgroundForLayer_options_(self, Layer, options):
try:
if self.func:
self.func(Layer, options)
except:
LogError(traceback.format_exc())
def drawBackgroundForInactiveLayer_options_(self, Layer, options):
try:
if self.func:
self.func(Layer, options)
except:
LogError(traceback.format_exc())
@objc.python_method
def callback(self, notification):
if self.func:
self.func(notification)
def description(self): # for debugging in Xcode
desc = super(callbackHelperClass, self).description()
return "%s %s" % (desc, str(self.func))
def __addCallback__(self, target, operation):
# Remove possible old function by the same name
targetName = str(target)
try:
callbackTargets = None
try:
callbackTargets = callbackOperationTargets[operation]
except:
callbackTargets = {}
callbackOperationTargets[operation] = callbackTargets
if targetName in callbackTargets:
self.removeCallback(target, operation)
# DrawLayerCallbacks
if operation in DrawLayerCallbacks:
# Add class to callbackTargets dict by the function name
callbackTargets[targetName] = callbackHelperClass(target, operation)
# Add to stack
GSCallbackHandler.addCallback_forOperation_(callbackTargets[targetName], operation)
# Redraw immediately
self.redraw()
# Other observers
elif operation in Observers:
# Add class to callbackTargets dict by the function name
callbackTargets[targetName] = callbackHelperClass(target, operation)
selector = objc.selector(callbackTargets[targetName].callback, signature="v@:@")
NSNotificationCenter.defaultCenter().addObserver_selector_name_object_(callbackTargets[targetName], selector, operation, objc.nil)
except:
NSLog(traceback.format_exc())
GSApplication.addCallback = __addCallback__
'''
.. function:: addCallback(function, hook)
.. versionadded:: 2.3
Add a user-defined function to the glyph window's drawing operations, in the foreground and background for the active glyph as well as in the inactive glyphs.
The function names are used to add/remove the functions to/from the hooks, so make sure to use unique function names.
Your function needs to accept two values: `layer` which will contain the respective :class:`GSLayer` object of the layer we're dealing with and `info` which is a dictionary and contains the value `Scale` (for the moment).
For the hooks these constants are defined: `DRAWFOREGROUND`, `DRAWBACKGROUND`, `DRAWINACTIVE`, `DOCUMENTWASSAVED`, `DOCUMENTOPENED`, `TABDIDOPEN`, `TABWILLCLOSE`, `UPDATEINTERFACE`, `MOUSEMOVED`. For more information check the constants section.
.. code-block:: python
def drawGlyphIntoBackground(layer, info):
# Due to internal Glyphs.app structure, we need to catch and print exceptions
# of these callback functions with try/except like so:
try:
# Your drawing code here
NSColor.redColor().set()
layer.bezierPath.fill()
# Error. Print exception.
except:
import traceback
print(traceback.format_exc())
# add your function to the hook
Glyphs.addCallback(drawGlyphIntoBackground, DRAWBACKGROUND)
'''
def __do__removeCallback___(self, target, operation):
targetName = str(target)
callbackTargets = None
try:
callbackTargets = callbackOperationTargets[operation]
except:
return
if targetName in callbackTargets:
# DrawLayerCallbacks
if callbackTargets[targetName].operation in DrawLayerCallbacks:
GSCallbackHandler.removeCallback_(callbackTargets[targetName])
del(callbackTargets[targetName])
# Redraw immediately
self.redraw()
# Other observers
elif callbackTargets[targetName].operation in Observers:
NSNotificationCenter.defaultCenter().removeObserver_(callbackTargets[targetName])
del(callbackTargets[targetName])
def __removeCallback___(self, target, operation=None):
if operation is not None:
__do__removeCallback___(self, target, operation)
else:
for operation in callbackOperationTargets.keys():
__do__removeCallback___(self, target, operation)
GSApplication.removeCallback = __removeCallback___
'''
.. function:: removeCallback(function)
.. versionadded:: 2.3
Remove the function you've previously added.
.. code-block:: python
# remove your function from the hook
Glyphs.removeCallback(drawGlyphIntoBackground)
'''
#
#
# // end of Callback section
#
#
##########################################################################################################
def __redraw__(self):
NSNotificationCenter.defaultCenter().postNotificationName_object_("GSRedrawEditView", None)
GSApplication.redraw = __redraw__
'''
.. function:: redraw()
Redraws all Edit views and Preview views.
'''
def Glyphs_showNotification(self, title, message):
notification = NSUserNotification.alloc().init()
notification.setTitle_(title)
notification.setInformativeText_(message)
NSUserNotificationCenter.defaultUserNotificationCenter().deliverNotification_(notification)
GSApplication.showNotification = Glyphs_showNotification
'''
.. function:: showNotification(title, message)
Shows the user a notification in Mac's Notification Center.
.. code-block:: python
Glyphs.showNotification('Export fonts', 'The export of the fonts was successful.')
'''
def Glyphs_localize(self, localization):
if isString(localization):
return localization
elif type(localization) == dict:
# Return first match of languages list
for priority in self.defaults["AppleLanguages"]:
priority = priority[:2]
if priority in localization:
return localization[priority]
language = localization.get("en", None) # first, look for an English entry.
if language is not None:
return language
# None found, return first item in localization dict
return localization[list(localization.keys())[0]] # list() needed; dict.keys() is not subscriptable in Python 3
GSApplication.localize = Glyphs_localize
'''
.. function:: localize(localization)
.. versionadded:: 2.3
Return a string in the language of Glyphs.app’s UI locale, which must be supplied as a dictionary using language codes as keys.
The argument is a dictionary in the `languageCode: translatedString` format.
You don’t need to supply strings in all languages that the Glyphs.app UI supports. A subset will do. Just make sure that you add at least an English string to default to next to all your other translated strings. Also don’t forget to mark strings as unicode strings (`u'öäüß'`) when they contain non-ASCII content for proper encoding, and add a `# encoding: utf-8` to the top of all your .py files.
Tip: You can find Glyphs’ UI languages here: `Glyphs.defaults["AppleLanguages"]`.
.. code-block:: python
# encoding: utf-8
print(Glyphs.localize({
'en': 'Hello World',
'de': u'Hallöle Welt',
'fr': 'Bonjour tout le monde',
'es': 'Hola Mundo',
}))
# Given that your Mac’s system language is set to German
# and Glyphs.app UI is set to use localization (change in preferences),
# it will print:
> Hallöle Welt
'''
def __GSApplication_activateReporter__(self, Reporter):
if isString(Reporter):
for r in self.reporters:
if r.__class__.__name__ == Reporter:
Reporter = r
break
GSCallbackHandler.activateReporter_(Reporter)
GSApplication.activateReporter = __GSApplication_activateReporter__
'''
.. function:: activateReporter(reporter)
.. versionadded:: 2.3
Activate a reporter plug-in by its object (see Glyphs.reporters) or class name.
.. code-block:: python
Glyphs.activateReporter('GlyphsMasterCompatibility')
'''
def __GSApplication_deactivateReporter__(self, Reporter):
if isString(Reporter):
for r in self.reporters:
if r.__class__.__name__ == Reporter:
Reporter = r
break
GSCallbackHandler.deactivateReporter_(Reporter)
GSApplication.deactivateReporter = __GSApplication_deactivateReporter__
'''
.. function:: deactivateReporter(reporter)
.. versionadded:: 2.3
Deactivate a reporter plug-in by its object (see Glyphs.reporters) or class name.
.. code-block:: python
Glyphs.deactivateReporter('GlyphsMasterCompatibility')
'''
GSDocument.__new__ = staticmethod(GSObject__new__)
GSProjectDocument.__new__ = staticmethod(GSObject__new__)
GSElement.x = property(lambda self: self.pyobjc_instanceMethods.position().x,
lambda self, value: self.setPosition_(NSMakePoint(value, self.y)))
GSElement.y = property(lambda self: self.pyobjc_instanceMethods.position().y,
lambda self, value: self.setPosition_(NSMakePoint(self.x, value)))
GSElement.layer = property(lambda self: self.pyobjc_instanceMethods.layer())
GSElement.__new__ = staticmethod(GSObject__new__)
def ____PROXIES____(): pass
def _______________________(): pass
class AppDocumentProxy (Proxy):
"""The list of documents."""
def __getitem__(self, Key):
if type(Key) == slice:
return self.values().__getitem__(Key)
if type(Key) is int:
Values = self.values()
if Key < 0:
Key = len(Values) + Key
return Values[Key]
else:
raise(KeyError)
def append(self, doc):
NSDocumentController.sharedDocumentController().addDocument_(doc)
doc.makeWindowControllers()
doc.showWindows()
def values(self):
return self._owner.fontDocuments()
class AppFontProxy (Proxy):
"""The list of fonts."""
def __getitem__(self, Key):
if type(Key) == slice:
return self.values().__getitem__(Key)
if type(Key) is int:
Values = self.values()
if Key < 0:
Key = len(Values) + Key
return Values[Key]
else:
raise(KeyError)
def values(self):
fonts = []
for doc in self._owner.fontDocuments():
fonts.append(doc.font)
return fonts
def append(self, font):
doc = Glyphs.documentController().openUntitledDocumentAndDisplay_error_(True, None)[0]
doc.setFont_(font)
def extend(self, fonts):
for font in fonts:
self.append(font)
GSDocument.font = property(lambda self: self.pyobjc_instanceMethods.font(),
lambda self, value: self.setFont_(value))
'''
.. attribute:: font
The active :class:`GSFont`
:type: GSFont
'''
class FontGlyphsProxy (Proxy):
"""The list of glyphs. You can access it with the index or the glyph name.
Usage:
Font.glyphs[index]
Font.glyphs[name]
for glyph in Font.glyphs:
...
"""
def __getitem__(self, Key):
if Key is None:
return None
if type(Key) == slice:
return self.values().__getitem__(Key)
# by index
if type(Key) is int:
if Key < 0:
Key = self.__len__() + Key
return self._owner.glyphAtIndex_(Key)
# by glyph name
elif self._owner.glyphForName_(Key):
return self._owner.glyphForName_(Key)
# by string representation as u'ä'
elif len(Key) == 1 and self._owner.glyphForCharacter_(ord(Key)):
return self._owner.glyphForCharacter_(ord(Key))
# by unicode
else:
return self._owner.glyphForUnicode_(Key.upper())
def __setitem__(self, Key, Glyph):
if type(Key) is int:
if Key < 0:
Key = self.__len__() + Key
self._owner.removeGlyph_(self._owner.glyphAtIndex_(Key))
self._owner.addGlyph_(Glyph)
else:
self._owner.removeGlyph_(self._owner.glyphForName_(Key))
self._owner.addGlyph_(Glyph)
def __delitem__(self, Key):
if type(Key) is int:
if Key < 0:
Key = self.__len__() + Key
self._owner.removeGlyph_(self._owner.glyphAtIndex_(Key))
else:
self._owner.removeGlyph_(self._owner.glyphForName_(Key))
def __contains__(self, item):
if isString(item):
return self._owner.glyphForName_(item) is not None
return self._owner.indexOfGlyph_(item) < NSNotFound # indexOfGlyph_ returns NSNotFound which is some very big number
def keys(self):
return self._owner.pyobjc_instanceMethods.glyphs().valueForKeyPath_("@unionOfObjects.name")
def values(self):
return self._owner.pyobjc_instanceMethods.glyphs()
def items(self):
Items = []
for Value in self._owner.pyobjc_instanceMethods.glyphs():
Key = Value.name
Items.append((Key, Value))
return Items
def has_key(self, Key):
return self._owner.glyphForName_(Key) is not None
def append(self, Glyph):
if not self.has_key(Glyph.name):
self._owner.addGlyph_(Glyph)
else:
raise NameError('There is a glyph with the name \"%s\" already in the font.' % Glyph.name)
def extend(self, objects):
self._owner.addGlyphsFromArray_(list(objects))
def __len__(self):
return self._owner.count()
def setterMethod(self):
return self._owner.setGlyphs_
class FontFontMasterProxy (Proxy):
def __getitem__(self, Key):
if type(Key) == slice:
return self.values().__getitem__(Key)
if type(Key) is int:
if Key < 0:
Key = self.__len__() + Key
return self._owner.fontMasterAtIndex_(Key)
elif isString(Key):
return self._owner.fontMasterForId_(Key)
else:
raise(KeyError)
def __setitem__(self, Key, FontMaster):
if type(Key) is int:
if Key < 0:
Key = self.__len__() + Key
self._owner.replaceFontMasterAtIndex_withFontMaster_(Key, FontMaster)
elif isString(Key):
OldFontMaster = self._owner.fontMasterForId_(Key)
self._owner.removeFontMaster_(OldFontMaster)
return self._owner.addFontMaster_(FontMaster)
def __delitem__(self, Key):
if type(Key) is int:
if Key < 0:
Key = self.__len__() + Key
removeFontMaster = self._owner.objectInFontMastersAtIndex_(Key)
else:
removeFontMaster = self._owner.fontMasterForId_(Key)
if removeFontMaster:
return self._owner.removeFontMasterAndContent_(removeFontMaster)
def __iter__(self):
for index in range(self._owner.countOfFontMasters()):
yield self._owner.fontMasterAtIndex_(index)
def __len__(self):
return self._owner.countOfFontMasters()
def values(self):
return self._owner.pyobjc_instanceMethods.fontMasters()
def setterMethod(self):
return self._owner.setFontMasters_
def append(self, FontMaster):
self._owner.addFontMaster_(FontMaster)
def remove(self, FontMaster):
self._owner.removeFontMasterAndContent_(FontMaster)
def insert(self, Index, FontMaster):
self._owner.insertFontMaster_atIndex_(FontMaster, Index)
def extend(self, FontMasters):
for FontMaster in FontMasters:
self._owner.addFontMaster_(FontMaster)
class FontInstancesProxy (Proxy):
def __getitem__(self, Key):
if type(Key) == slice:
return self.values().__getitem__(Key)
if type(Key) is int:
if Key < 0:
Key = self.__len__() + Key
return self._owner.objectInInstancesAtIndex_(Key)
else:
raise(KeyError)
def __setitem__(self, Key, Class):
if type(Key) is int:
if Key < 0:
Key = self.__len__() + Key
self._owner.replaceObjectInInstancesAtIndex_withObject_(Key, Class)
def __delitem__(self, Key):
if type(Key) is int:
if Key < 0:
Key = self.__len__() + Key
return self._owner.removeObjectFromInstancesAtIndex_(Key)
def __iter__(self):
for index in range(self._owner.countOfInstances()):
yield self._owner.objectInInstancesAtIndex_(index)
def append(self, Instance):
self._owner.addInstance_(Instance)
def extend(self, Instances):
for Instance in Instances:
self._owner.addInstance_(Instance)
def remove(self, Instance):
self._owner.removeInstance_(Instance)
def insert(self, Index, Instance):
self._owner.insertObject_inInstancesAtIndex_(Instance, Index)
def __len__(self):
return self._owner.countOfInstances()
def values(self):
return self._owner.pyobjc_instanceMethods.instances()
def setterMethod(self):
return self._owner.setInstances_
class FontAxesProxy (Proxy):
def __getitem__(self, Key):
if type(Key) == slice:
return self.values().__getitem__(Key)
if type(Key) is int:
if Key < 0:
Key = self.__len__() + Key
return self._owner.objectInAxesAtIndex_(Key)
else:
raise(KeyError)
def __setitem__(self, Key, Class):
if type(Key) is int:
if Key < 0:
Key = self.__len__() + Key
self._owner.replaceObjectInAxesAtIndex_withObject_(Key, Class)
def __delitem__(self, Key):
if type(Key) is int:
if Key < 0:
Key = self.__len__() + Key
return self._owner.removeObjectFromAxesAtIndex_(Key)
def __iter__(self):
for index in range(self._owner.countOfAxes()):
yield self._owner.objectInAxesAtIndex_(index)
def append(self, axis):
self._owner.addAxis_(axis)
def extend(self, Axes):
for axis in Axes:
self._owner.addAxis_(axis)
def remove(self, axis):
self._owner.removeAxis_(axis)
def insert(self, Index, axis):
self._owner.insertObject_inAxesAtIndex_(axis, Index)
def __len__(self):
return self._owner.countOfAxes()
def values(self):
return self._owner.pyobjc_instanceMethods.axes()
def setterMethod(self):
return self._owner.setAxes_
class MasterAxesProxy (Proxy):
def __getitem__(self, Key):
if type(Key) is int:
if Key < 0:
Key = self.__len__() + Key
count = self.__len__()
if Key >= count:
raise IndexError("list index out of range")
if Key == 0:
return self._owner.weightValue
if Key == 1:
return self._owner.widthValue
if Key == 2:
return self._owner.customValue
if Key == 3:
return self._owner.customValue1
if Key == 4:
return self._owner.customValue2
if Key == 5:
return self._owner.customValue3
raise(KeyError)
def __setitem__(self, Key, value):
if type(Key) is int:
if Key < 0:
Key = self.__len__() + Key
count = self.__len__()
if Key >= count:
raise IndexError("list index out of range")
if Key == 0:
self._owner.weightValue = value
return
if Key == 1:
self._owner.widthValue = value
return
if Key == 2:
self._owner.customValue = value
return
if Key == 3:
self._owner.customValue1 = value
return
if Key == 4:
self._owner.customValue2 = value
return
if Key == 5:
self._owner.customValue3 = value
return
raise(KeyError)
def __delitem__(self, Key):
raise("Can't delete axis values")
def __iter__(self):
for index in range(self.__len__()):
yield self.__getitem__(index)
def append(self, value):
raise TypeError("Can't append axis values")
def extend(self, value):
raise TypeError("Can't extend axis values")
def remove(self, value):
raise TypeError("Can't remove axis values")
def insert(self, Index, value):
raise TypeError("Can't insert axis values")
def __len__(self):
return min(6, self._owner.font.countOfAxes())
def values(self):
count = self.__len__()
values = []
for i in range(count):
values.append(self.__getitem__(i))
return values
def setter(self, values):
count = min(self.__len__(), len(values))
for i in range(count):
value = values[i]
self.__setitem__(i, value)
class InstanceAxesProxy (MasterAxesProxy):
def __getitem__(self, Key):
if type(Key) is int:
if Key < 0:
Key = self.__len__() + Key
count = self.__len__()
if Key >= count:
raise IndexError("list index out of range")
if Key == 0:
return self._owner.interpolationWeight()
if Key == 1:
return self._owner.interpolationWidth()
if Key == 2:
return self._owner.interpolationCustom()
if Key == 3:
return self._owner.interpolationCustom1()
if Key == 4:
return self._owner.interpolationCustom2()
if Key == 5:
return self._owner.interpolationCustom3()
raise(KeyError)
def __setitem__(self, Key, value):
if type(Key) is int:
if Key < 0:
Key = self.__len__() + Key
count = self.__len__()
if Key >= count:
raise IndexError("list index out of range")
if Key == 0:
self._owner.setInterpolationWeight_(value)
return
if Key == 1:
self._owner.setInterpolationWidth_(value)
return
if Key == 2:
self._owner.setInterpolationCustom_(value)
return
if Key == 3:
self._owner.setInterpolationCustom1_(value)
return
if Key == 4:
self._owner.setInterpolationCustom2_(value)
return
if Key == 5:
self._owner.setInterpolationCustom3_(value)
return
raise(KeyError)
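'''
MasterAxesProxy and InstanceAxesProxy (above) map axis indices 0–5 onto the legacy weight/width/custom interpolation values; they presumably back the :attr:`GSFontMaster.axes` and :attr:`GSInstance.axes` properties mentioned under "Major Changes in 2.5". A minimal sketch, assuming a font with at least two axes:
.. code-block:: python
master = Glyphs.font.masters[0]
print(master.axes[0]) # the master's weightValue
master.axes[1] = 100 # sets widthValue
'''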
class CustomParametersProxy(Proxy):
def __getitem__(self, Key):
if type(Key) == slice:
return self.values().__getitem__(Key)
if type(Key) is int:
return self._owner.objectInCustomParametersAtIndex_(Key)
else:
return self._owner.customValueForKey_(Key)
def __setitem__(self, Key, Parameter):
if type(Key) is int:
if Key < 0:
Key = self.__len__() + Key
Value = self._owner.objectInCustomParametersAtIndex_(Key)
if Value is not None:
Value.setValue_(objcObject(Parameter))
else:
raise ValueError
else:
self._owner.setCustomParameter_forKey_(objcObject(Parameter), objcObject(Key))
def __delitem__(self, Key):
if type(Key) is int:
if Key < 0:
Key = self.__len__() + Key
self._owner.removeObjectFromCustomParametersAtIndex_(Key)
else:
self._owner.removeObjectFromCustomParametersForKey_(Key)
def __contains__(self, item):
if isString(item):
return self._owner.customParameterForKey_(item) is not None
return self._owner.pyobjc_instanceMethods.customParameters().containsObject_(item)
def __iter__(self):
for index in range(self._owner.countOfCustomParameters()):
yield self._owner.objectInCustomParametersAtIndex_(index)
def append(self, Parameter):
self._owner.addCustomParameter_(Parameter)
def extend(self, Parameters):
for Parameter in Parameters:
self._owner.addCustomParameter_(Parameter)
def remove(self, Parameter):
self._owner.removeObjectFromCustomParametersForKey_(Parameter.name)
def insert(self, Index, Parameter):
customParameters = copy.copy(self.values())
customParameters.insert(Index, Parameter)
self._owner.setCustomParameters_(customParameters)
def __len__(self):
return self._owner.countOfCustomParameters()
def values(self):
return self._owner.pyobjc_instanceMethods.customParameters()
def setterMethod(self):
return self._owner.setCustomParameters_
class FontClassesProxy (Proxy):
def __getitem__(self, Key):
if type(Key) == slice:
return self.values().__getitem__(Key)
if type(Key) is int:
if Key < 0:
Key = self.__len__() + Key
return self._owner.objectInClassesAtIndex_(Key)
elif isString(Key):
if len(Key) > 0:
return self._owner.classForTag_(Key)
raise(KeyError)
def __setitem__(self, Key, Class):
if type(Key) is int:
if Key < 0:
Key = self.__len__() + Key
self._owner.replaceObjectInClassesAtIndex_withObject_(Key, Class)
def __delitem__(self, Key):
if type(Key) is int:
if Key < 0:
Key = self.__len__() + Key
return self._owner.removeObjectFromClassesAtIndex_(Key)
elif isString(Key):
Class = self._owner.classForTag_(Key)
if Class is not None:
return self._owner.removeClass_(Class)
def __iter__(self):
for index in range(self._owner.countOfClasses()):
yield self._owner.objectInClassesAtIndex_(index)
def append(self, Class):
self._owner.addClass_(Class)
def extend(self, Classes):
for Class in Classes:
self._owner.addClass_(Class)
def remove(self, Class):
self._owner.removeClass_(Class)
def insert(self, Index, Class):
self._owner.insertObject_inClassesAtIndex_(Class, Index)
def __len__(self):
return self._owner.countOfClasses()
def values(self):
return self._owner.pyobjc_instanceMethods.classes()
def setterMethod(self):
return self._owner.setClasses_
class FontFeaturesProxy (Proxy):
def __getitem__(self, Key):
if type(Key) == slice:
return self.values().__getitem__(Key)
if type(Key) is int:
if Key < 0:
Key = self.__len__() + Key
return self._owner.objectInFeaturesAtIndex_(Key)
if isString(Key):
return self._owner.featureForTag_(Key)
else:
raise(KeyError)
def __setitem__(self, Key, Feature):
if type(Key) is int:
if Key < 0:
Key = self.__len__() + Key
self._owner.replaceObjectInFeaturesAtIndex_withObject_(Key, Feature)
def __delitem__(self, Key):
if type(Key) is int:
if Key < 0:
Key = self.__len__() + Key
return self._owner.removeObjectFromFeaturesAtIndex_(Key)
elif isString(Key):
Feature = self._owner.featureForTag_(Key)
if Feature is not None:
return self._owner.removeFeature_(Feature)
def __iter__(self):
for index in range(self._owner.countOfFeatures()):
yield self._owner.objectInFeaturesAtIndex_(index)
def append(self, Feature):
self._owner.addFeature_(Feature)
def extend(self, Features):
for Feature in Features:
self._owner.addFeature_(Feature)
def remove(self, Class):
self._owner.removeFeature_(Class)
def insert(self, Index, Class):
self._owner.insertObject_inFeaturesAtIndex_(Class, Index)
def __len__(self):
return self._owner.countOfFeatures()
def text(self):
LineList = []
for Feature in self._owner.pyobjc_instanceMethods.features():
LineList.append("feature ")
LineList.append(Feature.name)
LineList.append(" {\n")
LineList.append(" " + Feature.code)
LineList.append("\n} ")
LineList.append(Feature.name)
LineList.append(" ;\n")
return "".join(LineList)
def values(self):
return self._owner.pyobjc_instanceMethods.features()
def setterMethod(self):
return self._owner.setFeatures_
class FontFeaturePrefixesProxy (Proxy):
def __getitem__(self, Key):
if type(Key) == slice:
return self.values().__getitem__(Key)
if type(Key) is int:
if Key < 0:
Key = self.__len__() + Key
return self._owner.objectInFeaturePrefixesAtIndex_(Key)
if isString(Key):
return self._owner.featurePrefixForTag_(Key)
else:
raise(KeyError)
def __setitem__(self, Key, Feature):
if type(Key) is int:
if Key < 0:
Key = self.__len__() + Key
self._owner.replaceObjectInFeaturePrefixesAtIndex_withObject_(Key, Feature)
def __delitem__(self, Key):
if type(Key) is int:
if Key < 0:
Key = self.__len__() + Key
return self._owner.removeObjectFromFeaturePrefixesAtIndex_(Key)
elif isString(Key):
FeaturePrefix = self._owner.featurePrefixForTag_(Key)
if FeaturePrefix is not None:
return self._owner.removeFeaturePrefix_(FeaturePrefix)
def append(self, FeaturePrefix):
self._owner.addFeaturePrefix_(FeaturePrefix)
def extend(self, FeaturePrefixes):
for FeaturePrefix in FeaturePrefixes:
self._owner.addFeaturePrefix_(FeaturePrefix)
def remove(self, FeaturePrefix):
self._owner.removeFeaturePrefix_(FeaturePrefix)
def insert(self, Index, FeaturePrefix):
self._owner.insertObject_inFeaturePrefixesAtIndex_(FeaturePrefix, Index)
def text(self):
LineList = []
for FeaturePrefix in self._owner.pyobjc_instanceMethods.featurePrefixes():
LineList.append("# " + FeaturePrefix.name + "\n") # newline keeps the name comment from swallowing the code
LineList.append(FeaturePrefix.code)
return "".join(LineList)
def values(self):
return self._owner.pyobjc_instanceMethods.featurePrefixes()
def setterMethod(self):
return self._owner.setFeaturePrefixes_
class UserDataProxy(Proxy):
def __getitem__(self, Key):
return self._owner.userDataForKey_(Key)
def __setitem__(self, Key, Value):
self._owner.setUserData_forKey_(objcObject(Value), Key)
def __delitem__(self, Key):
self._owner.removeUserDataForKey_(Key)
def values(self):
userData = self._owner.pyobjc_instanceMethods.userData()
if userData is not None:
return userData.allValues()
return None
def keys(self):
userData = self._owner.pyobjc_instanceMethods.userData()
if userData is not None:
return userData.allKeys()
return None
def __repr__(self):
return self._owner.pyobjc_instanceMethods.userData().__repr__()
def __contains__(self, item):
return self._owner.pyobjc_instanceMethods.userData().objectForKey_(item) is not None
def has_key(self, item):
return self._owner.pyobjc_instanceMethods.userData().objectForKey_(item) is not None
def get(self, key, default=None):
value = self.__getitem__(key)
if value is None:
return default
return value
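'''
UserDataProxy (above) provides dict-like access to the userData storage that fonts, glyphs, layers, and other objects carry. As with :attr:`defaults`, prefix your keys with a reverse domain name. A minimal usage sketch:
.. code-block:: python
glyph = Glyphs.font.glyphs['a']
glyph.userData['com.MyName.foo.bar'] = 'hello'
print(glyph.userData.get('com.MyName.foo.bar'))
'''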
class SmartComponentPoleMappingProxy(Proxy):
def __getitem__(self, Key):
poleMapping = self._owner.partSelection()
if poleMapping is not None:
return poleMapping[Key]
return None
def __setitem__(self, Key, Value):
poleMapping = self._owner.partSelection()
if poleMapping is None:
self._owner.setPartSelection_(NSMutableDictionary.dictionaryWithObject_forKey_(objcObject(Value), Key))
else:
poleMapping[Key] = objcObject(Value)
def __delitem__(self, Key):
poleMapping = self._owner.partSelection()
if poleMapping is not None:
del(poleMapping[Key])
def values(self):
poleMapping = self._owner.partSelection()
if poleMapping is not None:
return poleMapping.allValues()
return None
def __repr__(self):
poleMapping = self._owner.partSelection()
return str(poleMapping)
class smartComponentValuesProxy(Proxy):
def __getitem__(self, Key):
pieceSettings = self._owner.pieceSettings()
if pieceSettings is not None:
return pieceSettings[Key]
return None
def __setitem__(self, Key, Value):
pieceSettings = self._owner.pieceSettings()
if pieceSettings is None:
self._owner.setPieceSettings_({Key: objcObject(Value)})
else:
pieceSettings[Key] = objcObject(Value)
def __delitem__(self, Key):
pieceSettings = self._owner.pieceSettings()
if pieceSettings is not None:
del(pieceSettings[Key])
def values(self):
pieceSettings = self._owner.pieceSettings()
if pieceSettings is not None:
return pieceSettings.allValues()
return None
def __repr__(self):
pieceSettings = self._owner.pieceSettings()
return str(pieceSettings)
class LayersIterator:
def __init__(self, owner):
self.curInd = 0
self._owner = owner
def __iter__(self):
return self
def next(self):
if self._owner.parent:
if self.curInd < self._owner.parent.countOfFontMasters():
FontMaster = self._owner.parent.fontMasterAtIndex_(self.curInd)
Item = self._owner.layerForKey_(FontMaster.id)
else:
if self.curInd >= self._owner.countOfLayers():
raise StopIteration
ExtraLayerIndex = self.curInd - self._owner.parent.countOfFontMasters()
Index = 0
ExtraLayer = None
while ExtraLayerIndex >= 0:
ExtraLayer = self._owner.objectInLayersAtIndex_(Index)
if ExtraLayer.layerId != ExtraLayer.associatedMasterId:
ExtraLayerIndex -= 1
Index += 1
Item = ExtraLayer
self.curInd += 1
return Item
else:
if self.curInd >= self._owner.countOfLayers():
raise StopIteration
Item = self._owner.objectInLayersAtIndex_(self.curInd)
self.curInd += 1
return Item
return None
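__next__ = next # Python 3 iterator protocol; the class only defines the Python 2 style next() above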
class GlyphLayerProxy (Proxy):
def __getitem__(self, Key):
if type(Key) == slice:
return self.values().__getitem__(Key)
if type(Key) is int:
if Key < 0:
Key = self.__len__() + Key
if self._owner.parent:
if Key < self._owner.parent.countOfFontMasters():
FontMaster = self._owner.parent.fontMasterAtIndex_(Key)
return self._owner.layerForKey_(FontMaster.id)
else:
ExtraLayerIndex = Key - len(self._owner.parent.masters)
Index = 0
ExtraLayer = None
while ExtraLayerIndex >= 0:
ExtraLayer = self._owner.pyobjc_instanceMethods.layers().objectAtIndex_(Index)
if ExtraLayer.layerId != ExtraLayer.associatedMasterId:
ExtraLayerIndex = ExtraLayerIndex - 1
Index = Index + 1
return ExtraLayer
else:
return self._owner.pyobjc_instanceMethods.layers().objectAtIndex_(Key)
else:
layer = self._owner.layerForKey_(Key)
if layer is None:
layer = self._owner.layerForName_(Key)
return layer
def __setitem__(self, Key, Layer):
if type(Key) is int and self._owner.parent:
if Key < 0:
Key = self.__len__() + Key
FontMaster = self._owner.parent.fontMasterAtIndex_(Key)
Key = FontMaster.id
return self._owner.setLayer_forKey_(Layer, Key)
def __delitem__(self, Key):
if type(Key) is int and self._owner.parent:
if Key < 0:
Key = self.__len__() + Key
Layer = self.__getitem__(Key)
Key = Layer.layerId
return self._owner.removeLayerForKey_(Key)
def __iter__(self):
return LayersIterator(self._owner)
def __len__(self):
return self._owner.countOfLayers()
def values(self):
return self._owner.pyobjc_instanceMethods.layers().allValues()
def append(self, Layer):
if not Layer.associatedMasterId:
Layer.associatedMasterId = self._owner.parent.masters[0].id
self._owner.setLayer_forKey_(Layer, NSString.UUID())
def extend(self, Layers):
for Layer in Layers:
self.append(Layer)
def remove(self, Layer):
return self._owner.removeLayerForKey_(Layer.layerId)
def insert(self, Index, Layer):
self.append(Layer)
def setter(self, values):
newLayers = NSMutableDictionary.dictionary()
if type(values) == list or type(values) == tuple or type(values) == type(self):
for layer in values:
newLayers[layer.layerId] = layer
elif type(values) == dict or isinstance(values, NSDictionary):
for (key, layer) in values.items():
layer.layerId = key
newLayers[key] = layer
else:
raise TypeError
self._owner.setLayers_(newLayers)
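'''
GlyphLayerProxy (above) resolves integer indices to master layers first (in font master order) and then to any extra, non-master layers; string keys are looked up as a layerId first and as a layer name second. A minimal sketch, assuming the glyph.layers wiring elsewhere in this file:
.. code-block:: python
glyph = Glyphs.font.glyphs['a']
layer = glyph.layers[0] # layer of the first master
sameLayer = glyph.layers[Glyphs.font.masters[0].id] # same layer, addressed by master id
'''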
class LayerComponentsProxy (Proxy):
def __getitem__(self, Key):
if type(Key) == slice:
return self.values().__getitem__(Key)
if Key < 0:
Key = self.__len__() + Key
return self._owner.componentAtIndex_(Key)
def __setitem__(self, Key, Component):
if Key < 0:
Key = self.__len__() + Key
self._owner.setComponent_atIndex_(Component, Key)
def __delitem__(self, Key):
if Key < 0:
Key = self.__len__() + Key
self._owner.removeComponentAtIndex_(Key)
def __copy__(self):
return [x.copy() for x in self.values()]
def append(self, Component):
self._owner.addComponent_(Component)
def extend(self, Components):
for Component in Components:
self._owner.addComponent_(Component)
def insert(self, Index, Component):
self._owner.insertComponent_atIndex_(Component, Index)
def remove(self, Component):
self._owner.removeComponent_(Component)
def values(self):
return self._owner.pyobjc_instanceMethods.components()
def setterMethod(self):
return self._owner.setComponents_
class GlyphSmartComponentAxesProxy (Proxy):
def __getitem__(self, Key):
if type(Key) == slice:
return self.values().__getitem__(Key)
if isinstance(Key, int):
if Key < 0:
Key = self.__len__() + Key
return self._owner.objectInPartsSettingsAtIndex_(Key)
if isString(Key):
for partSetting in self._owner.partsSettings():
if partSetting.name == Key:
return partSetting
return None
def __setitem__(self, Key, SmartComponentProperty):
if type(Key) is int:
if Key < 0:
Key = self.__len__() + Key
self._owner.insertObject_inPartsSettingsAtIndex_(SmartComponentProperty, Key)
def __delitem__(self, Key):
if type(Key) is int:
if Key < 0:
Key = self.__len__() + Key
self._owner.removeObjectFromPartsSettingsAtIndex_(Key)
def append(self, SmartComponentProperty):
self._owner.addPartsSetting_(SmartComponentProperty)
def values(self):
return self._owner.partsSettings()
def setterMethod(self):
return self._owner.setPartsSettings_
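'''
GlyphSmartComponentAxesProxy (above) lists a glyph's Smart Component interpolation axes (its partsSettings), addressable by index or by axis name. A minimal sketch, assuming the glyph.smartComponentAxes wiring elsewhere in this file:
.. code-block:: python
glyph = Glyphs.font.glyphs['_part.shoulder']
for axis in glyph.smartComponentAxes:
print(axis.name, axis.bottomValue, axis.topValue)
'''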
class LayerGuideLinesProxy (Proxy):
def __getitem__(self, Key):
if type(Key) == slice:
return self.values().__getitem__(Key)
elif type(Key) == int:
if Key < 0:
Key = self.__len__() + Key
return self._owner.guideLineAtIndex_(Key)
		raise KeyError
def __setitem__(self, Key, Component):
self._owner.setGuideLine_atIndex_(Component, Key)
def __delitem__(self, Key):
self._owner.removeGuideLineAtIndex_(Key)
def __copy__(self):
return [x.copy() for x in self.values()]
def append(self, GuideLine):
self._owner.addGuideLine_(GuideLine)
def extend(self, GuideLines):
for GuideLine in GuideLines:
self._owner.addGuideLine_(GuideLine)
def insert(self, Index, GuideLine):
self._owner.insertGuideLine_atIndex_(GuideLine, Index)
def remove(self, GuideLine):
self._owner.removeGuideLine_(GuideLine)
def values(self):
return self._owner.pyobjc_instanceMethods.guideLines()
def setterMethod(self):
return self._owner.setGuideLines_
class LayerAnnotationProxy (Proxy):
def __getitem__(self, Key):
if type(Key) == slice:
return self.values().__getitem__(Key)
elif type(Key) == int:
if Key < 0:
Key = self.__len__() + Key
return self._owner.objectInAnnotationsAtIndex_(Key)
		raise KeyError
def __setitem__(self, Key, Annotation):
self._owner.insertObject_inAnnotationsAtIndex_(Annotation, Key)
def __delitem__(self, Key):
self._owner.removeObjectFromAnnotationsAtIndex_(Key)
def __copy__(self):
return [x.copy() for x in self.values()]
def append(self, Annotation):
self._owner.addAnnotation_(Annotation)
def extend(self, Annotations):
for Annotation in Annotations:
self._owner.addAnnotation_(Annotation)
def insert(self, Index, Annotation):
annotations = self.values()
annotations.insert(Index, Annotation)
self._owner.setAnnotations_(annotations)
def remove(self, Annotation):
self._owner.removeAnnotation_(Annotation)
def values(self):
return self._owner.pyobjc_instanceMethods.annotations()
def setterMethod(self):
return self._owner.setAnnotations_
class LayerHintsProxy (Proxy):
def __getitem__(self, Key):
if type(Key) == slice:
return self.values().__getitem__(Key)
elif type(Key) == int:
if Key < 0:
Key = self.__len__() + Key
return self._owner.hintAtIndex_(Key)
		raise KeyError
def __setitem__(self, Key, Component):
self._owner.setHint_atIndex_(Component, Key)
def __delitem__(self, Key):
self._owner.removeObjectFromHintsAtIndex_(Key)
def __copy__(self):
return [x.copy() for x in self.values()]
def append(self, Hint):
self._owner.addHint_(Hint)
def extend(self, Hints):
for Hint in Hints:
self._owner.addHint_(Hint)
def insert(self, Index, Hint):
self._owner.insertHint_atIndex_(Hint, Index)
def remove(self, Hint):
self._owner.removeHint_(Hint)
def values(self):
return self._owner.pyobjc_instanceMethods.hints()
def setterMethod(self):
return self._owner.setHints_
class LayerAnchorsProxy (Proxy):
"""layer.anchors is a dict!!!"""
def __getitem__(self, Key):
if isString(Key):
return self._owner.anchorForName_(Key)
else:
raise KeyError
def __setitem__(self, Key, Anchor):
if isString(Key):
Anchor.setName_(Key)
self._owner.addAnchor_(Anchor)
else:
raise TypeError
def __delitem__(self, Key):
if isString(Key):
self._owner.removeAnchorWithName_(Key)
else:
raise TypeError
def __copy__(self):
return [x.copy() for x in self.values()]
def items(self):
Items = []
for key in self.keys():
Value = self._owner.anchorForName_(key)
Items.append((key, Value))
return Items
def values(self):
if self._owner.pyobjc_instanceMethods.anchors() is not None:
return self._owner.pyobjc_instanceMethods.anchors().allValues()
else:
return []
def keys(self):
if self._owner.pyobjc_instanceMethods.anchors() is not None:
return self._owner.pyobjc_instanceMethods.anchors().allKeys()
else:
return []
def append(self, Anchor):
self._owner.addAnchor_(Anchor)
def extend(self, Anchors):
for Anchor in Anchors:
self._owner.addAnchor_(Anchor)
def remove(self, Anchor):
self._owner.removeAnchor_(Anchor)
def insert(self, Index, Anchor):
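		# anchors are keyed by name, so Index is ignored and the Anchor is appended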
self.append(Anchor)
def __len__(self):
return self._owner.countOfAnchors()
def setter(self, values):
newAnchors = NSMutableDictionary.dictionary()
if isinstance(values, (list, tuple)):
for anchor in values:
newAnchors[anchor.name] = anchor
elif isinstance(values, (NSDictionary, dict)):
for (key, anchor) in values.items():
newAnchors[anchor.name] = anchor
elif values is None:
pass
elif type(values) == type(self):
for anchor in values.values():
newAnchors[anchor.name] = anchor
else:
raise TypeError
self._owner.setAnchors_(newAnchors)
class LayerPathsProxy (Proxy):
def __getitem__(self, idx):
if type(idx) == slice:
return self.values().__getitem__(idx)
if idx < 0:
idx = self._owner.countOfPaths() + idx
return self._owner.pathAtIndex_(idx)
def __setitem__(self, idx, Path):
if idx < 0:
idx = self._owner.countOfPaths() + idx
self._owner.replacePathAtIndex_withPath_(idx, Path)
def __delitem__(self, idx):
if idx < 0:
idx = self._owner.countOfPaths() + idx
self._owner.removePathAtIndex_(idx)
def __copy__(self):
return [x.copy() for x in self.values()]
def append(self, Path):
if isinstance(Path, GSPath):
self._owner.addPath_(Path)
else:
raise ValueError
def extend(self, Paths):
if type(Paths) == type(self):
for path in Paths.values():
self._owner.addPath_(path)
elif isinstance(Paths, (list, tuple)):
for Path in Paths:
self.append(Path)
else:
raise ValueError
def remove(self, Path):
self._owner.removePath_(Path)
def insert(self, Index, Path):
self._owner.insertPath_atIndex_(Path, Index)
def values(self):
return self._owner.pyobjc_instanceMethods.paths()
def setterMethod(self):
return self._owner.setPaths_
class LayerSelectionProxy (Proxy):
def __getitem__(self, idx):
if type(idx) == slice:
return self.values().__getitem__(idx)
return self._owner.pyobjc_instanceMethods.selection().objectAtIndex_(idx)
def values(self):
return self._owner.pyobjc_instanceMethods.selection().array()
def append(self, object):
self._owner.addSelection_(object)
def extend(self, objects):
self._owner.addObjectsFromArrayToSelection_(list(objects))
def remove(self, object):
self._owner.removeObjectFromSelection_(object)
def insert(self, Index, object):
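		# the selection has no meaningful order, so Index is ignored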
self._owner.addSelection_(object)
def setterMethod(self):
return self._owner.setSelection_
class PathNodesProxy (Proxy):
def __getitem__(self, idx):
if type(idx) == slice:
return self.values().__getitem__(idx)
return self._owner.nodeAtIndex_(idx)
def __setitem__(self, idx, Node):
self._owner.replaceObjectInNodesAtIndex_withObject_(idx, Node)
def __delitem__(self, idx):
self._owner.removeObjectFromNodesAtIndex_(idx)
def __len__(self):
return self._owner.countOfNodes()
def append(self, Node):
self._owner.addNode_(Node)
def remove(self, Node):
self._owner.removeNode_(Node)
def insert(self, Index, Node):
self._owner.insertNode_atIndex_(Node, Index)
def extend(self, objects):
self._owner.addNodes_(list(objects))
def values(self):
return self._owner.pyobjc_instanceMethods.nodes()
def setterMethod(self):
return self._owner.setNodes_
class FontTabsProxy (Proxy):
def __getitem__(self, idx):
if type(idx) == slice:
return self.values().__getitem__(idx)
if self._owner.parent:
if type(idx) is int:
if idx < 0:
idx = self.__len__() + idx
return self._owner.parent.windowController().tabBarControl().tabItemAtIndex_(idx + 1)
else:
				raise KeyError
else:
raise Exception("The font is not connected to a document object")
def __setitem__(self, Key, Tab):
if type(Key) is int:
			raise NotImplementedError
else:
			raise KeyError
def __delitem__(self, idx):
if type(idx) is int:
if idx < 0:
idx = self.__len__() + idx
Tab = self._owner.parent.windowController().tabBarControl().tabItemAtIndex_(idx + 1)
self._owner.parent.windowController().tabBarControl().closeTabItem_(Tab)
else:
			raise KeyError
def __iter__(self):
for idx in range(self.__len__()):
yield self.__getitem__(idx)
def __len__(self):
return self._owner.parent.windowController().tabBarControl().countOfTabItems() - 1
def values(self):
return self._owner.parent.windowController().tabBarControl().tabItems()[1:]
# Functions shared by all user-selectable elements in a layer (nodes, anchors, etc.)
def ObjectInLayer_selected(self):
try:
return self in self.layer.selection
except:
return False
def SetObjectInLayer_selected(self, state):
# Add to selection
if state and self not in self.layer.selection:
self.layer.selection.append(self)
# Remove
elif not state and self in self.layer.selection:
self.layer.selection.remove(self)
##################################################################################
#
#
#
# GSFont
#
#
#
##################################################################################
def ________________(): pass
def ____GSFont____(): pass
def ________________(): pass
'''
:mod:`GSFont`
===============================================================================
Implementation of the font object. This object is host to the :class:`masters <GSFontMaster>` used for interpolation. Even when no interpolation is involved, for the sake of object model consistency there will still be one master and one instance representing a single font.
Also, the :class:`glyphs <GSGlyph>` are attached to the Font object right here, not one level down to the masters. The different masters’ glyphs are available as :class:`layers <GSLayer>` attached to the glyph objects which are attached here.
.. class:: GSFont()
Properties
.. autosummary::
parent
masters
axes
instances
glyphs
classes
features
featurePrefixes
copyright
designer
designerURL
manufacturer
manufacturerURL
versionMajor
versionMinor
date
familyName
upm
note
kerning
userData
grid
gridSubDivisions
gridLength
keyboardIncrement
disablesNiceNames
customParameters
selection
selectedLayers
selectedFontMaster
masterIndex
currentText
tabs
fontView
currentTab
filepath
tool
tools
appVersion
Functions
.. autosummary::
save()
close()
show()
disableUpdateInterface()
enableUpdateInterface()
kerningForPair()
setKerningForPair()
removeKerningForPair()
newTab()
updateFeatures()
compileFeatures()
**Properties**
'''
def Font__new__(typ, *args, **kwargs):
if len(args) > 0 and isinstance(args[0], (str, unicode)):
path = args[0]
URL = NSURL.fileURLWithPath_(path)
typeName = NSWorkspace.sharedWorkspace().typeOfFile_error_(path, None)[0]
if typeName is not None:
Doc = GSDocument.alloc().initWithContentsOfURL_ofType_error_(URL, typeName, None)
if Doc is not None:
return Doc[0].font
raise Exception("Unable to open font: %s", path)
return GSFont.alloc().init()
GSFont.__new__ = staticmethod(Font__new__)
def Font__init__(self, path=None):
pass
GSFont.__init__ = Font__init__
def Font__repr__(self):
return "<GSFont \"%s\" v%s.%s with %s masters and %s instances>" % (self.familyName, self.versionMajor, self.versionMinor, len(self.masters), len(self.instances))
GSFont.__repr__ = python_method(Font__repr__)
def Font__copy__(self, memo=None):
font = self.copy()
font.setParent_(self.parent)
return font
GSFont.mutableCopyWithZone_ = Font__copy__
GSFont.parent = property(lambda self: self.pyobjc_instanceMethods.parent())
'''
.. attribute:: parent
Returns the internal NSDocument document. Read-only.
:type: NSDocument
'''
GSFont.masters = property(lambda self: FontFontMasterProxy(self),
lambda self, value: FontFontMasterProxy(self).setter(value))
'''
.. attribute:: masters
Collection of :class:`GSFontMaster` objects.
:type: list
'''
GSInterpolationFontProxy.masters = property(lambda self: FontFontMasterProxy(self))
GSFont.instances = property(lambda self: FontInstancesProxy(self),
lambda self, value: FontInstancesProxy(self).setter(value))
'''
.. attribute:: instances
Collection of :class:`GSInstance` objects.
:type: list
'''
GSFont.axes = property(lambda self: FontAxesProxy(self),
lambda self, value: FontAxesProxy(self).setter(value))
'''
.. attribute:: axes
a list of dicts:
{"Name":"Weight", "Tag":"wght"}
:type: list
.. versionadded:: 2.5
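For example, a minimal sketch (axis names and tags are illustrative):
.. code-block:: python
# define a weight and a width axis
font.axes = [{"Name": "Weight", "Tag": "wght"}, {"Name": "Width", "Tag": "wdth"}]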
'''
def __GSFont_getitem__(self, value):
return self.glyphForName_(value)
GSFont.__getitem__ = __GSFont_getitem__
GSFont.glyphs = property(lambda self: FontGlyphsProxy(self),
lambda self, value: FontGlyphsProxy(self).setter(value))
GSInterpolationFontProxy.glyphs = property(lambda self: FontGlyphsProxy(self),
lambda self, value: FontGlyphsProxy(self).setter(value))
'''
.. attribute:: glyphs
Collection of :class:`GSGlyph` objects. Returns a list, but you may also access glyphs by index, by glyph name, or (as of v2.4) by character as key.
.. code-block:: python
# Access all glyphs
for glyph in font.glyphs:
print(glyph)
<GSGlyph "A" with 4 layers>
<GSGlyph "B" with 4 layers>
<GSGlyph "C" with 4 layers>
...
# Access one glyph
print(font.glyphs['A'])
<GSGlyph "A" with 4 layers>
# Access a glyph by character (new in v2.4.1)
print(font.glyphs[u'Ư'])
<GSGlyph "Uhorn" with 4 layers>
# Access a glyph by unicode (new in v2.4.1)
print(font.glyphs['01AF'])
<GSGlyph "Uhorn" with 4 layers>
# Add a glyph
font.glyphs.append(GSGlyph('adieresis'))
# Duplicate a glyph under a different name
newGlyph = font.glyphs['A'].copy()
newGlyph.name = 'A.alt'
font.glyphs.append(newGlyph)
# Delete a glyph
del(font.glyphs['A.alt'])
:type: list, dict
'''
GSFont.classes = property(lambda self: FontClassesProxy(self),
lambda self, value: FontClassesProxy(self).setter(value))
'''
.. attribute:: classes
Collection of :class:`GSClass` objects, representing OpenType glyph classes.
:type: list
.. code-block:: python
# add a class
font.classes.append(GSClass('uppercaseLetters', 'A B C D E'))
# access all classes
for class in font.classes:
print(class.name)
# access one class
print(font.classes['uppercaseLetters'].code)
# delete a class
del(font.classes['uppercaseLetters'])
'''
GSFont.features = property(lambda self: FontFeaturesProxy(self),
lambda self, value: FontFeaturesProxy(self).setter(value))
'''
.. attribute:: features
Collection of :class:`GSFeature` objects, representing OpenType features.
:type: list
.. code-block:: python
# add a feature
font.features.append(GSFeature('liga', 'sub f i by fi;'))
# access all features
for feature in font.features:
print(feature.code)
# access one feature
print(font.features['liga'].code)
# delete a feature
del(font.features['liga'])
'''
GSFont.featurePrefixes = property(lambda self: FontFeaturePrefixesProxy(self),
lambda self, value: FontFeaturePrefixesProxy(self).setter(value))
'''
.. attribute:: featurePrefixes
Collection of :class:`GSFeaturePrefix` objects, containing stuff that needs to be outside of the OpenType features.
:type: list
.. code-block:: python
# add a prefix
font.featurePrefixes.append(GSFeaturePrefix('LanguageSystems', 'languagesystem DFLT dflt;'))
# access all prefixes
for prefix in font.featurePrefixes:
print(prefix.code)
# access one prefix
print(font.featurePrefixes['LanguageSystems'].code)
# delete
del(font.featurePrefixes['LanguageSystems'])
'''
GSFont.copyright = property(lambda self: self.pyobjc_instanceMethods.copyright(), lambda self, value: self.setCopyright_(value))
'''
.. attribute:: copyright
:type: unicode'''
GSFont.designer = property(lambda self: self.pyobjc_instanceMethods.designer(), lambda self, value: self.setDesigner_(value))
'''
.. attribute:: designer
:type: unicode'''
GSFont.designerURL = property(lambda self: self.pyobjc_instanceMethods.designerURL(), lambda self, value: self.setDesignerURL_(value))
'''
.. attribute:: designerURL
:type: unicode'''
GSFont.manufacturer = property(lambda self: self.pyobjc_instanceMethods.manufacturer(), lambda self, value: self.setManufacturer_(value))
'''
.. attribute:: manufacturer
:type: unicode'''
GSFont.manufacturerURL = property(lambda self: self.pyobjc_instanceMethods.manufacturerURL(), lambda self, value: self.setManufacturerURL_(value))
'''
.. attribute:: manufacturerURL
:type: unicode'''
GSFont.versionMajor = property(lambda self: self.pyobjc_instanceMethods.versionMajor(), lambda self, value: self.setVersionMajor_(value))
'''
.. attribute:: versionMajor
:type: int
'''
GSFont.versionMinor = property(lambda self: self.pyobjc_instanceMethods.versionMinor(), lambda self, value: self.setVersionMinor_(value))
'''
.. attribute:: versionMinor
:type: int
'''
def __get_date__(self):
return datetime.datetime.fromtimestamp(self.pyobjc_instanceMethods.date().timeIntervalSince1970())
def __set_date__(self, date):
if isinstance(date, datetime.datetime):
date = NSDate.alloc().initWithTimeIntervalSince1970_(time.mktime(date.timetuple()))
self.setDate_(date)
GSFont.date = property(lambda self: __get_date__(self), lambda self, value: __set_date__(self, value))
'''
.. attribute:: date
:type: NSDate
.. code-block:: python
print(font.date)
2015-06-08 09:39:05 +0000
# set date to now
font.date = NSDate.date()
'''
GSFont.familyName = property(lambda self: self.pyobjc_instanceMethods.familyName(),
lambda self, value: self.setFamilyName_(value))
'''
.. attribute:: familyName
Family name of the typeface.
:type: unicode'''
GSFont.upm = property(lambda self: self.unitsPerEm(), lambda self, value: self.setUnitsPerEm_(value))
'''
.. attribute:: upm
Units per Em
:type: int
'''
GSFont.note = property(lambda self: self.pyobjc_instanceMethods.note(),
lambda self, value: self.setNote_(value))
'''
.. attribute:: note
:type: unicode'''
GSFont.kerning = property(lambda self: self.pyobjc_instanceMethods.kerning(), lambda self, value: self.setKerning_(value))
'''
.. attribute:: kerning
A multi-level dictionary. The first level's key is the :attr:`GSFontMaster.id` (each master has its own kerning), the second level's key is the :attr:`GSGlyph.id` or class id (@MMK_L_XX) of the first glyph, the third level's key is a glyph id or class id (@MMK_R_XX) for the second glyph. The values are the actual kerning values.
To set a value, it is better to use the method :meth:`GSFont.setKerningForPair()`. This ensures better data integrity (and is faster).
:type: dict
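A minimal sketch of reading the nested structure directly; note that a pair raises a KeyError if it is not set:
.. code-block:: python
# kerning value between two glyphs in the first master
masterKerning = font.kerning[font.masters[0].id]
print(masterKerning[font.glyphs['T'].id][font.glyphs['A'].id])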
'''
GSFont.userData = property(lambda self: UserDataProxy(self))
'''
.. attribute:: userData
A dictionary to store user data. Use a unique key and only use objects that can be stored in a property list (string, list, dict, numbers, NSData) otherwise the data will not be recoverable from the saved file.
:type: dict
.. code-block:: python
# set value
font.userData['rememberToMakeCoffee'] = True
# delete value
del font.userData['rememberToMakeCoffee']
'''
GSFont.disablesNiceNames = property(lambda self: bool(self.pyobjc_instanceMethods.disablesNiceNames()), lambda self, value: self.setDisablesNiceNames_(value))
'''
.. attribute:: disablesNiceNames
Corresponds to the "Don't use nice names" setting from the Font Info dialog.
:type: bool
'''
GSFont.customParameters = property(lambda self: CustomParametersProxy(self))
'''
.. attribute:: customParameters
The custom parameters. List of :class:`GSCustomParameter` objects. You can access them by name or by index.
.. code-block:: python
# access all parameters
for parameter in font.customParameters:
print(parameter)
# set a parameter
font.customParameters['trademark'] = 'ThisFont is a trademark by MyFoundry.com'
# delete a parameter
del(font.customParameters['trademark'])
:type: list, dict
'''
GSFont.grid = property(lambda self: self.pyobjc_instanceMethods.gridMain(), lambda self, value: self.setGridMain_(value))
'''
.. attribute:: grid
.. versionadded:: 2.3
Corresponds to the "Grid spacing" setting from the Info dialog.
:type: int
'''
GSFont.gridSubDivisions = property(lambda self: self.pyobjc_instanceMethods.gridSubDivision(), lambda self, value: self.setGridSubDivision_(value))
'''
.. attribute:: gridSubDivisions
.. versionadded:: 2.3
Corresponds to the "Grid sub divisions" setting from the Info dialog.
:type: int
'''
GSFont.gridLength = property(lambda self: self.pyobjc_instanceMethods.gridLength())
'''
.. attribute:: gridLength
Precalculated size of the grid for rounding purposes; the result of dividing :attr:`grid` by :attr:`gridSubDivisions`. Read-only.
:type: float
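For example, with grid set to 1 and gridSubDivisions set to 10:
.. code-block:: python
print(font.gridLength)
0.1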
'''
GSFont.disablesAutomaticAlignment = property(lambda self: bool(self.pyobjc_instanceMethods.disablesAutomaticAlignment()), lambda self, value: self.setDisablesAutomaticAlignment_(value))
'''
.. attribute:: disablesAutomaticAlignment
:type: bool
'''
GSFont.keyboardIncrement = property(lambda self: self.pyobjc_instanceMethods.keyboardIncrement(), lambda self, value: self.setKeyboardIncrement_(value))
'''
.. attribute:: keyboardIncrement
Distance of movement by arrow keys. Default: 1
:type: float
.. versionadded:: 2.3.1
'''
def Font_GetSelectedGlyphs(self):
return self.parent.windowController().glyphsController().selectedObjects()
def Font_SetSelectedGlyphs(self, value):
if not type(value) in (list, tuple):
raise ValueError('Argument needs to be a list.')
self.parent.windowController().glyphsController().setSelectedObjects_(value)
GSFont.selection = property(lambda self: Font_GetSelectedGlyphs(self), lambda self, value: Font_SetSelectedGlyphs(self, value))
'''
.. attribute:: selection
.. versionadded:: 2.3
Returns a list of all selected glyphs in the Font View.
:type: list
'''
def Font_selectedLayers(self):
return self.parent.selectedLayers()
GSFont.selectedLayers = property(lambda self: Font_selectedLayers(self))
'''
.. attribute:: selectedLayers
Returns a list of all selected layers in the active tab.
If a glyph is being edited, it will be the only glyph returned in this list. Otherwise the list will contain all glyphs selected with the Text tool.
:type: list
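For example:
.. code-block:: python
# print the name of each glyph whose layer is selected
for layer in font.selectedLayers:
	print(layer.parent.name)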
'''
GSFont.selectedFontMaster = property(lambda self: self.parent.selectedFontMaster())
'''
.. attribute:: selectedFontMaster
Returns the active master (selected in the toolbar).
:type: :class:`GSFontMaster`
'''
GSFont.masterIndex = property(lambda self: self.parent.windowController().masterIndex(), lambda self, value: self.parent.windowController().setMasterIndex_(value))
'''
.. attribute:: masterIndex
Returns the index of the active master (selected in the toolbar).
:type: int
'''
def __current_Text__(self):
try:
return self.parent.windowController().activeEditViewController().graphicView().displayString()
except:
pass
return None
def __set__current_Text__(self, String):
# if String is None:
# String = ""
self.parent.windowController().activeEditViewController().graphicView().setDisplayString_(String)
GSFont.currentText = property(lambda self: __current_Text__(self),
lambda self, value: __set__current_Text__(self, value))
'''
.. attribute:: currentText
The text of the current Edit view.
Unencoded and non-ASCII glyphs are represented with a slash and the glyph name (e.g. /a.sc). Setting unicode strings works.
:type: unicode
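For example:
.. code-block:: python
# show a string in the current Edit view, with a glyph name escaped with a slash
font.currentText = 'Hamburg/a.sc'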
'''
# Tab interaction:
GSFont.tabs = property(lambda self: FontTabsProxy(self))
'''
.. attribute:: tabs
List of open Edit view tabs in UI, as list of :class:`GSEditViewController` objects.
.. code-block:: python
# open new tab with text
font.newTab('hello')
# access all tabs
for tab in font.tabs:
print(tab)
# close last tab
font.tabs[-1].close()
:type: list
'''
GSFont.fontView = property(lambda self: self.parent.windowController().tabBarControl().tabItemAtIndex_(0))
'''
.. attribute:: fontView
:type: GSFontViewController
'''
def __GSFont__currentTab__(self):
try:
return self.parent.windowController().activeEditViewController()
except:
return None
def __GSFont__set_currentTab__(self, TabItem):
self.parent.windowController().tabBarControl().selectTabItem_(TabItem)
GSFont.currentTab = property(lambda self: __GSFont__currentTab__(self),
lambda self, value: __GSFont__set_currentTab__(self, value))
'''
.. attribute:: currentTab
Active Edit view tab.
:type: :class:`GSEditViewController`
'''
def Font_filepath(self):
if self.parent is not None and self.parent.fileURL() is not None:
return self.parent.fileURL().path()
else:
return None
GSFont.filepath = property(lambda self: Font_filepath(self))
'''
.. attribute:: filepath
On-disk location of GSFont object.
:type: unicode
'''
GSFont.toolIndex = property(lambda self: self.parent.windowController().selectedToolIndex(), lambda self, value: self.parent.windowController().setSelectedToolIndex_(value))
toolClassAbrevations = { # abbreviation : className
"SelectTool": "GlyphsToolSelect",
"DrawTool": "GlyphsToolDraw",
"OtherTool": "GlyphsToolOther",
"PenTool": "PenTool",
"PrimitivesTool": "GlyphsToolPrimitives",
"RotateTool": "GlyphsToolRotate",
"ScaleTool": "GlyphsToolScale",
"TextTool": "GlyphsToolText",
"AnnotationTool": "AnnotationTool",
"HandTool": "GlyphsToolHand",
"ZoomTool": "GlyphsToolZoom",
"MeasurementTool": "GlyphsToolMeasurement",
"TrueTypeTool": "GlyphsToolTrueTypeInstructor",
}
toolClassAbrevationsReverse = dict((v, k) for k, v in toolClassAbrevations.items())
def __GSFont_tool__(self):
toolIndex = self.toolIndex
tool = self.parent.windowController().toolInstances()[toolIndex]
toolClassName = tool.className()
if toolClassName in toolClassAbrevationsReverse:
toolClassName = toolClassAbrevationsReverse[toolClassName]
return toolClassName
def __GSFont_setTool__(self, toolName):
if toolName in toolClassAbrevations:
toolName = toolClassAbrevations[toolName]
toolClass = NSClassFromString(toolName)
if toolClass:
self.parent.windowController().setToolForClass_(toolClass)
else:
		sys.stderr.write('No tool found by the name "%s"\n' % (toolName))
GSFont.tool = property(lambda self: __GSFont_tool__(self), lambda self, value: __GSFont_setTool__(self, value))
'''
.. attribute:: tool
Name of tool selected in toolbar.
For available names including third-party plug-ins that come in the form of selectable tools, see `GSFont.tools` below.
.. code-block:: python
font.tool = 'SelectTool' # Built-in tool
font.tool = 'GlyphsAppSpeedPunkTool' # Third party plug-in
:type: string
.. versionadded:: 2.3
'''
def __GSFont_toolsList__(self):
tools = []
for tool in self.parent.windowController().toolInstances():
toolClassName = tool.className()
if toolClassName in toolClassAbrevationsReverse:
toolClassName = toolClassAbrevationsReverse[toolClassName]
tools.append(toolClassName)
return tools
GSFont.tools = property(lambda self: __GSFont_toolsList__(self))
'''
.. attribute:: tools
Returns a list of available tool names, including third-party plug-ins. Read-only.
:type: list
.. versionadded:: 2.3
'''
GSFont.appVersion = property(lambda self: self.pyobjc_instanceMethods.appVersion())
'''
.. attribute:: appVersion
Returns the version of the app with which the file was last saved. Read-only.
.. versionadded:: 2.5
**Functions**
'''
def Font__save__(self, path=None):
if self.parent is not None:
if path is None:
self.parent.saveDocument_(None)
else:
URL = NSURL.fileURLWithPath_(path)
if path.endswith('.glyphs'):
typeName = "com.schriftgestaltung.glyphs"
elif path.endswith('.ufo'):
typeName = "org.unifiedfontobject.ufo"
self.parent.writeSafelyToURL_ofType_forSaveOperation_error_(URL, typeName, 1, objc.nil)
elif path is not None:
Doc = GSDocument()
Doc.font = self
URL = NSURL.fileURLWithPath_(path)
if path.endswith('.glyphs'):
typeName = "com.schriftgestaltung.glyphs"
elif path.endswith('.ufo'):
typeName = "org.unifiedfontobject.ufo"
Doc.writeSafelyToURL_ofType_forSaveOperation_error_(URL, typeName, 1, objc.nil)
else:
raise("No path set")
GSFont.save = Font__save__
'''
.. function:: save([filePath])
Saves the font.
If no path is given, it saves to the existing location.
:param filePath: Optional file path
:type filePath: str
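For example:
.. code-block:: python
# save to the existing location
font.save()
# save a copy to a new path; the file extension selects the format (.glyphs or .ufo)
font.save('/Users/myself/Desktop/MyFamily.glyphs')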
'''
def Font__close__(self, ignoreChanges=True):
if self.parent:
if ignoreChanges:
self.parent.close()
else:
self.parent.canCloseDocumentWithDelegate_shouldCloseSelector_contextInfo_(None, None, None)
GSFont.close = Font__close__
'''
.. function:: close([ignoreChanges = True])
Closes the font.
:param ignoreChanges: Optional. Ignore changes to the font upon closing
:type ignoreChanges: bool
.. function:: disableUpdateInterface()
Disables interface updates and thus speeds up glyph processing. Call this before you do big changes to the font, or to its glyphs. Make sure that you call Font.enableUpdateInterface() when you are done.
.. function:: enableUpdateInterface()
This re-enables the interface update. Only makes sense to call if you have disabled it earlier.
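A typical pattern, sketched with try/finally so that the interface is re-enabled even if the processing fails (processGlyph is a hypothetical function):
.. code-block:: python
font.disableUpdateInterface()
try:
	for glyph in font.glyphs:
		processGlyph(glyph) # hypothetical processing function
finally:
	font.enableUpdateInterface()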
'''
def GSFont__show__(self):
if self not in Glyphs.fonts:
Glyphs.fonts.append(self)
else:
self.parent.windowController().showWindow_(None)
GSFont.show = GSFont__show__
'''
.. function:: show()
Makes font visible in the application, either by bringing an already open font window to the front or by appending a formerly invisible font object (such as the result of a `copy()` operation) as a window to the application.
.. versionadded:: 2.4.1
'''
def kerningForPair(self, FontMasterID, LeftKerningId, RightKerningId, direction=LTR):
	if not LeftKerningId[0] == '@':
		LeftKerningId = self.glyphs[LeftKerningId].id
	if not RightKerningId[0] == '@':
		RightKerningId = self.glyphs[RightKerningId].id
	return self.kerningForFontMasterID_LeftKey_RightKey_direction_(FontMasterID, LeftKerningId, RightKerningId, direction)
GSFont.kerningForPair = kerningForPair
'''
.. function:: kerningForPair(fontMasterId, leftKey, rightKey [, direction = LTR])
This returns the kerning value for the two specified glyphs (leftKey or rightKey is the glyph name) or a kerning group key (@MMK_X_XX).
:param fontMasterId: The id of the FontMaster
:type fontMasterId: str
:param leftKey: either a glyph name or a class name
:type leftKey: str
:param rightKey: either a glyph name or a class name
:type rightKey: str
:param direction: optional writing direction (see Constants). Default is LTR.
:type direction: str
:return: The kerning value
:rtype: float
.. code-block:: python
# print the kerning between w and e for the currently selected master
font.kerningForPair(font.selectedFontMaster.id, 'w', 'e')
-15.0
# print the kerning between group T and group A for the currently selected master
# ('L' = left side of the pair and 'R' = right side of the pair)
font.kerningForPair(font.selectedFontMaster.id, '@MMK_L_T', '@MMK_R_A')
-75.0
# in the same font, kerning between T and A would be zero, because they use group kerning instead.
font.kerningForPair(font.selectedFontMaster.id, 'T', 'A')
9.22337203685e+18 # (this is the maximum number for 64 bit. It is used as an empty value)
'''
def setKerningForPair(self, FontMasterID, LeftKerningId, RightKerningId, Value, direction=LTR):
	if not LeftKerningId[0] == '@':
		LeftKerningId = self.glyphs[LeftKerningId].id
	if not RightKerningId[0] == '@':
		RightKerningId = self.glyphs[RightKerningId].id
	self.setKerningForFontMasterID_LeftKey_RightKey_Value_direction_(FontMasterID, LeftKerningId, RightKerningId, Value, direction)
GSFont.setKerningForPair = setKerningForPair
'''
.. function:: setKerningForPair(fontMasterId, leftKey, rightKey, value [, direction = LTR])
This sets the kerning for the two specified glyphs (leftKey or rightKey is the glyph name) or a kerning group key (@MMK_X_XX).
:param fontMasterId: The id of the FontMaster
:type fontMasterId: str
:param leftKey: either a glyph name or a class name
:type leftKey: str
:param rightKey: either a glyph name or a class name
:type rightKey: str
:param value: kerning value
:type value: float
:param direction: optional writing direction (see Constants). Default is LTR.
:type direction: str
.. code-block:: python
# set kerning for group T and group A for currently selected master
# ('L' = left side of the pair and 'R' = right side of the pair)
font.setKerningForPair(font.selectedFontMaster.id, '@MMK_L_T', '@MMK_R_A', -75)
'''
def removeKerningForPair(self, FontMasterID, LeftKerningId, RightKerningId):
	if LeftKerningId[0] != '@':
		try:
			LeftKerningId = self.glyphs[LeftKerningId].id
		except:
			pass
	if RightKerningId[0] != '@':
		try:
			RightKerningId = self.glyphs[RightKerningId].id
		except:
			pass
	self.removeKerningForFontMasterID_LeftKey_RightKey_(FontMasterID, LeftKerningId, RightKerningId)
GSFont.removeKerningForPair = removeKerningForPair
'''
.. function:: removeKerningForPair(FontMasterId, LeftKey, RightKey)
Removes the kerning for the two specified glyphs (LeftKey or RightKey is the glyph name) or a kerning group key (@MMK_X_XX).
:param FontMasterId: The id of the FontMaster
:type FontMasterId: str
:param LeftKey: either a glyph name or a class name
:type LeftKey: str
:param RightKey: either a glyph name or a class name
:type RightKey: str
.. code-block:: python
# remove kerning for group T and group A for all masters
# ('L' = left side of the pair and 'R' = right side of the pair)
for master in font.masters:
font.removeKerningForPair(master.id, '@MMK_L_T', '@MMK_R_A')
'''
def __GSFont__addTab__(self, tabText=""):
if self.parent:
return self.parent.windowController().addTabWithString_(tabText)
return None
GSFont.newTab = __GSFont__addTab__
'''
.. function:: newTab([tabText])
Opens a new tab in the current document window, optionally with text, and returns that tab object.
:param tabText: Text or glyph names escaped with '/'
.. code-block:: python
# open new tab
font.newTab('abcdef')
# or
tab = font.newTab('abcdef')
print(tab)
'''
def __GSFont__updateFeatures__(self):
GSFeatureGenerator.alloc().init().makeFeatures_(self)
self.compileFeatures()
GSFont.updateFeatures = __GSFont__updateFeatures__
'''
.. function:: updateFeatures()
Updates all OpenType features and classes at once, including generating necessary new features and classes. Equivalent to the "Update" button in the features panel. This already includes the compilation of the features (see `compileFeatures()`).
.. versionadded:: 2.4
'''
def __GSFont__compileFeatures__(self):
self.compileTempFontError_(None)
GSFont.compileFeatures = __GSFont__compileFeatures__
'''
.. function:: compileFeatures()
Compiles the features, thus making the new feature code functionally available in the editor. Equivalent to the "Test" button in the features panel.
.. versionadded:: 2.5
'''
##################################################################################
#
#
#
# GSFontMaster
#
#
#
##################################################################################
def _______________________(): pass
def ____GSFontMaster____(): pass
def _______________________(): pass
'''
:mod:`GSFontMaster`
===============================================================================
Implementation of the master object. This corresponds with the "Masters" pane in the Font Info. In Glyphs.app, the glyphs of each master are reachable not here, but as :class:`layers <GSLayer>` attached to the :class:`glyphs <GSGlyph>` attached to the :class:`font <GSFont>` object. See the infographic on top for better understanding.
.. class:: GSFontMaster()
'''
GSFontMaster.__new__ = staticmethod(GSObject__new__)
def FontMaster__init__(self):
pass
GSFontMaster.__init__ = FontMaster__init__
def FontMaster__repr__(self):
return "<GSFontMaster \"%s\" width %s weight %s>" % (self.name, self.widthValue, self.weightValue)
GSFontMaster.__repr__ = python_method(FontMaster__repr__)
GSFontMaster.mutableCopyWithZone_ = GSObject__copy__
'''
.. autosummary::
id
name
weight
width
axes
weightValue
widthValue
customValue
customName
ascender
capHeight
xHeight
descender
italicAngle
verticalStems
horizontalStems
alignmentZones
blueValues
otherBlues
guides
userData
customParameters
font
**Properties**
'''
GSFontMaster.id = property(lambda self: self.pyobjc_instanceMethods.id(), lambda self, value: self.setId_(value))
'''
.. attribute:: id
Used to identify :class:`Layers` in the Glyph
see :attr:`GSGlyph.layers`
.. code-block:: python
# ID of first master
print(font.masters[0].id)
3B85FBE0-2D2B-4203-8F3D-7112D42D745E
# use this master to access the glyph's corresponding layer
print(glyph.layers[font.masters[0].id])
<GSLayer "Light" (A)>
:type: unicode
'''
GSFontMaster.font = property(lambda self: self.pyobjc_instanceMethods.font(), lambda self, value: self.setFont_(value))
'''
.. attribute:: font
Reference to the :class:`GSFont` object that contains the master. Normally this is set by the app; set it manually only if the master is not actually added to the font.
:type: GSFont
.. versionadded:: 2.5.2
'''
GSFontMaster.name = property(lambda self: self.pyobjc_instanceMethods.name(),
lambda self, value: self.setName_(value))
'''
.. attribute:: name
The human-readable identification of the master, e.g., "Bold Condensed".
:type: string
'''
GSFontMaster.axes = property(lambda self: MasterAxesProxy(self),
lambda self, value: MasterAxesProxy(self).setter(value))
'''
.. attribute:: axes
List of floats specifying the positions for each axis
.. code-block:: python
# setting a value for a specific axis
master.axes[2] = 12
# setting all values at once
master.axes = [100, 12, 3.5]
:type: list
.. versionadded:: 2.5.2
'''
# GSFontMaster.weight = property(lambda self: self.pyobjc_instanceMethods.weight(), lambda self, value: self.setWeight_(value))
'''
.. attribute:: weight
Human-readable weight name, chosen from list in Font Info. For the position in the interpolation design space, use :attr:`axes <GSFontMaster.axes>`.
.. deprecated:: 2.5.2
Use :attr:`GSFontMaster.name` instead.
:type: string
'''
# GSFontMaster.width = property(lambda self: self.pyobjc_instanceMethods.width(), lambda self, value: self.setWidth_(value))
'''
.. attribute:: width
Human-readable width name, chosen from list in Font Info. For the position in the interpolation design space, use :attr:`axes <GSFontMaster.axes>`.
.. deprecated:: 2.5.2
Use :attr:`GSFontMaster.name` instead.
:type: string
'''
# GSFontMaster.customName = property(lambda self: self.pyobjc_instanceMethods.custom(), lambda self, value: self.setCustom_(value))
'''
.. attribute:: customName
The name of the custom interpolation dimension.
.. deprecated:: 2.5.2
Use :attr:`GSFontMaster.axes` instead.
:type: string
'''
GSFontMaster.weightValue = property(lambda self: self.pyobjc_instanceMethods.weightValue(), lambda self, value: self.setWeightValue_(value))
'''
.. attribute:: weightValue
Value for interpolation in design space.
.. deprecated:: 2.5.2
Use :attr:`GSFontMaster.axes` instead.
:type: float
'''
GSFontMaster.widthValue = property(lambda self: self.pyobjc_instanceMethods.widthValue(), lambda self, value: self.setWidthValue_(value))
'''
.. attribute:: widthValue
Value for interpolation in design space.
.. deprecated:: 2.5.2
Use :attr:`GSFontMaster.axes` instead.
:type: float
'''
GSFontMaster.customValue = property(lambda self: self.pyobjc_instanceMethods.customValue(), lambda self, value: self.setCustomValue_(value))
'''
.. attribute:: customValue
Value for interpolation in design space.
.. deprecated:: 2.5.2
Use :attr:`GSFontMaster.axes` instead.
:type: float
'''
GSFontMaster.customValue1 = property(lambda self: self.pyobjc_instanceMethods.customValue1(), lambda self, value: self.setCustomValue1_(value))
'''
.. attribute:: customValue1
Value for interpolation in design space.
.. deprecated:: 2.5.2
Use :attr:`GSFontMaster.axes` instead.
:type: float
'''
GSFontMaster.customValue2 = property(lambda self: self.pyobjc_instanceMethods.customValue2(), lambda self, value: self.setCustomValue2_(value))
'''
.. attribute:: customValue2
Value for interpolation in design space.
.. deprecated:: 2.5.2
Use :attr:`GSFontMaster.axes` instead.
:type: float
'''
GSFontMaster.customValue3 = property(lambda self: self.pyobjc_instanceMethods.customValue3(), lambda self, value: self.setCustomValue3_(value))
'''
.. attribute:: customValue3
Value for interpolation in design space.
.. deprecated:: 2.5.2
Use :attr:`GSFontMaster.axes` instead.
:type: float
'''
GSFontMaster.ascender = property(lambda self: self.pyobjc_instanceMethods.ascender(), lambda self, value: self.setAscender_(value))
'''
.. attribute:: ascender
:type: float
'''
GSFontMaster.capHeight = property(lambda self: self.pyobjc_instanceMethods.capHeight(), lambda self, value: self.setCapHeight_(value))
'''
.. attribute:: capHeight
:type: float
'''
GSFontMaster.xHeight = property(lambda self: self.pyobjc_instanceMethods.xHeight(), lambda self, value: self.setXHeight_(value))
'''
.. attribute:: xHeight
:type: float
'''
GSFontMaster.descender = property(lambda self: self.pyobjc_instanceMethods.descender(), lambda self, value: self.setDescender_(value))
'''
.. attribute:: descender
:type: float
'''
GSFontMaster.italicAngle = property(lambda self: self.pyobjc_instanceMethods.italicAngle(), lambda self, value: self.setItalicAngle_(value))
'''
.. attribute:: italicAngle
:type: float
'''
GSFontMaster.verticalStems = property(lambda self: list(self.pyobjc_instanceMethods.verticalStems()), lambda self, value: self.setVerticalStems_(value))
'''
.. attribute:: verticalStems
The vertical stems. This is a list of numbers. For the time being, this can be set only as an entire list at once.
:type: list
.. code-block:: python
# Set stems
font.masters[0].verticalStems = [10, 11, 20]
'''
GSFontMaster.horizontalStems = property(lambda self: list(self.pyobjc_instanceMethods.horizontalStems()), lambda self, value: self.setHorizontalStems_(value))
'''
.. attribute:: horizontalStems
The horizontal stems. This is a list of numbers. For the time being, this can be set only as an entire list at once.
:type: list
.. code-block:: python
# Set stems
font.masters[0].horizontalStems = [10, 11, 20]
'''
GSFontMaster.alignmentZones = property(lambda self: self.pyobjc_instanceMethods.alignmentZones(), lambda self, value: self.setAlignmentZones_(value))
'''
.. attribute:: alignmentZones
Collection of :class:`GSAlignmentZone` objects.
:type: list
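A short sketch of setting the zones of a master from scratch (positions and sizes are illustrative):
.. code-block:: python
# a baseline zone and an x-height zone
font.masters[0].alignmentZones = [GSAlignmentZone(0, -16), GSAlignmentZone(500, 16)]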
'''
def FontMaster_blueValues(self):
return GSGlyphsInfo.blueValues_(self.alignmentZones)
GSFontMaster.blueValues = property(lambda self: FontMaster_blueValues(self))
'''
.. attribute:: blueValues
PS hinting Blue Values calculated from the master's alignment zones. Read-only.
:type: list
'''
def FontMaster_otherBlues(self):
return GSGlyphsInfo.otherBlues_(self.alignmentZones)
GSFontMaster.otherBlues = property(lambda self: FontMaster_otherBlues(self))
'''
.. attribute:: otherBlues
PS hinting Other Blues calculated from the master's alignment zones. Read-only.
:type: list
'''
# new (guidelines at layers are also called just 'guides')
GSFontMaster.guides = property(lambda self: self.pyobjc_instanceMethods.guideLines(), lambda self, value: self.setGuideLines_(value))
# keep for compatibility
GSFontMaster.guideLines = GSFontMaster.guides
'''
.. attribute:: guides
Collection of :class:`GSGuideLine` objects. These are the font-wide (actually master-wide) red guidelines. For glyph-level guidelines (attached to the layers) see :attr:`GSLayer.guides`
:type: list
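A minimal sketch, assuming the :class:`GSGuideLine` wrapper with its position and angle properties:
.. code-block:: python
# add a horizontal guideline at 500 units to the first master
guide = GSGuideLine()
guide.position = NSPoint(0, 500)
guide.angle = 0
font.masters[0].guides = list(font.masters[0].guides) + [guide]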
'''
GSFontMaster.userData = property(lambda self: UserDataProxy(self))
'''
.. attribute:: userData
A dictionary to store user data. Use a unique key, and only use objects that can be stored in a property list (bool, string, list, dict, numbers, NSData), otherwise the data will not be recoverable from the saved file.
:type: dict
.. code-block:: python
# set value
font.masters[0].userData['rememberToMakeTea'] = True
# delete value
del font.masters[0].userData['rememberToMakeTea']
'''
GSFontMaster.customParameters = property(lambda self: CustomParametersProxy(self))
'''
.. attribute:: customParameters
The custom parameters. List of :class:`GSCustomParameter` objects. You can access them by name or by index.
.. code-block:: python
# access all parameters
for parameter in font.masters[0].customParameters:
print(parameter)
# set a parameter
font.masters[0].customParameters['underlinePosition'] = -135
# delete a parameter
del(font.masters[0].customParameters['underlinePosition'])
:type: list, dict
'''
##################################################################################
#
#
#
# GSElement
#
#
#
##################################################################################
GSElement.selected = property(lambda self: ObjectInLayer_selected(self), lambda self, value: SetObjectInLayer_selected(self, value))
##################################################################################
#
#
#
# GSAlignmentZone
#
#
#
##################################################################################
def ___________________________(): pass
def ____GSAlignmentZone____(): pass
def ___________________________(): pass
'''
:mod:`GSAlignmentZone`
===============================================================================
Implementation of the alignmentZone object.
There is no distinction between Blue Zones and Other Zones. All negative zones (except the one with position 0) will be exported as Other Zones.
The zone for the baseline should have position 0 (zero) and a negative width.
.. class:: GSAlignmentZone([pos, size])
:param pos: The position of the zone
:param size: The size of the zone
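For example:
.. code-block:: python
# a baseline zone: position 0 (zero) and a negative size
zone = GSAlignmentZone(0, -16)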
'''
GSAlignmentZone.__new__ = staticmethod(GSObject__new__)
def AlignmentZone__init__(self, pos=0, size=20):
self.setPosition_(pos)
self.setSize_(size)
GSAlignmentZone.__init__ = AlignmentZone__init__
def AlignmentZone__repr__(self):
return "<GSAlignmentZone pos %s size %s>" % (self.position, self.size)
GSAlignmentZone.__repr__ = python_method(AlignmentZone__repr__)
GSAlignmentZone.mutableCopyWithZone_ = GSObject__copy__
'''
Properties
.. autosummary::
position
size
**Properties**
'''
GSAlignmentZone.position = property(lambda self: self.pyobjc_instanceMethods.position(), lambda self, value: self.setPosition_(value))
'''
.. attribute:: position
:type: int
'''
GSAlignmentZone.size = property(lambda self: self.pyobjc_instanceMethods.size(), lambda self, value: self.setSize_(value))
'''
.. attribute:: size
:type: int
'''
def __elementDict__(self):
return dict(self.elementDict())
GSAlignmentZone.plistValue = __elementDict__
def __propertyListValue__(self):
return dict(self.propertyListValue())
GSTTStem.plistValue = __propertyListValue__
##################################################################################
#
#
#
# GSInstance
#
#
#
##################################################################################
def ____________________(): pass
def ____GSInstance____(): pass
def ____________________(): pass
'''
:mod:`GSInstance`
===============================================================================
Implementation of the instance object. This corresponds with the "Instances" pane in the Font Info.
.. class:: GSInstance()
'''
GSInstance.__new__ = staticmethod(GSObject__new__)
def Instance__init__(self):
pass
GSInstance.__init__ = Instance__init__
def Instance__repr__(self):
return "<GSInstance \"%s\" width %s weight %s>" % (self.name, self.widthValue, self.weightValue)
GSInstance.__repr__ = python_method(Instance__repr__)
GSInstance.mutableCopyWithZone_ = GSObject__copy__
'''
Properties
.. autosummary::
active
name
weight
width
axes
weightValue
widthValue
customValue
isItalic
isBold
linkStyle
familyName
preferredFamily
preferredSubfamilyName
windowsFamily
windowsStyle
windowsLinkedToStyle
fontName
fullName
font
customParameters
instanceInterpolations
manualInterpolation
interpolatedFontProxy
interpolatedFont
lastExportedFilePath
Functions
.. autosummary::
generate()
addAsMaster()
**Properties**
'''
GSInstance.active = property(lambda self: bool(self.pyobjc_instanceMethods.active()), lambda self, value: self.setActive_(value))
'''
.. attribute:: active
:type: bool
'''
GSInstance.name = property(lambda self: self.pyobjc_instanceMethods.name(), lambda self, value: self.setName_(value))
'''
.. attribute:: name
Name of instance. Corresponds to the "Style Name" field in the font info. This is used for naming the exported fonts.
:type: string
'''
GSInstance.weight = property(lambda self: self.pyobjc_instanceMethods.weightClass(), lambda self, value: self.setWeightClass_(value))
GSInstance.weightClass = property(lambda self: self.pyobjc_instanceMethods.weightClass(), lambda self, value: self.setWeightClass_(value))
'''
.. attribute:: weight
Human-readable weight name, chosen from list in Font Info. For actual position in interpolation design space, use GSInstance.weightValue.
:type: string
'''
GSInstance.width = property(lambda self: self.pyobjc_instanceMethods.widthClass(), lambda self, value: self.setWidthClass_(value))
GSInstance.widthClass = property(lambda self: self.pyobjc_instanceMethods.widthClass(), lambda self, value: self.setWidthClass_(value))
'''
.. attribute:: width
Human-readable width name, chosen from list in Font Info. For actual position in interpolation design space, use GSInstance.widthValue.
:type: string
'''
GSInstance.axes = property(lambda self: InstanceAxesProxy(self),
lambda self, value: InstanceAxesProxy(self).setter(value))
'''
.. attribute:: axes
List of floats specifying the positions for each axis
.. code-block:: python
# setting a value for a specific axis
instance.axes[2] = 12
# setting all values at once
instance.axes = [100, 12, 3.5]
:type: list
.. versionadded:: 2.5.2
'''
GSInstance.weightValue = property(lambda self: self.interpolationWeight(), lambda self, value: self.setInterpolationWeight_(value))
'''
.. attribute:: weightValue
Value for interpolation in design space.
.. deprecated:: 2.5.2
Use :attr:`axes <GSInstance.axes>` instead.
:type: float
'''
GSInstance.widthValue = property(lambda self: self.interpolationWidth(), lambda self, value: self.setInterpolationWidth_(value))
'''
.. attribute:: widthValue
Value for interpolation in design space.
.. deprecated:: 2.5.2
Use :attr:`axes <GSInstance.axes>` instead.
:type: float
'''
GSInstance.customValue = property(lambda self: self.interpolationCustom(), lambda self, value: self.setInterpolationCustom_(value))
'''
.. attribute:: customValue
Value for interpolation in design space.
.. deprecated:: 2.5.2
Use :attr:`axes <GSInstance.axes>` instead.
:type: float
'''
GSInstance.isItalic = property(lambda self: bool(self.pyobjc_instanceMethods.isItalic()), lambda self, value: self.setIsItalic_(value))
'''
.. attribute:: isItalic
Italic flag for style linking
:type: bool
'''
GSInstance.isBold = property(lambda self: bool(self.pyobjc_instanceMethods.isBold()), lambda self, value: self.setIsBold_(value))
'''
.. attribute:: isBold
Bold flag for style linking
:type: bool
'''
GSInstance.linkStyle = property(lambda self: self.pyobjc_instanceMethods.linkStyle(), lambda self, value: self.setLinkStyle_(value))
'''
.. attribute:: linkStyle
Linked style
:type: string
'''
GSInstance.familyName = property(lambda self: self.pyobjc_instanceMethods.familyName(), lambda self, value: self.setCustomParameter_forKey_(value, "familyName"))
'''
.. attribute:: familyName
familyName
:type: string
'''
GSInstance.preferredFamily = property(lambda self: self.pyobjc_instanceMethods.preferredFamily(), lambda self, value: self.setCustomParameter_forKey_(value, "preferredFamily"))
'''
.. attribute:: preferredFamily
preferredFamily
:type: string
'''
GSInstance.preferredSubfamilyName = property(lambda self: self.pyobjc_instanceMethods.preferredSubfamilyName(), lambda self, value: self.setCustomParameter_forKey_(value, "preferredSubfamilyName"))
'''
.. attribute:: preferredSubfamilyName
preferredSubfamilyName
:type: string
'''
GSInstance.windowsFamily = property(lambda self: self.pyobjc_instanceMethods.windowsFamily(), lambda self, value: self.setCustomParameter_forKey_(value, "styleMapFamilyName"))
'''
.. attribute:: windowsFamily
windowsFamily
:type: string
'''
GSInstance.windowsStyle = property(lambda self: self.pyobjc_instanceMethods.windowsStyle())
'''
.. attribute:: windowsStyle
windowsStyle
This is computed from "isBold" and "isItalic". Read-only.
:type: string
'''
GSInstance.windowsLinkedToStyle = property(lambda self: self.pyobjc_instanceMethods.windowsLinkedToStyle())
'''
.. attribute:: windowsLinkedToStyle
windowsLinkedToStyle. Read-only.
:type: string
'''
GSInstance.fontName = property(lambda self: self.pyobjc_instanceMethods.fontName(), lambda self, value: self.setCustomParameter_forKey_(value, "postscriptFontName"))
'''
.. attribute:: fontName
fontName (postscriptFontName)
:type: string
'''
GSInstance.fullName = property(lambda self: self.pyobjc_instanceMethods.fullName(), lambda self, value: self.setCustomParameter_forKey_(value, "postscriptFullName"))
'''
.. attribute:: fullName
fullName (postscriptFullName)
:type: string
'''
GSInstance.font = property(lambda self: self.pyobjc_instanceMethods.font(), lambda self, value: self.setFont_(value))
'''
.. attribute:: font
Reference to the :class:`GSFont` object that contains the instance. Normally this is set by the app; set it manually only if the instance is not actually added to the font.
:type: GSFont
.. versionadded:: 2.5.1
'''
GSInstance.customParameters = property(lambda self: CustomParametersProxy(self))
'''
.. attribute:: customParameters
The custom parameters. List of :class:`GSCustomParameter` objects. You can access them by name or by index.
.. code-block:: python
# access all parameters
for parameter in font.instances[0].customParameters:
print(parameter)
# set a parameter
font.instances[0].customParameters['hheaLineGap'] = 10
# delete a parameter
del(font.instances[0].customParameters['hheaLineGap'])
:type: list, dict
'''
GSInstance.instanceInterpolations = property(lambda self: self.pyobjc_instanceMethods.instanceInterpolations(), lambda self, value: self.setInstanceInterpolations_(value))
'''
.. attribute:: instanceInterpolations
A dict that contains the interpolation coefficients for each master.
This is automatically updated if you change interpolationWeight, interpolationWidth, interpolationCustom. It contains FontMaster IDs as keys and coefficients for that master as values.
Or, you can set it manually if you set manualInterpolation to True. There is no UI for this, so you need to do that with a script.
:type: dict
'''
GSInstance.manualInterpolation = property(lambda self: bool(self.pyobjc_instanceMethods.manualInterpolation()), lambda self, value: self.setManualInterpolation_(value))
'''
.. attribute:: manualInterpolation
Disables the automatic calculation of instanceInterpolations.
This allows setting instanceInterpolations manually.
:type: bool
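A minimal sketch of setting the coefficients by hand (values are illustrative):
.. code-block:: python
instance = font.instances[0]
instance.manualInterpolation = True
# master IDs as keys, interpolation coefficients as values
instance.instanceInterpolations = {font.masters[0].id: 0.3, font.masters[1].id: 0.7}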
'''
GSInstance.interpolatedFontProxy = property(lambda self: self.pyobjc_instanceMethods.interpolatedFont())
'''
.. attribute:: interpolatedFontProxy
A proxy font that acts like a normal font object but interpolates only the glyphs you ask it for.
It is not fully wrapped yet, so you need to use the Objective-C methods directly.
'''
def Instance_FontObject(self):
return self.font.generateInstance_error_(self, None)
GSInstance.interpolatedFont = property(lambda self: Instance_FontObject(self))
'''
.. attribute:: interpolatedFont
.. versionadded:: 2.3
Returns a ready interpolated :class:`GSFont` object representing this instance. Other than the source object, this interpolated font will contain only one master and one instance.
Note: When accessing several properties of such an instance consecutively, it is advisable to create the instance once into a variable and then use that. Otherwise, the instance object will be completely interpolated upon each access. See sample below.
.. code-block:: python
# create instance once
interpolated = Glyphs.font.instances[0].interpolatedFont
# then access it several times
print(interpolated.masters)
print(interpolated.instances)
(<GSFontMaster "Light" width 100.0 weight 75.0>)
(<GSInstance "Web" width 100.0 weight 75.0>)
:type: :class:`GSFont`
'''
def __set__lastExportedFilePath__(self, value):
if value:
self.tempData().setObject_forKey_(value, "lastExportedFilePath")
else:
self.tempData().removeObjectForKey_("lastExportedFilePath")
GSInstance.lastExportedFilePath = property(lambda self: self.tempData().objectForKey_("lastExportedFilePath"), lambda self, value: __set__lastExportedFilePath__(self, value))
'''
.. attribute:: lastExportedFilePath
.. versionadded:: 2.4.2
:type: unicode
'''
'''
**Functions**
.. function:: generate([Format, FontPath, AutoHint, RemoveOverlap, UseSubroutines, UseProductionNames, Containers])
Exports the instance. All parameters are optional.
:param str Format: The format of the outlines: :const:`OTF` or :const:`TTF`. Default: OTF
:param str FontPath: The destination path for the final fonts. If None, it uses the default location set in the export dialog
:param bool AutoHint: If autohinting should be applied. Default: True
:param bool RemoveOverlap: If overlaps should be removed. Default: True
:param bool UseSubroutines: If to use subroutines for CFF. Default: True
:param bool UseProductionNames: If to use production names. Default: True
:param list Containers: A list of container formats. Use any of the following constants: :const:`PLAIN`, :const:`WOFF`, :const:`WOFF2`, :const:`EOT`. Default: PLAIN
:return: On success True; on failure, an error message.
:rtype: bool or str
.. code-block:: python
# export all instances as OpenType (.otf) and WOFF2 to user's font folder
exportFolder = '/Users/myself/Library/Fonts'
for instance in Glyphs.font.instances:
instance.generate(FontPath = exportFolder, Containers = [PLAIN, WOFF2])
Glyphs.showNotification('Export fonts', 'The export of %s was successful.' % (Glyphs.font.familyName))
'''
class _ExporterDelegate_ (NSObject):
def init(self):
self = super(_ExporterDelegate_, self).init()
self.result = True
return self
	def collectResults_(self, Error): # Error might be an NSString or an NSError
if Error.__class__.__name__ == "NSError":
String = Error.localizedDescription()
if Error.localizedRecoverySuggestion() and Error.localizedRecoverySuggestion().length() > 0:
String = String.stringByAppendingString_(Error.localizedRecoverySuggestion())
Error = unicode(String)
self.result = Error
def __Instance_Export__(self, Format=OTF, FontPath=None, AutoHint=True, RemoveOverlap=True, UseSubroutines=True, UseProductionNames=True, Containers=None, ConvertNames=False, DecomposeSmartStuff=True):
if Format not in [OTF, WOFF, WOFF2, TTF, UFO]:
raise KeyError('The font format is not supported: %s (supported: OTF, TTF, WOFF, WOFF2, UFO)' % Format)
ContainerList = None
if Containers is not None:
ContainerList = []
for Container in Containers:
if Container in [PLAIN, WOFF, WOFF2, EOT]:
ContainerList.append(Container.lower())
else:
raise KeyError('The container format is not supported: %s (only PLAIN, WOFF, WOFF2 and EOT)' % Container)
if Format == UFO:
if not FontPath:
print("!", FontPath)
raise ValueError('Please provide a FontPath')
instanceFont = self.interpolatedFont
return instanceFont.export(Format=Format, FontPath=FontPath, UseProductionNames=UseProductionNames, DecomposeSmartStuff=DecomposeSmartStuff)
else:
Font = self.font
if FontPath is None:
FontPath = NSUserDefaults.standardUserDefaults().objectForKey_("OTFExportPath")
Format = Format.lower() # GSExportInstanceOperation uses Format as file .extension
Exporter = NSClassFromString("GSExportInstanceOperation").alloc().initWithFont_instance_outlineFormat_containers_(Font, self, Format, ContainerList)
Exporter.setInstallFontURL_(NSURL.fileURLWithPath_(FontPath))
# the following parameters can be set here or directly read from the instance.
Exporter.setAutohint_(AutoHint)
Exporter.setRemoveOverlap_(RemoveOverlap)
Exporter.setUseSubroutines_(UseSubroutines)
Exporter.setUseProductionNames_(UseProductionNames)
Exporter.setTempPath_(os.path.expanduser("~/Library/Application Support/Glyphs/Temp/")) # this has to be set correctly.
Delegate = _ExporterDelegate_.alloc().init() # the collectResults_() method of this object will be called in case the exporter has to report a problem.
Exporter.setDelegate_(Delegate)
Exporter.main()
if Delegate.result is True:
self.lastExportedFilePath = Exporter.finalFontPath()
else:
self.lastExportedFilePath = None
return Delegate.result
GSInstance.generate = __Instance_Export__
def __Font_Export__(self, Format=OTF, Instances=None, FontPath=None, AutoHint=True, RemoveOverlap=True, UseSubroutines=True, UseProductionNames=True, Containers=None, DecomposeSmartStuff=True):
if Format not in [OTF, WOFF, WOFF2, TTF, VARIABLE, UFO]:
raise KeyError('The font format is not supported: %s (supported: OTF, TTF, WOFF, WOFF2, VARIABLE, UFO)' % Format)
if FontPath is None:
FontPath = Glyphs.defaults["OTFExportPath"]
if Format == VARIABLE:
Font = self # this method is attached to GSFont, so self is the font itself
Exporter = NSClassFromString("GlyphsFileFormatVariationFonts").alloc().init()
Exporter.setFont_(Font)
result = Exporter._exportToURL_error_(NSURL.fileURLWithPath_(FontPath), None)
return result
elif Format == UFO:
Font = self
Exporter = NSClassFromString("GlyphsFileFormatUFO").alloc().init()
Exporter.setConvertNames_(UseProductionNames)
Exporter.setDecomposeSmartStuff_(DecomposeSmartStuff)
Exporter.setExportOptions_({"SeletedMasterIndexes": NSIndexSet.indexSetWithIndexesInRange_(NSRange(0, len(Font.masters)))})
result = Exporter.exportFont_toDirectory_error_(Font, NSURL.fileURLWithPath_(FontPath), None)
return result
else:
if not Instances:
Instances = [i for i in self.instances if i.active]
allResults = []
for i in Instances:
result = i.generate(Format=Format, FontPath=FontPath, AutoHint=AutoHint, RemoveOverlap=RemoveOverlap, UseSubroutines=UseSubroutines, UseProductionNames=UseProductionNames, Containers=Containers)
allResults.append(result)
return allResults
GSFont.export = __Font_Export__
def AddInstanceAsMaster(self):
self.font.addFontAsNewMaster_(self.interpolatedFont.masters[0])
GSInstance.addAsMaster = AddInstanceAsMaster
'''
.. function:: addAsMaster()
.. versionadded:: 2.6.2
Add this instance as a new master to the font. Identical to "Instance as Master" menu item in the Font Info’s Instances section.
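A minimal sketch:
.. code-block:: python
# add the first instance as a new master
instance = Glyphs.font.instances[0]
instance.addAsMaster()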
'''
##################################################################################
#
#
#
# GSCustomParameter
#
#
#
##################################################################################
def ______________________________(): pass
def ____GSCustomParameter____(): pass
def ______________________________(): pass
'''
:mod:`GSCustomParameter`
===============================================================================
Implementation of the Custom Parameter object. It stores a name/value pair.
You can append GSCustomParameter objects, for example, to GSFont.customParameters, but this way you may end up with duplicates.
It is best to access the custom parameters through the dictionary interface, like this:
.. code-block:: python
# access all parameters
for parameter in font.customParameters:
print(parameter)
# set a parameter
font.customParameters['trademark'] = 'ThisFont is a trademark by MyFoundry.com'
# delete a parameter
del(font.customParameters['trademark'])
.. class:: GSCustomParameter([name, value])
:param name: The name
:param value: The value
'''
GSCustomParameter.__new__ = staticmethod(GSObject__new__)
def CustomParameter__init__(self, name, value):
self.setName_(name)
self.setValue_(value)
GSCustomParameter.__init__ = CustomParameter__init__
def CustomParameter__repr__(self):
return "<GSCustomParameter %s: %s>" % (self.name, self.value)
GSCustomParameter.__repr__ = python_method(CustomParameter__repr__)
GSCustomParameter.mutableCopyWithZone_ = GSObject__copy__
'''
Properties
.. autosummary::
name
value
**Properties**
'''
GSCustomParameter.name = property(lambda self: self.pyobjc_instanceMethods.name(), lambda self, value: self.setName_(value))
'''
.. attribute:: name
:type: str
'''
GSCustomParameter.value = property(lambda self: self.pyobjc_instanceMethods.value(), lambda self, value: self.setValue_(value))
'''
.. attribute:: value
:type: str, list, dict, int, float
'''
##################################################################################
#
#
#
# GSClass
#
#
#
##################################################################################
def _________________(): pass
def ____GSClass____(): pass
def _________________(): pass
'''
:mod:`GSClass`
===============================================================================
Implementation of the class object. It is used to store OpenType classes.
For details on how to access them, please look at :class:`GSFont.classes`
.. class:: GSClass([tag, code])
:param tag: The class name
:param code: A list of glyph names, separated by space or newline
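A short example of creating and appending a class (the class name and glyph list are made up for illustration):
.. code-block:: python
newClass = GSClass('uppercaseLetters', 'A B C D E')
font.classes.append(newClass)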
.. autosummary::
name
code
automatic
active
**Properties**
'''
GSClass.__new__ = staticmethod(GSObject__new__)
def Class__init__(self, name=None, code=None):
if name is not None:
self.setName_(name)
if code is not None:
self.setCode_(code)
GSClass.__init__ = Class__init__
def Class__repr__(self):
return "<GSClass \"%s\">" % (self.name)
GSClass.__repr__ = python_method(Class__repr__)
GSClass.mutableCopyWithZone_ = GSObject__copy__
GSClass.name = property(lambda self: self.pyobjc_instanceMethods.name(),
lambda self, value: self.setName_(value))
'''
.. attribute:: name
The class name
:type: unicode
'''
GSClass.code = property(lambda self: self.pyobjc_instanceMethods.code(),
lambda self, value: self.setCode_(value))
'''
.. attribute:: code
A string with space separated glyph names.
:type: unicode
'''
GSClass.automatic = property(lambda self: self.pyobjc_instanceMethods.automatic(),
lambda self, value: self.setAutomatic_(value))
'''
.. attribute:: automatic
Define whether this class should be auto-generated when pressing the 'Update' button in the Font Info.
:type: bool
'''
GSClass.active = property(lambda self: not self.disabled(),
lambda self, value: self.setDisabled_(not value))
'''
.. attribute:: active
:type: bool
.. versionadded:: 2.5
'''
##################################################################################
#
#
#
# GSFeaturePrefix
#
#
#
##################################################################################
def _________________________(): pass
def ____GSFeaturePrefix____(): pass
def _________________________(): pass
'''
:mod:`GSFeaturePrefix`
===============================================================================
Implementation of the featurePrefix object. It is used to store things that need to be outside of a feature, such as standalone lookups.
For details on how to access them, please look at :class:`GSFont.featurePrefixes`
.. class:: GSFeaturePrefix([tag, code])
:param tag: The Prefix name
:param code: The feature code in Adobe FDK syntax
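A short example of creating and appending a prefix (name and code are made up for illustration):
.. code-block:: python
prefix = GSFeaturePrefix('LanguageSystems', 'languagesystem DFLT dflt;')
font.featurePrefixes.append(prefix)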
.. autosummary::
name
code
automatic
active
**Properties**
'''
GSFeaturePrefix.__new__ = staticmethod(GSObject__new__)
def FeaturePrefix__init__(self, name=None, code=None):
if name is not None:
self.setName_(name)
if code is not None:
self.setCode_(code)
GSFeaturePrefix.__init__ = FeaturePrefix__init__
def FeaturePrefix__repr__(self):
return "<GSFeaturePrefix \"%s\">" % (self.name)
GSFeaturePrefix.__repr__ = python_method(FeaturePrefix__repr__)
GSFeaturePrefix.mutableCopyWithZone_ = GSObject__copy__
GSFeaturePrefix.name = property(lambda self: self.pyobjc_instanceMethods.name(),
lambda self, value: self.setName_(value))
'''
.. attribute:: name
The FeaturePrefix name
:type: unicode
'''
GSFeaturePrefix.code = property(lambda self: self.pyobjc_instanceMethods.code(),
lambda self, value: self.setCode_(value))
'''
.. attribute:: code
A String containing feature code.
:type: unicode
'''
GSFeaturePrefix.automatic = property(lambda self: self.pyobjc_instanceMethods.automatic(),
lambda self, value: self.setAutomatic_(value))
'''
.. attribute:: automatic
Define whether this should be auto-generated when pressing the 'Update' button in the Font Info.
:type: bool
'''
GSFeaturePrefix.active = property(lambda self: not self.disabled(),
lambda self, value: self.setDisabled_(not value))
'''
.. attribute:: active
:type: bool
.. versionadded:: 2.5
'''
##################################################################################
#
#
#
# GSFeature
#
#
#
##################################################################################
def ___________________(): pass
def ____GSFeature____(): pass
def ___________________(): pass
'''
:mod:`GSFeature`
===============================================================================
Implementation of the feature object. It is used to implement OpenType Features in the Font Info.
For details on how to access them, please look at :class:`GSFont.features`
.. class:: GSFeature([tag, code])
:param tag: The feature name
:param code: The feature code in Adobe FDK syntax
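A short example of creating and appending a feature (the substitution code is made up for illustration):
.. code-block:: python
feature = GSFeature('liga', 'sub f i by fi;')
font.features.append(feature)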
Properties
.. autosummary::
name
code
automatic
notes
active
Functions
.. autosummary::
update()
**Properties**
'''
GSFeature.__new__ = staticmethod(GSObject__new__)
def Feature__init__(self, name=None, code=None):
if name is not None:
self.setName_(name)
if code is not None:
self.setCode_(code)
GSFeature.__init__ = Feature__init__
def Feature__repr__(self):
return "<GSFeature \"%s\">" % (self.name)
GSFeature.__repr__ = python_method(Feature__repr__)
GSFeature.mutableCopyWithZone_ = GSObject__copy__
GSFeature.name = property(lambda self: self.pyobjc_instanceMethods.name(),
lambda self, value: self.setName_(value))
'''
.. attribute:: name
The feature name
:type: unicode
'''
GSFeature.code = property(lambda self: self.pyobjc_instanceMethods.code(),
lambda self, value: self.setCode_(value))
'''
.. attribute:: code
The Feature code in Adobe FDK syntax.
:type: unicode
'''
GSFeature.automatic = property(lambda self: self.pyobjc_instanceMethods.automatic(),
lambda self, value: self.setAutomatic_(value))
'''
.. attribute:: automatic
Define whether this feature should be auto-generated when pressing the 'Update' button in the Font Info.
:type: bool
'''
GSFeature.notes = property(lambda self: self.pyobjc_instanceMethods.notes(),
lambda self, value: self.setNotes_(value))
'''
.. attribute:: notes
Some extra text. It is shown at the bottom of the feature window and contains the stylistic set name parameter.
:type: unicode
'''
GSFeature.active = property(lambda self: not self.disabled(),
lambda self, value: self.setDisabled_(not value))
'''
.. attribute:: active
:type: bool
.. versionadded:: 2.5
**Functions**
.. function:: update()
Calls the automatic feature code generator for this feature.
You can use this to update all OpenType features before export.
:return: None
.. code-block:: python
# first update all features
for feature in font.features:
if feature.automatic:
feature.update()
# then export fonts
for instance in font.instances:
if instance.active:
instance.generate()
'''
##################################################################################
#
#
#
# GSSubstitution
#
#
#
##################################################################################
def ________________________(): pass
def ____GSSubstitution____(): pass
def ________________________(): pass
"""
############ NOT YET DOCUMENTED BECAUSE NOT YET MATURE ############
"""
GSSubstitution.__new__ = staticmethod(GSObject__new__)
def Substitution__init__(self):
pass
GSSubstitution.__init__ = Substitution__init__
GSSubstitution.back = property(lambda self: self.pyobjc_instanceMethods.back(),
lambda self, value: self.setBack_(value))
GSSubstitution.source = property(lambda self: self.pyobjc_instanceMethods.source(),
lambda self, value: self.setSource_(value))
GSSubstitution.forward = property(lambda self: self.pyobjc_instanceMethods.fwd(),
lambda self, value: self.setFwd_(value))
GSSubstitution.target = property(lambda self: self.pyobjc_instanceMethods.target(),
lambda self, value: self.setTarget_(value))
GSSubstitution.languageTag = property(lambda self: self.pyobjc_instanceMethods.languageTag(),
lambda self, value: self.setLanguageTag_(value))
GSSubstitution.scriptTag = property(lambda self: self.pyobjc_instanceMethods.scriptTag(),
lambda self, value: self.setScriptTag_(value))
##################################################################################
#
#
#
# GSGlyph
#
#
#
##################################################################################
def _________________(): pass
def ____GSGlyph____(): pass
def _________________(): pass
'''
:mod:`GSGlyph`
===============================================================================
Implementation of the glyph object.
For details on how to access these glyphs, please see :class:`GSFont.glyphs`
.. class:: GSGlyph([name])
:param name: The glyph name
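A minimal sketch of creating a glyph and adding it to the font:
.. code-block:: python
glyph = GSGlyph('Adieresis')
font.glyphs.append(glyph)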
Properties
.. autosummary::
parent
layers
name
unicode
string
id
category
storeCategory
subCategory
storeSubCategory
script
storeScript
productionName
storeProductionName
glyphInfo
leftKerningGroup
rightKerningGroup
leftKerningKey
rightKerningKey
leftMetricsKey
rightMetricsKey
widthMetricsKey
export
color
colorObject
note
selected
mastersCompatible
userData
smartComponentAxes
lastChange
Functions
.. autosummary::
beginUndo()
endUndo()
updateGlyphInfo()
duplicate()
**Properties**
'''
GSGlyph.__new__ = staticmethod(GSObject__new__)
def Glyph__init__(self, name=None):
if name and (isinstance(name, str) or isinstance(name, unicode)):
self.setName_(name)
GSGlyph.__init__ = Glyph__init__
def Glyph__repr__(self):
return "<GSGlyph \"%s\" with %s layers>" % (self.name, len(self.layers))
GSGlyph.__repr__ = python_method(Glyph__repr__)
GSGlyph.mutableCopyWithZone_ = GSObject__copy__
GSGlyph.parent = property(lambda self: self.pyobjc_instanceMethods.parent(),
lambda self, value: self.setParent_(value))
'''
.. attribute:: parent
Reference to the :class:`GSFont` object.
:type: :class:`GSFont`
'''
GSGlyph.layers = property(lambda self: GlyphLayerProxy(self),
lambda self, value: GlyphLayerProxy(self).setter(value))
'''
.. attribute:: layers
The layers of the glyph, collection of :class:`GSLayer` objects. You can access them either by index or by layer ID, which can be a :attr:`GSFontMaster.id`.
The layer IDs are usually a unique string chosen by Glyphs.app and not set manually. They may look like this: 3B85FBE0-2D2B-4203-8F3D-7112D42D745E
:type: list, dict
.. code-block:: python
# get active layer
layer = font.selectedLayers[0]
# get glyph of this layer
glyph = layer.parent
# access all layers of this glyph
for layer in glyph.layers:
print(layer.name)
# access layer of currently selected master of active glyph ...
# (also use this to access a specific layer of glyphs selected in the Font View)
layer = glyph.layers[font.selectedFontMaster.id]
# ... which is exactly the same as:
layer = font.selectedLayers[0]
# directly access 'Bold' layer of active glyph
for master in font.masters:
if master.name == 'Bold':
id = master.id
break
layer = glyph.layers[id]
# add a new layer
newLayer = GSLayer()
newLayer.name = '{125, 100}' # (example for glyph-level intermediate master)
# you may set the master ID that this layer will be associated with, otherwise the first master will be used
newLayer.associatedMasterId = font.masters[-1].id # attach to last master
font.glyphs['a'].layers.append(newLayer)
# duplicate a layer under a different name
newLayer = font.glyphs['a'].layers[0].copy()
newLayer.name = 'Copy of layer'
# FYI, this will still be the old layer ID (in case of duplicating) at this point
print(newLayer.layerId)
font.glyphs['a'].layers.append(newLayer)
# FYI, the layer will have been assigned a new layer ID by now, after having been appended
print(newLayer.layerId)
# replace the second master layer with another layer
newLayer = GSLayer()
newLayer.layerId = font.masters[1].id # Make sure to sync the master layer ID
font.glyphs['a'].layers[font.masters[1].id] = newLayer
# delete last layer of glyph
# (Also works for master layers. They will be emptied)
del(font.glyphs['a'].layers[-1])
# delete currently active layer
del(font.glyphs['a'].layers[font.selectedLayers[0].layerId])
'''
def GSGlyph_setName(self, name):
if name == self.name:
pass
elif (self.parent and not self.parent.glyphs.has_key(name)) or not self.parent:
self.setName_changeName_update_(name, False, True)
else:
raise NameError('The glyph name \"%s\" already exists in the font.' % name)
GSGlyph.name = property(lambda self: self.pyobjc_instanceMethods.name(),
lambda self, value: GSGlyph_setName(self, value))
'''
.. attribute:: name
The name of the glyph. It will be converted to a "nice name" (e.g., afii10017 to A-cy); you can disable this behavior in Font Info or the app preferences.
:type: unicode
'''
GSGlyph.unicode = property(lambda self: self.pyobjc_instanceMethods.unicode(),
lambda self, value: self.setUnicode_(value))
'''
.. attribute:: unicode
String with the hex Unicode value of glyph, if encoded.
:type: unicode
'''
def __glyph__unicode__(self):
codes = self.pyobjc_instanceMethods.unicodes()
if codes and len(codes):
return list(codes)
return None
GSGlyph.unicodes = property(lambda self: __glyph__unicode__(self),
lambda self, value: self.setUnicodes_(value))
'''
.. attribute:: unicodes
List of strings with the hex Unicode values of the glyph, if encoded.
:type: list
'''
GSGlyph.production = property(lambda self: self.pyobjc_instanceMethods.production(),
lambda self, value: self.setProduction_(value))
GSGlyph.string = property(lambda self: self.charString())
'''
.. attribute:: string
String representation of glyph, if encoded.
This is similar to the string representation that you get when copying glyphs into the clipboard.
:type: unicode
'''
GSGlyph.id = property(lambda self: str(self.pyobjc_instanceMethods.id()),
lambda self, value: self.setId_(value))
'''
.. attribute:: id
A unique identifier for each glyph
:type: string
'''
GSGlyph.category = property(lambda self: self.pyobjc_instanceMethods.category(),
lambda self, value: self.setCategory_(value))
'''
.. attribute:: category
The category of the glyph, e.g., 'Letter', 'Symbol'.
Setting only works if `storeCategory` is set (see below).
:type: unicode
'''
GSGlyph.storeCategory = property(lambda self: bool(self.pyobjc_instanceMethods.storeCategory()),
lambda self, value: self.setStoreCategory_(value))
'''
.. attribute:: storeCategory
Set to True in order to manipulate the `category` of the glyph (see above).
Makes it possible to ship custom glyph data inside a .glyphs file without a separate GlyphData file. Same as Cmd-Alt-i dialog in UI.
:type: bool
'''
GSGlyph.subCategory = property(lambda self: self.pyobjc_instanceMethods.subCategory(),
lambda self, value: self.setSubCategory_(value))
'''
.. attribute:: subCategory
The subCategory of the glyph, e.g., 'Uppercase', 'Math'.
Setting only works if `storeSubCategory` is set (see below).
:type: unicode
'''
GSGlyph.storeSubCategory = property(lambda self: bool(self.pyobjc_instanceMethods.storeSubCategory()),
lambda self, value: self.setStoreSubCategory_(value))
'''
.. attribute:: storeSubCategory
Set to True in order to manipulate the `subCategory` of the glyph (see above).
Makes it possible to ship custom glyph data inside a .glyphs file without a separate GlyphData file. Same as Cmd-Alt-i dialog in UI.
:type: bool
.. versionadded:: 2.3
'''
GSGlyph.script = property(lambda self: self.pyobjc_instanceMethods.script(),
lambda self, value: self.setScript_(value))
'''
.. attribute:: script
The script of the glyph, e.g., 'latin', 'arabic'.
Setting only works if `storeScript` is set (see below).
:type: unicode
'''
GSGlyph.storeScript = property(lambda self: bool(self.pyobjc_instanceMethods.storeScript()),
lambda self, value: self.setStoreScript_(value))
'''
.. attribute:: storeScript
Set to True in order to manipulate the `script` of the glyph (see above).
Makes it possible to ship custom glyph data inside a .glyphs file without a separate GlyphData file. Same as Cmd-Alt-i dialog in UI.
:type: bool
.. versionadded:: 2.3
'''
GSGlyph.productionName = property(lambda self: self.pyobjc_instanceMethods.production(),
lambda self, value: self.setProduction_(value))
'''
.. attribute:: productionName
The productionName of the glyph.
Setting only works if `storeProductionName` is set (see below).
:type: unicode
.. versionadded:: 2.3
'''
GSGlyph.storeProductionName = property(lambda self: bool(self.storeProduction()),
lambda self, value: self.setStoreProduction_(value))
'''
.. attribute:: storeProductionName
Set to True in order to manipulate the `productionName` of the glyph (see above).
Makes it possible to ship custom glyph data inside a .glyphs file without a separate GlyphData file. Same as Cmd-Alt-i dialog in UI.
:type: bool
.. versionadded:: 2.3
'''
GSGlyph.glyphInfo = property(lambda self: self.parent.glyphsInfo().glyphInfoForGlyph_(self))
'''
.. attribute:: glyphInfo
:class:`GSGlyphInfo` object for this glyph with detailed information.
:type: :class:`GSGlyphInfo`
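A minimal sketch (category and unicode are common :class:`GSGlyphInfo` attributes):
.. code-block:: python
info = font.glyphs['a'].glyphInfo
print(info.category, info.unicode)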
'''
def __GSGlyph_glyphDataEntryString__(self):
Unicode = self.unicode
if Unicode is None or len(Unicode) < 3:
Unicode = ""
Decompose = self.layers[0].componentNamesText()
if Decompose is not None and len(Decompose) > 0:
Decompose = "decompose=\"%s\" " % Decompose
else:
Decompose = ""
SubCategory = ""
if self.subCategory != "Other":
SubCategory = "subCategory=\"%s\" " % self.subCategory
Anchors = self.layers[0].anchors.keys()
if Anchors is not None and len(Anchors) > 0:
Anchors = "anchors=\"%s\" " % ", ".join(sorted(Anchors))
else:
Anchors = ""
GlyphInfo = self.glyphInfo
Accents = None
if GlyphInfo is not None:
Accents = GlyphInfo.accents
if Accents is not None and len(Accents) > 0:
Accents = "accents=\"%s\"" % ", ".join(sorted(Accents))
else:
Accents = ""
Production = ""
if self.productionName is not None and len(self.productionName) > 0:
Production = self.productionName
else:
Production = Glyphs.productionGlyphName(self.name)
if len(Production) > 0:
Production = "production=\"%s\"" % Production
else:
Production = ""
if self.note is not None and len(self.note) > 0:
Production += " altNames=\"%s\"" % self.note
return " <glyph unicode=\"%s\" name=\"%s\" %scategory=\"%s\" %sscript=\"%s\" description=\"\" %s%s%s />" % (Unicode, self.name, Decompose, self.category, SubCategory, self.script, Production, Anchors, Accents)
GSGlyph.glyphDataEntryString = __GSGlyph_glyphDataEntryString__
GSGlyph.leftKerningGroup = property(lambda self: self.pyobjc_instanceMethods.leftKerningGroup(),
lambda self, value: self.setLeftKerningGroup_(NSStr(value)))
'''
.. attribute:: leftKerningGroup
The leftKerningGroup of the glyph. All glyphs with the same text in the kerning group end up in the same kerning class.
:type: unicode'''
GSGlyph.rightKerningGroup = property(lambda self: self.pyobjc_instanceMethods.rightKerningGroup(),
lambda self, value: self.setRightKerningGroup_(NSStr(value)))
'''
.. attribute:: rightKerningGroup
The rightKerningGroup of the glyph. All glyphs with the same text in the kerning group end up in the same kerning class.
:type: unicode'''
def GSGlyph__leftKerningKey(self):
if self.leftKerningGroupId():
return self.leftKerningGroupId()
else:
return self.name
GSGlyph.leftKerningKey = property(lambda self: GSGlyph__leftKerningKey(self))
'''
.. attribute:: leftKerningKey
The key to be used with the kerning functions (:meth:`GSFont.kerningForPair()`, :meth:`GSFont.setKerningForPair()`, :meth:`GSFont.removeKerningForPair()`).
If the glyph has a :attr:`leftKerningGroup <GSGlyph.leftKerningGroup>` attribute, the internally used `@MMK_R_xx` notation will be returned (note that the R in there stands for the right side of the kerning pair for LTR fonts, which corresponds to the left kerning group of the glyph). If no group is given, the glyph’s name will be returned.
:type: string
.. code-block:: python
# Set kerning for 'T' and all members of kerning class 'a'
# For LTR fonts, always use the .rightKerningKey for the first (left) glyph of the pair, .leftKerningKey for the second (right) glyph.
font.setKerningForPair(font.selectedFontMaster.id, font.glyphs['T'].rightKerningKey, font.glyphs['a'].leftKerningKey, -60)
# which corresponds to:
font.setKerningForPair(font.selectedFontMaster.id, 'T', '@MMK_R_a', -60)
.. versionadded:: 2.4
'''
def GSGlyph__rightKerningKey(self):
if self.rightKerningGroupId():
return self.rightKerningGroupId()
else:
return self.name
GSGlyph.rightKerningKey = property(lambda self: GSGlyph__rightKerningKey(self))
'''
.. attribute:: rightKerningKey
The key to be used with the kerning functions (:meth:`GSFont.kerningForPair()`, :meth:`GSFont.setKerningForPair()`, :meth:`GSFont.removeKerningForPair()`).
If the glyph has a :attr:`rightKerningGroup <GSGlyph.rightKerningGroup>` attribute, the internally used `@MMK_L_xx` notation will be returned (note that the L in there stands for the left side of the kerning pair for LTR fonts, which corresponds to the right kerning group of the glyph). If no group is given, the glyph’s name will be returned.
See above for an example.
:type: string
.. versionadded:: 2.4
'''
GSGlyph.leftMetricsKey = property(lambda self: self.pyobjc_instanceMethods.leftMetricsKey(),
lambda self, value: self.setLeftMetricsKey_(NSStr(value)))
'''
.. attribute:: leftMetricsKey
The leftMetricsKey of the glyph. This is a reference to another glyph by name or formula. It is used to synchronize the metrics with the linked glyph.
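For example (the '=n+10' formula follows the metrics key syntax used in the UI):
.. code-block:: python
# link the left sidebearing to the glyph 'n'
glyph.leftMetricsKey = 'n'
# or use a formula
glyph.leftMetricsKey = '=n+10'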
:type: unicode'''
GSGlyph.rightMetricsKey = property(lambda self: self.pyobjc_instanceMethods.rightMetricsKey(),
lambda self, value: self.setRightMetricsKey_(NSStr(value)))
'''
.. attribute:: rightMetricsKey
The rightMetricsKey of the glyph. This is a reference to another glyph by name or formula. It is used to synchronize the metrics with the linked glyph.
:type: unicode'''
GSGlyph.widthMetricsKey = property(lambda self: self.pyobjc_instanceMethods.widthMetricsKey(),
lambda self, value: self.setWidthMetricsKey_(NSStr(value)))
'''
.. attribute:: widthMetricsKey
The widthMetricsKey of the glyph. This is a reference to another glyph by name or formula. It is used to synchronize the metrics with the linked glyph.
:type: unicode'''
GSGlyph.export = property(lambda self: bool(self.pyobjc_instanceMethods.export()),
lambda self, value: self.setExport_(value))
'''
.. attribute:: export
Defines whether glyph will export upon font generation
:type: bool
'''
def __getColorIndex__(self):
color = self.colorIndex()
if color > 20 or color < 0:
return None
return color
def __setColorIndex(self, value):
if value is None:
value = -1
self.setColorIndex_(value)
GSGlyph.color = property(lambda self: __getColorIndex__(self),
lambda self, value: __setColorIndex(self, value))
'''
.. attribute:: color
Color marking of glyph in UI
:type: int
.. code-block:: python
glyph.color = 0 # red
glyph.color = 1 # orange
glyph.color = 2 # brown
glyph.color = 3 # yellow
glyph.color = 4 # light green
glyph.color = 5 # dark green
glyph.color = 6 # light blue
glyph.color = 7 # dark blue
glyph.color = 8 # purple
glyph.color = 9 # magenta
glyph.color = 10 # light gray
glyph.color = 11 # charcoal
glyph.color = None # not colored, white (before version 1235, use -1)
'''
def _set_Glyph_setColor(self, colorValue):
if isinstance(colorValue, (tuple, list)):
# scale 0-255 values down to the 0.0-1.0 range that NSColor expects
if max(colorValue) > 1:
colorValue = [c / 255.0 if c > 1 else c for c in colorValue]
colorValue = list(colorValue)
colorValue.extend((1, 1, 1)) # pad with opaque alpha in case it was omitted
colorValue = NSColor.colorWithDeviceRed_green_blue_alpha_(colorValue[0], colorValue[1], colorValue[2], colorValue[3])
self.setColor_(colorValue)
GSGlyph.colorObject = property(lambda self: self.pyobjc_instanceMethods.color(),
lambda self, value: _set_Glyph_setColor(self, value))
'''
.. attribute:: colorObject
NSColor object of glyph color, useful for drawing in plugins.
:type: NSColor
.. code-block:: python
# use glyph color to draw the outline
glyph.colorObject.set()
# Get RGB (and alpha) values (as float numbers 0..1, multiply by 255 if necessary)
R, G, B, A = glyph.colorObject.colorUsingColorSpace_(NSColorSpace.genericRGBColorSpace()).getRed_green_blue_alpha_(None, None, None, None)
print(R, G, B)
0.617805719376 0.958198726177 0.309286683798
print(round(R * 255), round(G * 255), round(B * 255))
158 244 79
# Draw layer
glyph.layers[0].bezierPath.fill()
# set the glyph color.
glyph.colorObject = NSColor.colorWithDeviceRed_green_blue_alpha_(247.0 / 255.0, 74.0 / 255.0, 62.9 / 255.0, 1)
# since 2.4.2, you can also set a tuple with RGB(A) values (0-255 or 0.0-1.0):
glyph.colorObject = (247.0, 74.0, 62.9)
glyph.colorObject = (247.0, 74.0, 62.9, 1)
glyph.colorObject = (0.968, 0.29, 0.247, 1)
.. versionadded:: 2.3
'''
GSGlyph.note = property(lambda self: self.pyobjc_instanceMethods.note(),
lambda self, value: self.setNote_(value))
'''
.. attribute:: note
:type: unicode
'''
def _get_Glyphs_is_selected(self):
Doc = self.parent.parent
return Doc.windowController().glyphsController().selectedObjects().containsObject_(self)
def _set_Glyphs_is_selected(self, isSelected):
ArrayController = self.parent.parent.windowController().glyphsController()
if isSelected:
ArrayController.addSelectedObjects_([self])
else:
ArrayController.removeSelectedObjects_([self])
GSGlyph.selected = property(lambda self: _get_Glyphs_is_selected(self),
lambda self, value: _set_Glyphs_is_selected(self, value))
'''
.. attribute:: selected
Return True if the Glyph is selected in the Font View.
This is different from the property font.selectedLayers, which returns the selection from the active tab.
:type: bool
.. code-block:: python
# access all selected glyphs in the Font View
for glyph in font.glyphs:
if glyph.selected:
print(glyph)
'''
GSGlyph.mastersCompatible = property(lambda self: bool(self.pyobjc_instanceMethods.mastersCompatible()))
'''
.. attribute:: mastersCompatible
.. versionadded:: 2.3
Return True when all layers in this glyph are compatible (same components, anchors, paths etc.)
:type: bool
'''
GSGlyph.userData = property(lambda self: UserDataProxy(self))
'''
.. attribute:: userData
.. versionadded:: 2.3
A dictionary to store user data. Use a unique key and only use objects that can be stored in a property list (string, list, dict, numbers, NSData) otherwise the data will not be recoverable from the saved file.
:type: dict
.. code-block:: python
# set value
glyph.userData['rememberToMakeCoffee'] = True
# delete value
del glyph.userData['rememberToMakeCoffee']
'''
GSGlyph.smartComponentAxes = property(lambda self: GlyphSmartComponentAxesProxy(self), lambda self, value: GlyphSmartComponentAxesProxy(self).setter(value))
'''
.. attribute:: smartComponentAxes
.. versionadded:: 2.3
A list of :class:`GSSmartComponentAxis` objects.
These are the axis definitions for the interpolations that take place within the Smart Components. Corresponds to the 'Properties' tab of the glyph's 'Show Smart Glyph Settings' dialog.
Also see https://glyphsapp.com/tutorials/smart-components for reference.
:type: list
.. code-block:: python
# Adding two interpolation axes to the glyph
axis1 = GSSmartComponentAxis()
axis1.name = 'crotchDepth'
axis1.topValue = 0
axis1.bottomValue = -100
g.smartComponentAxes.append(axis1)
axis2 = GSSmartComponentAxis()
axis2.name = 'shoulderWidth'
axis2.topValue = 100
axis2.bottomValue = 0
g.smartComponentAxes.append(axis2)
# Deleting one axis
del g.smartComponentAxes[1]
'''
def __GSGlyph__lastChange__(self):
try:
return datetime.datetime.fromtimestamp(self.pyobjc_instanceMethods.lastChange().timeIntervalSince1970())
except:
return None
GSGlyph.lastChange = property(lambda self: __GSGlyph__lastChange__(self))
'''
.. attribute:: lastChange
.. versionadded:: 2.3
Change date when glyph was last changed as datetime.
Check Python’s :mod:`time` module for how to use the timestamp.
'''
'''
**Functions**
'''
def __BeginUndo(self):
self.undoManager().beginUndoGrouping()
GSGlyph.beginUndo = __BeginUndo
'''
.. function:: beginUndo()
Call this before you do a longer-running change to the glyph. Be extra careful to call Glyph.endUndo() when you are finished.
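A minimal sketch of pairing both calls:
.. code-block:: python
glyph.beginUndo()
# ... perform many changes to the glyph ...
glyph.endUndo()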
'''
def __EndUndo(self):
self.undoManager().endUndoGrouping()
GSGlyph.endUndo = __EndUndo
'''
.. function:: endUndo()
This closes an undo group that was opened by a previous call of Glyph.beginUndo(). Make sure that you call this for each beginUndo() call.
'''
def __updateGlyphInfo(self, changeName=True):
if self.parent is not None:
self.parent.glyphsInfo().updateGlyphInfo_changeName_(self, changeName)
else:
GSGlyphsInfo.sharedManager().updateGlyphInfo_changeName_(self, changeName)
GSGlyph.updateGlyphInfo = __updateGlyphInfo
'''
.. function:: updateGlyphInfo(changeName = True)
Updates all information like name, unicode etc. for this glyph.
'''
def Glyph_Duplicate(self, name=None):
newGlyph = self.copyThin_options_(False, 4) # option: 4 copy all layers
if newGlyph.unicode:
newGlyph.unicode = None
if name:
newGlyph.name = name
else:
newGlyph.name = self.parent.saveNameForName_(newGlyph.name) # will add a .00X suffix
self.parent.glyphs.append(newGlyph)
return newGlyph
GSGlyph.duplicate = Glyph_Duplicate
'''
.. function:: duplicate([name])
Duplicate the glyph under a new name and return it.
If no name is given, .00n will be appended to it.
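Usage could look like this:
.. code-block:: python
# duplicate 'a' under an explicit new name
newGlyph = font.glyphs['a'].duplicate('a.alt')
print(newGlyph.name)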
'''
##################################################################################
#
#
#
# GSLayer
#
#
#
##################################################################################
def _________________(): pass
def ____GSLayer____(): pass
def _________________(): pass
'''
:mod:`GSLayer`
===============================================================================
Implementation of the layer object.
For details on how to access these layers, please see :attr:`GSGlyph.layers`
.. class:: GSLayer()
Properties
.. autosummary::
parent
name
master
associatedMasterId
layerId
color
colorObject
components
guides
annotations
hints
anchors
paths
selection
LSB
RSB
TSB
BSB
width
leftMetricsKey
rightMetricsKey
widthMetricsKey
bounds
selectionBounds
background
backgroundImage
bezierPath
openBezierPath
userData
smartComponentPoleMapping
isSpecialLayer
isMasterLayer
Functions
.. autosummary::
decomposeComponents()
decomposeCorners()
compareString()
connectAllOpenPaths()
copyDecomposedLayer()
syncMetrics()
correctPathDirection()
removeOverlap()
roundCoordinates()
addNodesAtExtremes()
beginChanges()
endChanges()
cutBetweenPoints()
intersectionsBetweenPoints()
addMissingAnchors()
clearSelection()
clear()
swapForegroundWithBackground()
reinterpolate()
applyTransform()
**Properties**
'''
GSLayer.__new__ = staticmethod(GSObject__new__)
def Layer__init__(self):
pass
GSLayer.__init__ = Layer__init__
def Layer__repr__(self):
try:
assert self.name
name = self.name
except:
name = 'orphan'
try:
assert self.parent.name
parent = self.parent.name
except:
parent = 'orphan'
return "<%s \"%s\" (%s)>" % (self.className(), name, parent)
GSLayer.__repr__ = python_method(Layer__repr__)
GSLayer.mutableCopyWithZone_ = GSObject__copy__
GSLayer.parent = property(lambda self: self.pyobjc_instanceMethods.parent(),
lambda self, value: self.setParent_(value))
GSBackgroundLayer.parent = property(lambda self: self.pyobjc_instanceMethods.parent(),
lambda self, value: self.setParent_(value))
GSControlLayer.parent = property(lambda self: self.pyobjc_instanceMethods.parent())
'''
.. attribute:: parent
Reference to the :class:`glyph <GSGlyph>` object that this layer is attached to.
:type: :class:`GSGlyph`
'''
GSLayer.name = property(lambda self: self.pyobjc_instanceMethods.name(),
lambda self, value: self.setName_(value))
GSBackgroundLayer.name = property(lambda self: self.pyobjc_instanceMethods.name(),
lambda self, value: self.setName_(value))
'''
.. attribute:: name
Name of layer
:type: unicode
'''
def GSLayer__master__(self):
if self.associatedMasterId:
master = self.parent.parent.masters[self.associatedMasterId]
return master
GSLayer.master = property(lambda self: GSLayer__master__(self))
'''
.. attribute:: master
Master that this layer is connected to. Read only.
:type: GSFontMaster
'''
GSLayer.associatedMasterId = property(lambda self: self.pyobjc_instanceMethods.associatedMasterId(),
lambda self, value: self.setAssociatedMasterId_(value))
'''
.. attribute:: associatedMasterId
The ID of the :class:`fontMaster <GSFontMaster>` this layer belongs to, in case this isn't a master layer. Every layer that isn't a master layer needs to be attached to one master layer.
:type: unicode
.. code-block:: python
# add a new layer
newLayer = GSLayer()
newLayer.name = '{125, 100}' # (example for glyph-level intermediate master)
# you may set the master ID that this layer will be associated with, otherwise the first master will be used
newLayer.associatedMasterId = font.masters[-1].id # attach to last master
font.glyphs['a'].layers.append(newLayer)
'''
GSLayer.layerId = property(lambda self: self.pyobjc_instanceMethods.layerId(),
lambda self, value: self.setLayerId_(value))
'''
.. attribute:: layerId
The unique layer ID is used to access the layer in the :class:`glyphs <GSGlyph>` layer dictionary.
For master layers this should be the id of the :class:`fontMaster <GSFontMaster>`.
It could look like this: "FBCA074D-FCF3-427E-A700-7E318A949AE5"
:type: unicode
.. code-block:: python
# see ID of active layer
id = font.selectedLayers[0].layerId
print(id)
FBCA074D-FCF3-427E-A700-7E318A949AE5
# access a layer by this ID
layer = font.glyphs['a'].layers[id]
layer = font.glyphs['a'].layers['FBCA074D-FCF3-427E-A700-7E318A949AE5']
# for master layers, use ID of masters
layer = font.glyphs['a'].layers[font.masters[0].id]
'''
GSLayer.color = property(lambda self: __getColorIndex__(self),
lambda self, value: __setColorIndex(self, value))
'''
.. attribute:: color
Color marking of the layer in UI
:type: int
.. code-block:: python
layer.color = 0 # red
layer.color = 1 # orange
layer.color = 2 # brown
layer.color = 3 # yellow
layer.color = 4 # light green
layer.color = 5 # dark green
layer.color = 6 # light blue
layer.color = 7 # dark blue
layer.color = 8 # purple
layer.color = 9 # magenta
layer.color = 10 # light gray
layer.color = 11 # charcoal
layer.color = None # not colored, white (before version 1235, use -1)
'''
GSLayer.colorObject = property(lambda self: self.pyobjc_instanceMethods.color(), lambda self, value: self.setColor_(value))
'''
.. attribute:: colorObject
.. versionadded:: 2.3
NSColor object of layer color, useful for drawing in plugins.
:type: NSColor
.. code-block:: python
# use layer color to draw the outline
layer.colorObject.set()
# Get RGB (and alpha) values (as float numbers 0..1, multiply by 255 if necessary)
R, G, B, A = layer.colorObject.colorUsingColorSpace_(NSColorSpace.genericRGBColorSpace()).getRed_green_blue_alpha_(None, None, None, None)
print(R, G, B)
0.617805719376 0.958198726177 0.309286683798
print(round(R * 255), round(G * 255), round(B * 255))
158 244 79
# Draw layer
layer.bezierPath.fill()
# set the layer color.
layer.colorObject = NSColor.colorWithDeviceRed_green_blue_alpha_(247.0 / 255.0, 74.0 / 255.0, 62.9 / 255.0, 1)
'''
GSLayer.components = property(lambda self: LayerComponentsProxy(self),
lambda self, value: LayerComponentsProxy(self).setter(value))
'''
.. attribute:: components
Collection of :class:`GSComponent` objects
:type: list
.. code-block:: python
layer = Glyphs.font.selectedLayers[0] # current layer
# add component
layer.components.append(GSComponent('dieresis'))
# add component at specific position
layer.components.append(GSComponent('dieresis', NSPoint(100, 100)))
# delete specific component
for i, component in enumerate(layer.components):
if component.componentName == 'dieresis':
del(layer.components[i])
break
# copy components from another layer
import copy
layer.components = copy.copy(anotherlayer.components)
# copy one component to another layer
layer.components.append(anotherlayer.component[0].copy())
'''
GSLayer.guides = property(lambda self: LayerGuideLinesProxy(self),
lambda self, value: LayerGuideLinesProxy(self).setter(value))
GSLayer.guideLines = GSLayer.guides
'''
.. attribute:: guides
List of :class:`GSGuideLine` objects.
:type: list
.. code-block:: python
layer = Glyphs.font.selectedLayers[0] # current layer
# access all guides
for guide in layer.guides:
print(guide)
# add guideline
newGuide = GSGuideLine()
newGuide.position = NSPoint(100, 100)
newGuide.angle = -10.0
layer.guides.append(newGuide)
# delete guide
del(layer.guides[0])
# copy guides from another layer
import copy
layer.guides = copy.copy(anotherlayer.guides)
'''
GSLayer.annotations = property(lambda self: LayerAnnotationProxy(self),
lambda self, value: LayerAnnotationProxy(self).setter(value))
'''
.. attribute:: annotations
List of :class:`GSAnnotation` objects.
:type: list
.. code-block:: python
layer = Glyphs.font.selectedLayers[0] # current layer
# access all annotations
for annotation in layer.annotations:
print(annotation)
# add new annotation
newAnnotation = GSAnnotation()
newAnnotation.type = TEXT
newAnnotation.text = 'Fuck, this curve is ugly!'
layer.annotations.append(newAnnotation)
# delete annotation
del(layer.annotations[0])
# copy annotations from another layer
import copy
layer.annotations = copy.copy(anotherlayer.annotations)
'''
GSLayer.hints = property(lambda self: LayerHintsProxy(self),
lambda self, value: LayerHintsProxy(self).setter(value))
'''
.. attribute:: hints
List of :class:`GSHint` objects.
:type: list
.. code-block:: python
layer = Glyphs.font.selectedLayers[0] # current layer
# access all hints
for hint in layer.hints:
print(hint)
# add a new hint
newHint = GSHint()
# change behaviour of hint here, like its attachment nodes
layer.hints.append(newHint)
# delete hint
del(layer.hints[0])
# copy hints from another layer
import copy
layer.hints = copy.copy(anotherlayer.hints)
# remember to reconnect the hints' nodes with the new layer's nodes
'''
GSLayer.anchors = property(lambda self: LayerAnchorsProxy(self),
lambda self, value: LayerAnchorsProxy(self).setter(value))
'''
.. attribute:: anchors
List of :class:`GSAnchor` objects.
:type: list, dict
.. code-block:: python
layer = Glyphs.font.selectedLayers[0] # current layer
# access all anchors:
for a in layer.anchors:
print(a)
# add a new anchor
layer.anchors['top'] = GSAnchor()
# delete anchor
del(layer.anchors['top'])
# copy anchors from another layer
import copy
layer.anchors = copy.copy(anotherlayer.anchors)
'''
GSLayer.paths = property(lambda self: LayerPathsProxy(self),
lambda self, value: LayerPathsProxy(self).setter(value))
'''
.. attribute:: paths
List of :class:`GSPath` objects.
:type: list
.. code-block:: python
# access all paths
for path in layer.paths:
print(path)
# delete path
del(layer.paths[0])
# copy paths from another layer
import copy
layer.paths = copy.copy(anotherlayer.paths)
'''
GSLayer.selection = property(lambda self: LayerSelectionProxy(self), lambda self, value: LayerSelectionProxy(self).setter(value))
'''
.. attribute:: selection
List of all selected objects in the glyph. Read-only.
This list contains **all selected items**, including **nodes**, **anchors**, **guidelines** etc.
If you want to work specifically with nodes, for instance, you may want to cycle through the nodes (or anchors etc.) and check whether they are selected. See example below.
.. code-block:: python
# access all selected nodes
for path in layer.paths:
for node in path.nodes: # (or path.anchors etc.)
print(node.selected)
# clear selection
layer.clearSelection()
:type: list
'''
GSLayer.LSB = property(lambda self: self.pyobjc_instanceMethods.LSB(),
lambda self, value: self.setLSB_(float(value)))
'''
.. attribute:: LSB
Left sidebearing
:type: float
'''
GSLayer.RSB = property(lambda self: self.pyobjc_instanceMethods.RSB(),
lambda self, value: self.setRSB_(float(value)))
'''
.. attribute:: RSB
Right sidebearing
:type: float
'''
GSLayer.TSB = property(lambda self: self.pyobjc_instanceMethods.TSB(),
lambda self, value: self.setTSB_(float(value)))
'''
.. attribute:: TSB
Top sidebearing
:type: float
'''
GSLayer.BSB = property(lambda self: self.pyobjc_instanceMethods.BSB(),
lambda self, value: self.setBSB_(float(value)))
'''
.. attribute:: BSB
Bottom sidebearing
:type: float
'''
GSLayer.width = property(lambda self: self.pyobjc_instanceMethods.width(),
lambda self, value: self.setWidth_(float(value)))
GSBackgroundLayer.width = property(lambda self: self.pyobjc_instanceMethods.width(),
lambda self, value: None)
'''
.. attribute:: width
Layer width
:type: float
'''
def __GSLayer_vertWidth__(self):
value = self.pyobjc_instanceMethods.vertWidth()
if value >= 0 and value < 1000000:
return value
return None
def __GSLayer_setVertWidth__(self, value):
if value is None or value > 1000000 or value < 0:
value = NSNotFound
else:
value = float(value)
self.setVertWidth_(value)
GSLayer.vertWidth = property(lambda self: __GSLayer_vertWidth__(self),
lambda self, value: __GSLayer_setVertWidth__(self, value))
'''
.. attribute:: vertWidth
Layer vertical width
set it to None to reset it to default
:type: float
.. versionadded:: 2.6.2
'''
def __GSLayer_vertOrigin__(self):
value = self.pyobjc_instanceMethods.vertOrigin()
if value > -1000000 and value < 1000000:
return value
return None
def __GSLayer_setVertOrigin__(self, value):
if value is None or value > 1000000 or value < -1000000:
value = NSNotFound
else:
value = float(value)
self.setVertOrigin_(value)
GSLayer.vertOrigin = property(lambda self: __GSLayer_vertOrigin__(self),
lambda self, value: __GSLayer_setVertOrigin__(self, value))
'''
.. attribute:: vertOrigin
Layer vertical origin
set it to None to reset it to default
:type: float
.. versionadded:: 2.6.2
'''
GSLayer.leftMetricsKey = property(lambda self: self.pyobjc_instanceMethods.leftMetricsKey(),
lambda self, value: self.setLeftMetricsKey_(NSStr(value)))
'''
.. attribute:: leftMetricsKey
The leftMetricsKey of the layer. This is a reference to another glyph by name or formula. It is used to synchronize the metrics with the linked glyph.
:type: unicode'''
GSLayer.rightMetricsKey = property(lambda self: self.pyobjc_instanceMethods.rightMetricsKey(),
lambda self, value: self.setRightMetricsKey_(NSStr(value)))
'''
.. attribute:: rightMetricsKey
The rightMetricsKey of the layer. This is a reference to another glyph by name or formula. It is used to synchronize the metrics with the linked glyph.
:type: unicode'''
GSLayer.widthMetricsKey = property(lambda self: self.pyobjc_instanceMethods.widthMetricsKey(),
lambda self, value: self.setWidthMetricsKey_(NSStr(value)))
'''
.. attribute:: widthMetricsKey
The widthMetricsKey of the layer. This is a reference to another glyph by name or formula. It is used to synchronize the metrics with the linked glyph.
:type: unicode
'''
GSLayer.bounds = property(lambda self: self.pyobjc_instanceMethods.bounds())
'''
.. attribute:: bounds
Bounding box of whole glyph as NSRect. Read-only.
:type: NSRect
.. code-block:: python
layer = Glyphs.font.selectedLayers[0] # current layer
# origin
print(layer.bounds.origin.x, layer.bounds.origin.y)
# size
print(layer.bounds.size.width, layer.bounds.size.height)
'''
GSLayer.selectionBounds = property(lambda self: self.boundsOfSelection())
'''
.. attribute:: selectionBounds
Bounding box of the layer's selection (nodes, anchors, components etc). Read-only.
:type: NSRect
'''
GSLayer.background = property(lambda self: self.pyobjc_instanceMethods.background(),
lambda self, value: self.setBackground_(value))
'''
.. attribute:: background
The background layer
:type: :class:`GSLayer`
'''
GSLayer.backgroundImage = property(lambda self: self.pyobjc_instanceMethods.backgroundImage(),
lambda self, value: self.setBackgroundImage_(value))
'''
.. attribute:: backgroundImage
The background image. It will be scaled so that 1 em unit equals 1 of the image's pixels.
:type: :class:`GSBackgroundImage`
.. code-block:: python
# set background image
layer.backgroundImage = GSBackgroundImage('/path/to/file.jpg')
# remove background image
layer.backgroundImage = None
'''
GSLayer.bezierPath = property(lambda self: self.pyobjc_instanceMethods.bezierPath())
'''
.. attribute:: bezierPath
.. versionadded:: 2.3
The layer as an NSBezierPath object. Useful for drawing glyphs in plug-ins.
.. code-block:: python
# draw the path into the Edit view
NSColor.redColor().set()
layer.bezierPath.fill()
:type: NSBezierPath
'''
GSLayer.openBezierPath = property(lambda self: self.pyobjc_instanceMethods.openBezierPath())
'''
.. attribute:: openBezierPath
.. versionadded:: 2.3
All open paths of the layer as an NSBezierPath object. Useful for drawing glyphs as outlines in plug-ins.
.. code-block:: python
# draw the path into the Edit view
NSColor.redColor().set()
layer.openBezierPath.stroke()
:type: NSBezierPath
'''
# keep for compatibility:
def Layer__drawBezierPath(self):
print("layer.drawBezierPath is deprecated. Please use layer.completeBezierPath")
return self.pyobjc_instanceMethods.drawBezierPath()
GSLayer.drawBezierPath = property(lambda self: Layer__drawBezierPath(self))
GSLayer.completeBezierPath = property(lambda self: self.pyobjc_instanceMethods.drawBezierPath())
'''
.. attribute:: completeBezierPath
.. versionadded:: 2.3.1
The layer as an NSBezierPath object including paths from components. Useful for drawing glyphs in plug-ins.
.. code-block:: python
# draw the path into the Edit view
NSColor.redColor().set()
layer.completeBezierPath.fill()
:type: NSBezierPath
'''
# keep for compatibility:
def Layer__drawOpenBezierPath(self):
print("layer.drawBezierPath is deprecated. Please use layer.completeBezierPath")
return self.pyobjc_instanceMethods.drawOpenBezierPath()
GSLayer.drawOpenBezierPath = property(lambda self: Layer__drawOpenBezierPath(self))
GSLayer.completeOpenBezierPath = property(lambda self: self.pyobjc_instanceMethods.drawOpenBezierPath())
'''
.. attribute:: completeOpenBezierPath
.. versionadded:: 2.3.1
All open paths of the layer as an NSBezierPath object including paths from components. Useful for drawing glyphs as outlines in plugins.
.. code-block:: python
# draw the path into the Edit view
NSColor.redColor().set()
layer.completeOpenBezierPath.stroke()
:type: NSBezierPath
'''
GSLayer.isAligned = property(lambda self: self.pyobjc_instanceMethods.isAligned())
'''
.. attribute:: isAligned
.. versionadded:: 2.3.1
Indicates if the components are auto aligned.
:type: bool
'''
GSLayer.isSpecialLayer = property(lambda self: bool(self.pyobjc_instanceMethods.isSpecialLayer()))
'''
.. attribute:: isSpecialLayer
If the layer is a brace, bracket or a smart component layer
:type: bool
'''
GSLayer.isMasterLayer = property(lambda self: bool(self.pyobjc_instanceMethods.isMasterLayer()))
'''
.. attribute:: isMasterLayer
If it is a master layer
:type: bool
'''
GSLayer.userData = property(lambda self: UserDataProxy(self))
'''
.. attribute:: userData
.. versionadded:: 2.3
A dictionary to store user data. Use a unique key and only use objects that can be stored in a property list (string, list, dict, numbers, NSData) otherwise the data will not be recoverable from the saved file.
:type: dict
.. code-block:: python
# set value
layer.userData['rememberToMakeCoffee'] = True
# delete value
del layer.userData['rememberToMakeCoffee']
'''
GSLayer.smartComponentPoleMapping = property(lambda self: SmartComponentPoleMappingProxy(self))
'''
.. attribute:: smartComponentPoleMapping
.. versionadded:: 2.3
Maps this layer to the poles on the interpolation axes of the Smart Glyph. The dictionary keys are the names of the :class:`GSSmartComponentAxis` objects. The values are 1 for bottom pole and 2 for top pole. Corresponds to the 'Layers' tab of the glyph's 'Show Smart Glyph Settings' dialog.
Also see https://glyphsapp.com/tutorials/smart-components for reference.
:type: dict, int
.. code-block:: python
# Map layers to top and bottom poles:
crotchDepthAxis = glyph.smartComponentAxes['crotchDepth']
shoulderWidthAxis = glyph.smartComponentAxes['shoulderWidth']
for layer in glyph.layers:
# Regular layer
if layer.name == 'Regular':
layer.smartComponentPoleMapping[crotchDepthAxis.id] = 2
layer.smartComponentPoleMapping[shoulderWidthAxis.id] = 2
# NarrowShoulder layer
elif layer.name == 'NarrowShoulder':
layer.smartComponentPoleMapping[crotchDepthAxis.id] = 2
layer.smartComponentPoleMapping[shoulderWidthAxis.id] = 1
# LowCrotch layer
elif layer.name == 'LowCrotch':
layer.smartComponentPoleMapping[crotchDepthAxis.id] = 1
layer.smartComponentPoleMapping[shoulderWidthAxis.id] = 2
**Functions**
.. function:: decomposeComponents()
Decomposes all components of the layer at once.
.. function:: decomposeCorners()
.. versionadded:: 2.4
Decomposes all corners of the layer at once.
.. function:: compareString()
Returns a string representing the outline structure of the glyph, for compatibility comparison.
:return: The comparison string
:rtype: string
.. code-block:: python
layer = Glyphs.font.selectedLayers[0] # current layer
print(layer.compareString())
oocoocoocoocooc_oocoocoocloocoocoocoocoocoocoocoocooc_
.. function:: connectAllOpenPaths()
Closes all open paths when end points are further than 1 unit away from each other.
.. function:: copyDecomposedLayer()
Returns a copy of the layer with all components decomposed.
:return: A new layer object
:rtype: :class:`GSLayer`
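For example:
.. code-block:: python
# work on a decomposed copy without touching the original layer
decomposed = layer.copyDecomposedLayer()
print(len(decomposed.paths))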
.. function:: syncMetrics()
Take over LSB and RSB from linked glyph.
.. code-block:: python
# sync metrics of all layers of this glyph
for layer in glyph.layers:
layer.syncMetrics()
.. function:: correctPathDirection()
Corrects the path direction.
'''
def RemoveOverlap(self, checkSelection=False):
removeOverlapFilter = NSClassFromString("GlyphsFilterRemoveOverlap").alloc().init()
removeOverlapFilter.removeOverlapFromLayer_checkSelection_error_(self, checkSelection, None)
GSLayer.removeOverlap = RemoveOverlap
'''
.. function:: removeOverlap([checkSelection])
Joins all contours.
:param checkSelection: whether the selection will be considered. Default: False
.. function:: roundCoordinates()
.. versionadded:: 2.3
Round the positions of all coordinates to the grid (size of which is set in the Font Info).
'''
def Layer_addNodesAtExtremes(self, force=False):
self.addExtremePoints()
GSLayer.addNodesAtExtremes = Layer_addNodesAtExtremes
'''
.. function:: addNodesAtExtremes()
.. versionadded:: 2.3
Add nodes at layer's extrema, e.g., top, bottom etc.
'''
def __GSLayer_applyTransform__(self, transformStruct):
Transform = NSAffineTransform.transform()
Transform.setTransformStruct_(transformStruct)
self.transform_checkForSelection_doComponents_(Transform, False, True)
GSLayer.applyTransform = __GSLayer_applyTransform__
'''
.. function:: applyTransform(transformStruct)
Apply a transformation matrix to the layer.
.. code-block:: python
layer = Glyphs.font.selectedLayers[0] # current layer
layer.applyTransform([
0.5, # x scale factor
0.0, # x skew factor
0.0, # y skew factor
0.5, # y scale factor
0.0, # x position
0.0 # y position
])
'''
def BeginChanges(self):
self.setDisableUpdates()
self.undoManager().beginUndoGrouping()
GSLayer.beginChanges = BeginChanges
'''
.. function:: beginChanges()
Call this before you make bigger changes to the layer.
This will increase performance and prevent undo problems.
Always call layer.endChanges() when you are finished.
'''
def EndChanges(self):
self.setEnableUpdates()
self.undoManager().endUndoGrouping()
GSLayer.endChanges = EndChanges
'''
.. function:: endChanges()
Call this if you have called layer.beginChanges() before. Make sure to group both calls properly.
'''
def CutBetweenPoints(self, Point1, Point2):
GlyphsToolOther = NSClassFromString("GlyphsToolOther")
GlyphsToolOther.cutPathsInLayer_forPoint_endPoint_(self, Point1, Point2)
GSLayer.cutBetweenPoints = CutBetweenPoints
'''
.. function:: cutBetweenPoints(Point1, Point2)
Cuts all paths that intersect the line from Point1 to Point2
:param Point1: one point
:param Point2: the other point
.. code-block:: python
layer = Glyphs.font.selectedLayers[0] # current layer
# cut glyph in half horizontally at y=100
layer.cutBetweenPoints(NSPoint(0, 100), NSPoint(layer.width, 100))
'''
def IntersectionsBetweenPoints(self, Point1, Point2, components=False):
return self.calculateIntersectionsStartPoint_endPoint_decompose_(Point1, Point2, components)
GSLayer.intersectionsBetweenPoints = IntersectionsBetweenPoints
NSConcreteValue.x = property(lambda self: self.pointValue().x)
NSConcreteValue.y = property(lambda self: self.pointValue().y)
'''
.. function:: intersectionsBetweenPoints(Point1, Point2, components = False)
Return all intersection points between a measurement line and the paths in the layer. This is basically identical to the measurement tool in the UI.
Normally, the first returned point is the starting point, the last returned point is the end point. Thus, the second point is the first intersection, the second last point is the last intersection.
:param Point1: one point
:param Point2: the other point
:param components: whether components should be measured. Default: False
.. code-block:: python
layer = Glyphs.font.selectedLayers[0] # current layer
# show all intersections with glyph at y=100
intersections = layer.intersectionsBetweenPoints((-1000, 100), (layer.width+1000, 100))
print(intersections)
# left sidebearing at measurement line
print(intersections[1].x)
# right sidebearing at measurement line
print(layer.width - intersections[-2].x)
'''
def Layer_addMissingAnchors(self):
GSGlyphsInfo.sharedManager().updateAnchor_(self)
GSLayer.addMissingAnchors = Layer_addMissingAnchors
'''
.. function:: addMissingAnchors()
Adds missing anchors defined in the glyph database.
'''
'''
.. function:: clearSelection()
.. versionadded:: 2.3
Unselect all selected items in this layer.
'''
'''
.. function:: clear()
.. versionadded:: 2.3
Remove all elements from layer.
'''
'''
.. function:: swapForegroundWithBackground()
.. versionadded:: 2.3
Swap Foreground layer with Background layer.
'''
def Layer_replaceLayerWithInterpolation(self):
if self.parent:
self.parent.replaceLayerWithInterpolation_(self)
GSLayer.reinterpolate = Layer_replaceLayerWithInterpolation
'''
.. function:: reinterpolate()
.. versionadded:: 2.3
Re-interpolate a layer according to the other layers and its interpolation values.
Applies to master layers as well as brace layers and is equivalent to the 'Re-Interpolate' command from the Layers palette.
'''
def ControlLayer__new__(typ, *args, **kwargs):
if len(args) > 0:
return GSControlLayer.alloc().initWithChar_(args[0])
else:
return GSControlLayer.alloc().init()
GSControlLayer.__new__ = staticmethod(ControlLayer__new__)
def ControlLayer__init__(self, args):
pass
GSControlLayer.__init__ = ControlLayer__init__
def ControlLayer__repr__(self):
char = self.parent.unicodeChar()
if char == 10:
name = "newline"
elif char == 129:
name = "placeholder"
else:
name = GSGlyphsInfo.sharedManager().niceGlyphNameForName_("uni%.4X" % self.parent.unicodeChar())
return "<%s \"%s\">" % (self.className(), name)
GSControlLayer.__repr__ = python_method(ControlLayer__repr__)
def ControlLayer__newline__():
return GSControlLayer(10)
GSControlLayer.newline = staticmethod(ControlLayer__newline__)
def ControlLayer__placeholder__():
return GSControlLayer(129)
GSControlLayer.placeholder = staticmethod(ControlLayer__placeholder__)
def DrawLayerWithPen(self, pen, contours=True, components=True):
"""draw the object with a RoboFab segment pen"""
try:
pen.setWidth(self.width)
if self.note is not None:
pen.setNote(self.note)
except AttributeError:
# FontTools pens don't have these methods
pass
if contours:
for a in self.anchors:
a.draw(pen)
for c in self.paths:
c.draw(pen)
if components:
for c in self.components:
c.draw(pen)
try:
pen.doneDrawing()
except AttributeError:
# FontTools pens don't have a doneDrawing() method
pass
GSLayer.draw = DrawLayerWithPen
def DrawPointsWithPen(self, pen, contours=True, components=True):
"""draw the object with a point pen"""
if contours:
for p in self.paths:
p.drawPoints(pen)
if components:
for c in self.components:
c.drawPoints(pen)
GSLayer.drawPoints = DrawPointsWithPen
def _getPen_(self):
return GSPathPen.alloc().initWithLayer_(self)
GSLayer.getPen = _getPen_
GSLayer.getPointPen = _getPen_
def _invalidateContours_(self):
pass
GSLayer._invalidateContours = _invalidateContours_
##################################################################################
#
#
#
# GSAnchor
#
#
#
##################################################################################
def ___________________(): pass
def ____GSAnchor____(): pass
def ___________________(): pass
'''
:mod:`GSAnchor`
===============================================================================
Implementation of the anchor object.
For details on how to access them, please see :attr:`GSLayer.anchors`
.. class:: GSAnchor([name, pt])
:param name: the name of the anchor
:param pt: the position of the anchor
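A minimal creation sketch; the anchor name and position are example values:
.. code-block:: python
	layer = Glyphs.font.selectedLayers[0] # current layer
	layer.anchors['top'] = GSAnchor('top', NSPoint(350, 700))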
Properties
.. autosummary::
position
name
selected
**Properties**
'''
def Anchor__init__(self, name=None, pt=None):
if pt:
self.setPosition_(pt)
if name:
self.setName_(name)
GSAnchor.__init__ = Anchor__init__
def Anchor__repr__(self):
return "<GSAnchor \"%s\" x=%s y=%s>" % (self.name, self.position.x, self.position.y)
GSAnchor.__repr__ = python_method(Anchor__repr__)
GSAnchor.mutableCopyWithZone_ = GSObject__copy__
GSAnchor.position = property(lambda self: self.pyobjc_instanceMethods.position(),
lambda self, value: self.setPosition_(value))
'''
.. attribute:: position
The position of the anchor
:type: NSPoint
.. code-block:: python
# read position
print(layer.anchors['top'].position.x, layer.anchors['top'].position.y)
# set position
layer.anchors['top'].position = NSPoint(175, 575)
# increase vertical position by 50 units
layer.anchors['top'].position = NSPoint(layer.anchors['top'].position.x, layer.anchors['top'].position.y + 50)
'''
GSAnchor.name = property(lambda self: self.pyobjc_instanceMethods.name(),
lambda self, value: self.setName_(value))
'''
.. attribute:: name
The name of the anchor
:type: unicode
.. attribute:: selected
Selection state of anchor in UI.
.. code-block:: python
# select anchor
layer.anchors[0].selected = True
# log selection state
print(layer.anchors[0].selected)
:type: bool
'''
def DrawAnchorWithPen(self, pen):
if hasattr(pen, "addAnchor"):
pen.addAnchor(self.name, (self.x, self.y))
else:
pen.moveTo(self.position)
pen.endPath()
GSAnchor.draw = DrawAnchorWithPen
def __GSAnchor_drawPoints__(self, pen):
"""draw the object with a point pen"""
pen.beginPath()
pen.addPoint((self.x, self.y), segmentType="move", smooth=False, name=self.name)
pen.endPath()
GSAnchor.drawPoints = __GSAnchor_drawPoints__
##################################################################################
#
#
#
# GSComponent
#
#
#
##################################################################################
def _______________________(): pass
def ____GSComponent____(): pass
def _______________________(): pass
'''
:mod:`GSComponent`
===============================================================================
Implementation of the component object.
For details on how to access them, please see :attr:`GSLayer.components`
.. class:: GSComponent(glyph [, position])
:param glyph: a :class:`GSGlyph` object or the glyph name
:param position: the position of the component as NSPoint
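A minimal creation sketch; the glyph name and position are example values:
.. code-block:: python
	layer = Glyphs.font.selectedLayers[0] # current layer
	layer.components.append(GSComponent('dieresis', NSPoint(100, 0)))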
Properties
.. autosummary::
position
scale
rotation
componentName
component
layer
transform
bounds
automaticAlignment
anchor
selected
smartComponentValues
bezierPath
userData
Functions
.. autosummary::
decompose()
applyTransform()
**Properties**
'''
def Component__init__(self, glyph, offset=(0, 0), scale=(1, 1), transform=None):
"""
transformation: transform matrix as list of numbers
"""
if transform is None:
if scale != (1, 1):
xx, yy = scale
dx, dy = offset
self.transform = ((xx, 0, 0, yy, dx, dy))
elif offset != (0, 0):
self.setPositionFast_(offset)
else:
self.transform = transform
if glyph:
if isinstance(glyph, (str, unicode)):
self.setComponentName_(glyph)
elif isinstance(glyph, GSGlyph):
self.setComponentName_(glyph.name)
elif isinstance(glyph, "RGlyph"):
self.setComponentName_(glyph.name)
GSComponent.__init__ = Component__init__
def Component__repr__(self):
return "<GSComponent \"%s\" x=%s y=%s>" % (self.componentName, self.position.x, self.position.y)
GSComponent.__repr__ = python_method(Component__repr__)
GSComponent.mutableCopyWithZone_ = GSObject__copy__
GSComponent.position = property(lambda self: self.pyobjc_instanceMethods.position(),
lambda self, value: self.setPosition_(value))
'''
.. attribute:: position
The position of the component.
:type: NSPoint
'''
def GSComponent_getScale(self):
(x, y, r) = self.getScaleX_scaleY_rotation_(None, None, None)
return (x, y)
def GSComponent_setScale(self, scale):
(x, y, r) = self.getScaleX_scaleY_rotation_(None, None, None)
if type(scale) == tuple:
self.setScaleX_scaleY_rotation_(scale[0], scale[1], r)
elif type(scale) == int or type(scale) == float:
self.setScaleX_scaleY_rotation_(scale, scale, r)
GSComponent.scale = property(lambda self: GSComponent_getScale(self),
lambda self, value: GSComponent_setScale(self, value))
'''
.. attribute:: scale
Scale factor of the component.
Setting a single number sets the x and y scale factors simultaneously; setting a tuple sets them separately. Alternatively, use the transformation matrix.
:type: float or tuple
'''
def GSComponent_getRotation(self):
(x, y, rotation) = self.getScaleX_scaleY_rotation_(None, None, None)
return rotation
def GSComponent_setRotation(self, rotation):
(x, y, r) = self.getScaleX_scaleY_rotation_(None, None, None)
self.setScaleX_scaleY_rotation_(x, y, rotation)
GSComponent.rotation = property(lambda self: GSComponent_getRotation(self),
lambda self, value: GSComponent_setRotation(self, value))
'''
.. attribute:: rotation
Rotation angle of component.
:type: float
'''
GSComponent.componentName = property(lambda self: self.pyobjc_instanceMethods.componentName(),
lambda self, value: self.setComponentName_(objcObject(value)))
'''
.. attribute:: componentName
The glyph name the component is pointing to.
:type: unicode
'''
GSComponent.name = property(lambda self: self.pyobjc_instanceMethods.componentName(),
lambda self, value: self.setComponentName_(value))
'''
.. attribute:: name
The glyph name the component is pointing to.
:type: unicode
.. versionadded:: 2.5
'''
GSComponent.component = property(lambda self: self.pyobjc_instanceMethods.component())
'''
.. attribute:: component
The :class:`GSGlyph` the component is pointing to. This is read-only. In order to change the referenced base glyph, set :attr:`componentName <GSComponent.componentName>` to the new glyph name.
:type: :class:`GSGlyph`
'''
GSComponent.componentLayer = property(lambda self: self.pyobjc_instanceMethods.componentLayer())
'''
.. attribute:: componentLayer
The :class:`GSLayer` the component is pointing to. This is read-only. In order to change the referenced base glyph, set :attr:`componentName <GSComponent.componentName>` to the new glyph name.
For Smart Components, the `componentLayer` contains the interpolated result.
:type: :class:`GSLayer`
.. versionadded:: 2.5
'''
GSComponent.transform = property(lambda self: self.transformStruct(),
lambda self, value: self.setTransformStruct_(value))
'''
.. attribute:: transform
Transformation matrix of the component.
.. code-block:: python
component = layer.components[0]
component.transform = ((
0.5, # x scale factor
0.0, # x skew factor
0.0, # y skew factor
0.5, # y scale factor
0.0, # x position
0.0 # y position
))
:type: NSAffineTransformStruct
'''
GSComponent.bounds = property(lambda self: self.pyobjc_instanceMethods.bounds())
'''
.. attribute:: bounds
Bounding box of the component, read-only
:type: NSRect
.. code-block:: python
component = layer.components[0] # first component
# origin
print(component.bounds.origin.x, component.bounds.origin.y)
# size
print(component.bounds.size.width, component.bounds.size.height)
'''
# keep for compatibility:
GSComponent.disableAlignment = property(lambda self: bool(self.pyobjc_instanceMethods.disableAlignment()),
lambda self, value: self.setDisableAlignment_(value))
# new:
GSComponent.automaticAlignment = property(lambda self: bool(self.doesAlign() or self.doesAttach()),
lambda self, value: self.setDisableAlignment_(not bool(value)))
'''
.. attribute:: automaticAlignment
Defines whether the component is automatically aligned.
:type: bool'''
GSComponent.alignment = property(lambda self: self.pyobjc_instanceMethods.alignment(),
lambda self, value: self.setAlignment_(value))
'''
.. attribute:: alignment
.. versionadded:: 2.5
TODO
'''
GSComponent.locked = property(lambda self: bool(self.pyobjc_instanceMethods.locked()),
lambda self, value: self.setLocked_(value))
'''
.. attribute:: locked
.. versionadded:: 2.5
Defines whether the component is locked.
TODO
:type: bool
'''
GSComponent.anchor = property(lambda self: self.pyobjc_instanceMethods.anchor(),
lambda self, value: self.setAnchor_(value))
'''
.. attribute:: anchor
If more than one anchor/_anchor pair would match, this property can be used to set the anchor to use for automatic alignment.
This can be set from the anchor button in the component info box in the UI
:type: unicode'''
'''
.. attribute:: selected
Selection state of component in UI.
.. code-block:: python
# select component
layer.components[0].selected = True
# print selection state
print(layer.components[0].selected)
:type: bool
'''
def DrawComponentWithPen(self, pen):
pen.addComponent(self.componentName, self.transform)
GSComponent.draw = DrawComponentWithPen
GSComponent.drawPoints = DrawComponentWithPen
GSComponent.smartComponentValues = property(lambda self: smartComponentValuesProxy(self))
'''
.. attribute:: smartComponentValues
.. versionadded:: 2.3
Dictionary of interpolation values of the Smart Component. Keys are the names, values are between the top and the bottom value of the corresponding :class:`GSSmartComponentAxis` objects. Corresponds to the values of the 'Smart Component Settings' dialog. Returns None if the component is not a Smart Component.
Also see https://glyphsapp.com/tutorials/smart-components for reference.
:type: dict, int
.. code-block:: python
# Narrow shoulders of m
glyph = font.glyphs['m']
glyph.layers[0].components[1].smartComponentValues['shoulderWidth'] = 30 # First shoulder. Index is 1, given that the stem is also a component with index 0
glyph.layers[0].components[2].smartComponentValues['shoulderWidth'] = 30 # Second shoulder. Index is 2, given that the stem is also a component with index 0
# Low crotch of h
glyph = font.glyphs['h']
crotchDepthAxis = glyph.smartComponentAxes['crotchDepth']
glyph.layers[0].components[1].smartComponentValues[crotchDepthAxis.id] = -77 # Shoulder. Index is 1, given that the stem is also a component with index 0
# Check whether a component is a smart component
for component in layer.components:
if component.smartComponentValues is not None:
# do stuff
'''
GSComponent.bezierPath = property(lambda self: self.pyobjc_instanceMethods.bezierPath())
'''
.. attribute:: bezierPath
.. versionadded:: 2.3
The component as an NSBezierPath object. Useful for drawing glyphs in plugins.
.. code-block:: python
# draw the path into the Edit view
NSColor.redColor().set()
layer.components[0].bezierPath.fill()
:type: NSBezierPath
'''
GSComponent.userData = property(lambda self: UserDataProxy(self))
'''
.. attribute:: userData
.. versionadded:: 2.5
A dictionary to store user data. Use a unique key and only use objects that can be stored in a property list (string, list, dict, numbers, NSData) otherwise the data will not be recoverable from the saved file.
:type: dict
.. code-block:: python
# set value
component.userData['rememberToMakeCoffee'] = True
# delete value
del component.userData['rememberToMakeCoffee']
'''
'''
**Functions**
'''
GSComponent.parent = property(lambda self: self.pyobjc_instanceMethods.parent(),
lambda self, value: self.setParent_(value))
def __GSComponent_decompose__(self, doAnchors=True, doHints=True):
self.parent.decomposeComponent_doAnchors_doHints_(self, doAnchors, doHints)
GSComponent.decompose = __GSComponent_decompose__
'''
.. function:: decompose(doAnchors = True, doHints = True)
:param doAnchors: get anchors from components
:param doHints: get hints from components
Decomposes the component.
'''
def __GSComponent_applyTransform__(self, transformStruct):
transform = self.transform
oldTransform = NSAffineTransform.transform()
oldTransform.setTransformStruct_(transform)
newTransform = NSAffineTransform.transform()
newTransform.setTransformStruct_(transformStruct)
oldTransform.appendTransform_(newTransform)
self.setTransformStruct_(oldTransform.transformStruct())
GSComponent.applyTransform = __GSComponent_applyTransform__
'''
.. function:: applyTransform
Apply a transformation matrix to the component.
.. code-block:: python
component = layer.components[0]
component.applyTransform((
0.5, # x scale factor
0.0, # x skew factor
0.0, # y skew factor
0.5, # y scale factor
0.0, # x position
0.0 # y position
))
'''
##################################################################################
#
#
#
# GSSmartComponentAxis
#
#
#
##################################################################################
def __________________________________(): pass
def ____GSSmartComponentAxis____(): pass
def __________________________________(): pass
'''
:mod:`GSSmartComponentAxis`
===============================================================================
Implementation of the Smart Component interpolation axis object.
For details on how to access them, please see :attr:`GSGlyph.smartComponentAxes`
.. versionadded:: 2.3
.. class:: GSSmartComponentAxis()
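A setup sketch, assuming `glyph` is a Smart Glyph; the axis name and pole values are example values:
.. code-block:: python
	axis = GSSmartComponentAxis()
	axis.name = 'crotchDepth'
	axis.topValue = 0
	axis.bottomValue = -100
	glyph.smartComponentAxes.append(axis)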
Properties
.. autosummary::
name
topValue
bottomValue
**Properties**
'''
GSSmartComponentAxis = GSPartProperty
GSSmartComponentAxis.__new__ = staticmethod(GSObject__new__)
def SmartComponentProperty__init__(self):
pass
GSSmartComponentAxis.__init__ = SmartComponentProperty__init__
def SmartComponentProperty__repr__(self):
return "<GSSmartComponentAxis \"%s\">" % (self.name)
GSSmartComponentAxis.__repr__ = python_method(SmartComponentProperty__repr__)
GSSmartComponentAxis.name = property(lambda self: self.pyobjc_instanceMethods.name(),
lambda self, value: self.setName_(value))
'''
.. attribute:: name
Name of the axis. The name is for display purposes only.
:type: str
'''
GSSmartComponentAxis.id = property(lambda self: self.pyobjc_instanceMethods.id())
'''
.. attribute:: id
Id of the axis. This Id will be used to map the Smart Glyph's layers to the poles of the interpolation. See :attr:`GSLayer.smartComponentPoleMapping`
:type: str
.. versionadded:: 2.5
'''
GSSmartComponentAxis.topValue = property(lambda self: self.pyobjc_instanceMethods.topValue(),
lambda self, value: self.setTopValue_(value))
'''
.. attribute:: topValue
Top end (pole) value on interpolation axis.
:type: int, float
'''
GSSmartComponentAxis.bottomValue = property(lambda self: self.pyobjc_instanceMethods.bottomValue(),
lambda self, value: self.setBottomValue_(value))
'''
.. attribute:: bottomValue
Bottom end (pole) value on interpolation axis.
:type: int, float
'''
##################################################################################
#
#
#
# GSPath
#
#
#
##################################################################################
def ________________(): pass
def ____GSPath____(): pass
def ________________(): pass
'''
:mod:`GSPath`
===============================================================================
Implementation of the path object.
For details on how to access them, please see :attr:`GSLayer.paths`
If you build a path in code, make sure that the structure is valid. A curve node has to be preceded by two off-curve nodes. And an open path has to start with a line node.
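A construction sketch following these rules; the coordinates are example values:
.. code-block:: python
	path = GSPath()
	path.nodes.append(GSNode(NSPoint(100, 100), type=LINE))
	path.nodes.append(GSNode(NSPoint(300, 100), type=LINE))
	path.nodes.append(GSNode(NSPoint(200, 300), type=LINE))
	path.closed = True
	layer.paths.append(path)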
.. class:: GSPath()
Properties
.. autosummary::
parent
nodes
segments
closed
direction
bounds
selected
bezierPath
Functions
.. autosummary::
reverse()
addNodesAtExtremes()
applyTransform()
**Properties**
'''
GSPath.__new__ = staticmethod(GSObject__new__)
def Path__init__(self):
pass
GSPath.__init__ = Path__init__
def Path__repr__(self):
return "<GSPath %s nodes and %s segments>" % (len(self.nodes), len(self.segments))
GSPath.__repr__ = python_method(Path__repr__)
GSPath.mutableCopyWithZone_ = GSObject__copy__
GSPath.parent = property(lambda self: self.pyobjc_instanceMethods.parent(),
lambda self, value: self.setParent_(value))
'''
.. attribute:: parent
Reference to the :class:`layer <GSLayer>` object.
:type: :class:`GSLayer`
'''
GSPath.nodes = property(lambda self: PathNodesProxy(self),
lambda self, value: PathNodesProxy(self).setter(value))
'''
.. attribute:: nodes
A list of :class:`GSNode` objects
:type: list
.. code-block:: python
# access all nodes
for path in layer.paths:
for node in path.nodes:
print(node)
'''
GSPath.segments = property(lambda self: self.pyobjc_instanceMethods.segments(),
lambda self, value: self.setSegments_(value))
'''
.. attribute:: segments
A list of segments as NSPoint objects. Two objects represent a line, four represent a curve. Start point of the segment is included.
:type: list
.. code-block:: python
# access all segments
for path in layer.paths:
for segment in path.segments:
print(segment)
'''
GSPath.closed = property(lambda self: bool(self.pyobjc_instanceMethods.closed()),
lambda self, value: self.setClosed_(value))
'''
.. attribute:: closed
Returns True if the path is closed
:type: bool
'''
GSPath.direction = property(lambda self: self.pyobjc_instanceMethods.direction())
'''
.. attribute:: direction
Path direction. -1 for counterclockwise, 1 for clockwise.
:type: int
'''
GSPath.bounds = property(lambda self: self.pyobjc_instanceMethods.bounds())
'''
.. attribute:: bounds
Bounding box of the path, read-only
:type: NSRect
.. code-block:: python
path = layer.paths[0] # first path
# origin
print(path.bounds.origin.x, path.bounds.origin.y)
# size
print(path.bounds.size.width, path.bounds.size.height)
'''
def Path_selected(self):
return set(self.nodes) <= set(self.parent.selection)
def Path_SetSelected(self, state):
layer = self.parent
if state:
layer.addObjectsFromArrayToSelection_(self.pyobjc_instanceMethods.nodes())
else:
layer.removeObjectsFromSelection_(self.pyobjc_instanceMethods.nodes())
GSPath.selected = property(lambda self: Path_selected(self), lambda self, value: Path_SetSelected(self, value))
'''
.. attribute:: selected
Selection state of path in UI.
.. code-block:: python
# select path
layer.paths[0].selected = True
# print selection state
print(layer.paths[0].selected)
:type: bool
'''
GSPath.bezierPath = property(lambda self: self.pyobjc_instanceMethods.bezierPath())
'''
.. attribute:: bezierPath
.. versionadded:: 2.3
The same path as an NSBezierPath object. Useful for drawing glyphs in plugins.
.. code-block:: python
# draw the path into the Edit view
NSColor.redColor().set()
layer.paths[0].bezierPath.fill()
:type: NSBezierPath
**Functions**
.. function:: reverse()
Reverses the path direction
'''
def DrawPathWithPen(self, pen):
"""draw the object with a fontTools pen"""
Start = 0
if self.closed:
for i in range(len(self) - 1, -1, -1):
StartNode = self.nodeAtIndex_(i)
GS_Type = StartNode.pyobjc_instanceMethods.type()
if GS_Type is not GSOFFCURVE_:
pen.moveTo(StartNode.pyobjc_instanceMethods.position())
break
else:
for i in range(len(self)):
StartNode = self.nodeAtIndex_(i)
GS_Type = StartNode.pyobjc_instanceMethods.type()
if GS_Type is not GSOFFCURVE_:
pen.moveTo(StartNode.pyobjc_instanceMethods.position())
Start = i + 1
break
for i in range(Start, len(self), 1):
Node = self.nodeAtIndex_(i)
GS_Type = Node.pyobjc_instanceMethods.type()
if GS_Type == GSLINE_:
pen.lineTo(Node.pyobjc_instanceMethods.position())
elif GS_Type == GSCURVE_:
pen.curveTo(self.nodeAtIndex_(i - 2).pyobjc_instanceMethods.position(), self.nodeAtIndex_(i - 1).pyobjc_instanceMethods.position(), Node.pyobjc_instanceMethods.position())
if self.closed:
pen.closePath()
else:
pen.endPath()
return
GSPath.draw = DrawPathWithPen
def __GSPath__drawPoints__(self, pen):
	'''draw the object with a point pen'''
pen.beginPath()
for i in range(len(self)):
Node = self.nodeAtIndex_(i)
node_type = Node.type
if Node.type == GSOFFCURVE:
node_type = None
pen.addPoint(Node.position, segmentType=node_type, smooth=Node.smooth, name=Node.name)
pen.endPath()
GSPath.drawPoints = __GSPath__drawPoints__
def Path_addNodesAtExtremes(self, force=False):
self.addExtremes_(force)
GSPath.addNodesAtExtremes = Path_addNodesAtExtremes
'''
.. function:: addNodesAtExtremes()
Add nodes at the path's extrema, e.g. at the top and bottom.
.. versionadded:: 2.3
'''
def __CGPath_applyTransform__(self, transformStruct):
Transform = NSAffineTransform.transform()
Transform.setTransformStruct_(transformStruct)
for node in self.nodes:
node.position = Transform.transformPoint_(node.positionPrecise())
GSPath.applyTransform = __CGPath_applyTransform__
'''
.. function:: applyTransform
Apply a transformation matrix to the path.
.. code-block:: python
path = layer.paths[0]
path.applyTransform((
0.5, # x scale factor
0.0, # x skew factor
0.0, # y skew factor
0.5, # y scale factor
0.0, # x position
0.0 # y position
))
'''
##################################################################################
#
#
#
# GSNode
#
#
#
##################################################################################
def ________________(): pass
def ____GSNode____(): pass
def ________________(): pass
'''
:mod:`GSNode`
===============================================================================
Implementation of the node object.
For details on how to access them, please see :attr:`GSPath.nodes`
.. class:: GSNode([pt, type = type])
:param pt: The position of the node.
:param type: The type of the node, LINE, CURVE or OFFCURVE
Properties
.. autosummary::
position
type
connection
selected
index
nextNode
prevNode
name
Functions
.. autosummary::
makeNodeFirst()
toggleConnection()
**Properties**
'''
def Node__init__(self, pt=None, type=None, x=None, y=None, name=None, pointType=None):
if type is None and pointType is not None:
type = pointType
if pt:
self.setPosition_(pt)
elif x is not None and y is not None:
self.setPosition_((x, y))
if type:
self.type = type
if name:
self.name = name
GSNode.__init__ = Node__init__
def Node__repr__(self):
NodeType = self.type
if self.type != OFFCURVE and self.smooth:
NodeType += " smooth"
return "<GSNode x=%s y=%s %s>" % (self.position.x, self.position.y, NodeType)
GSNode.__repr__ = python_method(Node__repr__)
GSNode.mutableCopyWithZone_ = GSObject__copy__
GSNode.position = property(lambda self: self.pyobjc_instanceMethods.position(),
lambda self, value: self.setPosition_(value))
'''
.. attribute:: position
The position of the node.
:type: NSPoint
'''
def __GSNode_get_type__(self):
GS_Type = self.pyobjc_instanceMethods.type()
if GS_Type == GSMOVE_:
return MOVE
elif GS_Type == GSOFFCURVE_:
return OFFCURVE
elif GS_Type == GSCURVE_:
return CURVE
elif GS_Type == GSQCURVE_:
return QCURVE
else:
return LINE
def __GSNode_set_type__(self, value):
if value == MOVE:
self.setType_(GSLINE_)
elif value == LINE:
self.setType_(GSLINE_)
elif value == OFFCURVE:
self.setType_(GSOFFCURVE_)
elif value == CURVE:
self.setType_(GSCURVE_)
elif value == QCURVE:
self.setType_(GSQCURVE_)
GSNode.type = property(__GSNode_get_type__, __GSNode_set_type__, doc="")
'''
.. attribute:: type
The type of the node, LINE, CURVE or OFFCURVE
Always compare against the constants, never against the actual value.
:type: str
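A sketch of such a comparison:
.. code-block:: python
	# print the position of all off-curve points
	for node in layer.paths[0].nodes:
		if node.type == OFFCURVE:
			print(node.position.x, node.position.y)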
'''
def __GSNode__get_smooth(self):
return self.connection == GSSMOOTH
def __GSNode__set_smooth(self, value):
if value is True:
self.setConnection_(GSSMOOTH)
else:
self.setConnection_(GSSHARP)
GSNode.smooth = property(__GSNode__get_smooth, __GSNode__set_smooth, doc="")
'''
.. attribute:: smooth
Defines whether the connection is smooth or not
:type: bool
.. versionadded:: 2.3
'''
def __GSNode_get_connection(self):
GS_Type = self.pyobjc_instanceMethods.connection()
if GS_Type == GSSHARP:
return GSSHARP
else:
return GSSMOOTH
def __GSNode_set_connection(self, value):
if value == GSSHARP:
self.setConnection_(GSSHARP)
else:
self.setConnection_(GSSMOOTH)
GSNode.connection = property(__GSNode_get_connection, __GSNode_set_connection, doc="")
'''
.. attribute:: connection
The type of the connection, SHARP or SMOOTH
:type: string
.. deprecated:: 2.3
Use :attr:`smooth <GSNode.smooth>` instead.
'''
GSNode.parent = property(lambda self: self.pyobjc_instanceMethods.parent())
GSNode.layer = property(lambda self: self.pyobjc_instanceMethods.layer())
'''
.. attribute:: selected
Selection state of node in UI.
.. code-block:: python
# select node
layer.paths[0].nodes[0].selected = True
# print selection state
print(layer.paths[0].nodes[0].selected)
:type: bool
'''
def __GSNode__index__(self):
try:
return self.parent.indexOfNode_(self)
except:
return NSNotFound
GSNode.index = property(lambda self: __GSNode__index__(self))
'''
.. attribute:: index
Returns the index of the node in the containing path, or NSNotFound if it is not in a path.
:type: int
.. versionadded:: 2.3
'''
def __GSNode__nextNode__(self):
try:
index = self.parent.indexOfNode_(self)
if index == (len(self.parent.nodes) - 1):
return self.parent.nodes[0]
elif index < len(self.parent.nodes):
return self.parent.nodes[index + 1]
except:
pass
return None
GSNode.nextNode = property(lambda self: __GSNode__nextNode__(self))
'''
.. attribute:: nextNode
Returns the next node in the path.
Please note that this is regardless of the position of the node in the path and will jump across the path border to the beginning of the path if the current node is the last.
If you need to take into consideration the position of the node in the path, use the node’s index attribute and check it against the path length.
.. code-block:: python
print(layer.paths[0].nodes[0].nextNode) # returns the second node in the path (index 0 + 1)
print(layer.paths[0].nodes[-1].nextNode) # returns the first node in the path (last node >> jumps to beginning of path)
# check if node is last node in path (with at least two nodes)
print(layer.paths[0].nodes[0].index == (len(layer.paths[0].nodes) - 1)) # returns False for first node
print(layer.paths[0].nodes[-1].index == (len(layer.paths[0].nodes) - 1)) # returns True for last node
:type: GSNode
.. versionadded:: 2.3
'''
def __GSNode__prevNode__(self):
try:
index = self.parent.indexOfNode_(self)
if index == 0:
return self.parent.nodes[-1]
elif index < len(self.parent.nodes):
return self.parent.nodes[index - 1]
except:
pass
return None
GSNode.prevNode = property(lambda self: __GSNode__prevNode__(self))
'''
.. attribute:: prevNode
Returns the previous node in the path.
Please note that this is regardless of the position of the node in the path, and will jump across the path border to the end of the path if the current node is the first.
If you need to take into consideration the position of the node in the path, use the node’s index attribute and check it against the path length.
.. code-block:: python
print(layer.paths[0].nodes[0].prevNode) # returns the last node in the path (first node >> jumps to end of path)
print(layer.paths[0].nodes[-1].prevNode) # returns second last node in the path
# check if node is first node in path (with at least two nodes)
print(layer.paths[0].nodes[0].index == 0) # returns True for first node
print(layer.paths[0].nodes[-1].index == 0) # returns False for last node
:type: GSNode
.. versionadded:: 2.3
'''
def __GSNode__get_name(self):
try:
return self.userDataForKey_("name")
except:
pass
return None
def __GSNode__set_name(self, value):
if value is None or isString(value):
self.setUserData_forKey_(value, "name")
else:
raise(ValueError)
GSNode.name = property(__GSNode__get_name, __GSNode__set_name, doc="")
'''
.. attribute:: name
Attaches a name to a node.
:type: unicode
.. versionadded:: 2.3
'''
GSNode.userData = property(lambda self: UserDataProxy(self))
'''
.. attribute:: userData
A dictionary to store user data. Use a unique key and only use objects that can be stored in a property list (string, list, dict, numbers, NSData) otherwise the data will not be recoverable from the saved file.
:type: dict
.. code-block:: python
# set value
node.userData['rememberToMakeCoffee'] = True
# delete value
del node.userData['rememberToMakeCoffee']
.. versionadded:: 2.4.1
**Functions**
.. function:: makeNodeFirst()
Turn this node into the start point of the path.
.. function:: toggleConnection()
Toggle between sharp and smooth connections.
'''
##################################################################################
#
#
#
# GSGuideLine
#
#
#
##################################################################################
def ______________________(): pass
def ____GSGuideline____(): pass
def ______________________(): pass
'''
:mod:`GSGuideLine`
===============================================================================
Implementation of the guide object.
For details on how to access them, please see :attr:`GSLayer.guides`
.. class:: GSGuideLine()
**Properties**
.. autosummary::
position
angle
name
selected
locked
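A minimal creation sketch; the position and angle are example values:
.. code-block:: python
	guide = GSGuideLine()
	guide.position = NSPoint(100, 100)
	guide.angle = 45.0
	# append the new guide to the current layer's guides
	layer.guides.append(guide)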
'''
def GuideLine__init__(self):
pass
GSGuideLine.__init__ = GuideLine__init__
def GuideLine__repr__(self):
return "<GSGuideLine x=%s y=%s angle=%s>" % (self.position.x, self.position.y, self.angle)
GSGuideLine.__repr__ = python_method(GuideLine__repr__)
GSGuideLine.mutableCopyWithZone_ = GSObject__copy__
GSGuideLine.position = property(lambda self: self.pyobjc_instanceMethods.position(),
lambda self, value: self.setPosition_(value))
'''
.. attribute:: position
The position of the node.
:type: NSPoint
'''
GSGuideLine.angle = property(lambda self: self.pyobjc_instanceMethods.angle(),
lambda self, value: self.setAngle_(float(value)))
'''
.. attribute:: angle
The angle of the guideline
:type: float
'''
GSGuideLine.name = property(lambda self: self.pyobjc_instanceMethods.name(),
lambda self, value: self.setName_(value))
'''
.. attribute:: name
an optional name
:type: unicode
.. attribute:: selected
Selection state of guideline in UI.
.. code-block:: python
# select guideline
layer.guidelines[0].selected = True
# print selection state
print(layer.guidelines[0].selected)
:type: bool
'''
GSGuideLine.locked = property(lambda self: bool(self.pyobjc_instanceMethods.locked()),
lambda self, value: self.setLocked_(value))
'''
.. attribute:: locked
Defines whether the guideline is locked
:type: bool
'''
##################################################################################
#
#
#
# GSAnnotation
#
#
#
##################################################################################
def _______________________(): pass
def ____GSAnnotation____(): pass
def _______________________(): pass
'''
:mod:`GSAnnotation`
===============================================================================
Implementation of the annotation object.
For details on how to access them, please see :class:`GSLayer.annotations`
.. class:: GSAnnotation()
.. autosummary::
position
type
text
angle
width
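A minimal creation sketch; the type, text and position are example values:
.. code-block:: python
	annotation = GSAnnotation()
	annotation.type = TEXT
	annotation.text = 'check stem width'
	annotation.position = NSPoint(100, 50)
	# append the new annotation to the current layer's annotations
	layer.annotations.append(annotation)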
**Properties**
'''
GSAnnotation.__new__ = staticmethod(GSObject__new__)
def Annotation__init__(self):
pass
GSAnnotation.__init__ = Annotation__init__
def Annotation__repr__(self):
TypeName = "n/a"
if (self.type == TEXT):
TypeName = "Text"
elif (self.type == ARROW):
TypeName = "Arrow"
elif (self.type == CIRCLE):
TypeName = "Circle"
elif (self.type == PLUS):
TypeName = "Plus"
elif (self.type == MINUS):
TypeName = "Minus"
return "<%s %s x=%s y=%s>" % (self.className(), TypeName, self.position.x, self.position.y)
GSAnnotation.__repr__ = python_method(Annotation__repr__)
GSAnnotation.mutableCopyWithZone_ = GSObject__copy__
GSAnnotation.position = property(lambda self: self.pyobjc_instanceMethods.position(),
lambda self, value: self.setPosition_(value))
'''
.. attribute:: position
The position of the annotation.
:type: NSPoint
'''
GSAnnotation.type = property(lambda self: self.pyobjc_instanceMethods.type(),
lambda self, value: self.setType_(value))
'''
.. attribute:: type
The type of the annotation.
Available constants are:
:const:`TEXT`
:const:`ARROW`
:const:`CIRCLE`
:const:`PLUS`
:const:`MINUS`
:type: int
'''
GSAnnotation.text = property(lambda self: self.pyobjc_instanceMethods.text(),
lambda self, value: self.setText_(value))
'''
.. attribute:: text
The content of the annotation. Only useful if type == TEXT
:type: unicode
'''
GSAnnotation.angle = property(lambda self: self.pyobjc_instanceMethods.angle(),
lambda self, value: self.setAngle_(value))
'''
.. attribute:: angle
The angle of the annotation.
:type: float
'''
GSAnnotation.width = property(lambda self: self.pyobjc_instanceMethods.width(),
lambda self, value: self.setWidth_(value))
'''
.. attribute:: width
The width of the annotation.
:type: float
'''
##################################################################################
#
#
#
# GSHint
#
#
#
##################################################################################
def ________________(): pass
def ____GSHint____(): pass
def ________________(): pass
'''
:mod:`GSHint`
===============================================================================
Implementation of the hint object.
For details on how to access them, please see :class:`GSLayer.hints`
.. class:: GSHint()
.. autosummary::
parent
originNode
targetNode
otherNode1
otherNode2
type
horizontal
selected
**Properties**
'''
GSHint.__new__ = staticmethod(GSObject__new__)
def Hint__init__(self):
pass
GSHint.__init__ = Hint__init__
def Hint__origin__pos(self):
if (self.originNode):
if self.horizontal:
return self.originNode.position.y
else:
return self.originNode.position.x
return self.pyobjc_instanceMethods.origin()
def Hint__width__pos(self):
if (self.targetNode):
if self.horizontal:
return self.targetNode.position.y
else:
return self.targetNode.position.x
width = self.pyobjc_instanceMethods.width()
if width > 100000:
width = 0
return width
def Hint__repr__(self):
if self.isTrueType():
return self.description()
if self.horizontal:
direction = "hori"
else:
direction = "vert"
if self.type == BOTTOMGHOST or self.type == TOPGHOST:
return "<GSHint %s origin=(%s)>" % (hintConstants[self.type], self.position)
elif self.type == STEM:
return "<GSHint %s Stem origin=(%s) target=(%s)>" % (direction, self.position, self.width)
elif self.type == CORNER or self.type == CAP:
return "<GSHint %s %s>" % (hintConstants[self.type], self.name)
else:
return "<GSHint %s %s>" % (hintConstants[self.type], direction)
GSHint.__repr__ = python_method(Hint__repr__)
GSHint.mutableCopyWithZone_ = GSObject__copy__
GSHint.parent = property(lambda self: self.pyobjc_instanceMethods.parent())
'''
.. attribute:: parent
Parent layer of hint.
:type: GSLayer
.. versionadded:: 2.4.2
'''
GSHint.scale = property(lambda self: self.pyobjc_instanceMethods.scale(),
lambda self, value: self.setScale_(value))
GSHint.originNode = property(lambda self: self.pyobjc_instanceMethods.originNode(),
lambda self, value: self.setOriginNode_(value))
'''
.. attribute:: originNode
The first node the hint is attached to.
:type: :class:`GSNode`
'''
GSHint.position = property(lambda self: Hint__origin__pos(self),
lambda self, value: self.setOrigin_(value))
GSHint.width = property(lambda self: Hint__width__pos(self),
lambda self, value: self.setOrigin_(value))
def __indexPathToIndexes__(indexPath):
if indexPath is not None:
indexes = []
for idx in range(len(indexPath)):
indexes.append(indexPath.indexAtPosition_(idx))
return indexes
return None
GSHint.origin = property(lambda self: __indexPathToIndexes__(self.originIndex()))
GSHint.target = property(lambda self: __indexPathToIndexes__(self.targetIndex()))
GSHint.other1 = property(lambda self: __indexPathToIndexes__(self.otherIndex1()))
GSHint.other2 = property(lambda self: __indexPathToIndexes__(self.otherIndex2()))
GSHint.targetNode = property(lambda self: self.pyobjc_instanceMethods.targetNode(),
lambda self, value: self.setTargetNode_(value))
'''
.. attribute:: targetNode
The second node this hint is attached to. In the case of a ghost hint, this value will be empty.
:type: :class:`GSNode`
'''
GSHint.otherNode1 = property(lambda self: self.valueForKey_("otherNode1"),
lambda self, value: self.setOtherNode1_(value))
'''
.. attribute:: otherNode1
A third node this hint is attached to. Used for Interpolation or Diagonal hints.
:type: :class:`GSNode`'''
GSHint.otherNode2 = property(lambda self: self.valueForKey_("otherNode2"),
lambda self, value: self.setOtherNode2_(value))
'''
.. attribute:: otherNode2
A fourth node this hint is attached to. Used for Diagonal hints.
:type: :class:`GSNode`'''
GSHint.type = property(lambda self: self.pyobjc_instanceMethods.type(),
lambda self, value: self.setType_(value))
'''
.. attribute:: type
See Constants section at the bottom of the page.
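A creation sketch; the chosen nodes and the STEM type are example values:
.. code-block:: python
	hint = GSHint()
	hint.originNode = layer.paths[0].nodes[0]
	hint.targetNode = layer.paths[0].nodes[1]
	hint.type = STEM
	layer.hints.append(hint)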
:type: int'''
GSHint.options = property(lambda self: self.pyobjc_instanceMethods.options(),
lambda self, value: self.setOptions_(value))
'''
.. attribute:: options
Stores extra options for the hint. For TT hints, that might be the rounding settings.
See Constants section at the bottom of the page.
:type: int'''
GSHint.horizontal = property(lambda self: self.pyobjc_instanceMethods.horizontal(),
lambda self, value: self.setHorizontal_(value))
'''
.. attribute:: horizontal
True if hint is horizontal, False if vertical.
:type: bool'''
'''
.. attribute:: selected
Selection state of hint in UI.
.. code-block:: python
# select hint
layer.hints[0].selected = True
# print selection state
print(layer.hints[0].selected)
:type: bool
'''
GSHint.name = property(lambda self: self.pyobjc_instanceMethods.name(), lambda self, value: self.setName_(objcObject(value)))
'''
.. attribute:: name
.. versionadded:: 2.3.1
Name of the hint. This is the referenced glyph for corner and cap components.
:type: string'''
def GSHint__stem__(self):
value = self.pyobjc_instanceMethods.stem()
stems = self.parent.master.customParameters['TTFStems']
if stems and -1 <= value <= (len(stems) - 1):
return value
else:
return -2
def GSHint__setStem__(self, value):
stems = self.parent.master.customParameters['TTFStems']
if not stems:
raise ValueError('The master of this layer has no defined "TTFStems" custom parameter')
if stems and -1 <= value <= (len(stems) - 1):
self.pyobjc_instanceMethods.setStem_(value)
elif value == -2:
self.pyobjc_instanceMethods.setStem_(sys.maxint)
else:
raise ValueError('Wrong value. Stem values can be indices of TT stems ("TTFStems" master custom parameter) or -1 for no stem or -2 for automatic.')
GSHint.stem = property(lambda self: GSHint__stem__(self),
lambda self, value: GSHint__setStem__(self, value))
'''
.. attribute:: stem
.. versionadded:: 2.4.2
Index of TrueType stem that this hint is attached to. The stems are defined in the custom parameter "TTFStems" per master.
For no stem, value is -1.
For automatic, value is -2.
:type: integer'''
##################################################################################
#
#
#
# GSBackgroundImage
#
#
#
##################################################################################
def ______________________________(): pass
def ____GSBackgroundImage____(): pass
def ______________________________(): pass
'''
:mod:`GSBackgroundImage`
===============================================================================
Implementation of background image.
For details on how to access it, please see :class:`GSLayer.backgroundImage`
.. class:: GSBackgroundImage([path])
:param path: Initialize with an image file (optional)
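A minimal sketch; the file path is an example value:
.. code-block:: python
	layer = Glyphs.font.selectedLayers[0] # current layer
	layer.backgroundImage = GSBackgroundImage('/path/to/scan.png')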
Properties
.. autosummary::
path
image
crop
locked
position
scale
rotation
transform
alpha
Functions
.. autosummary::
resetCrop()
scaleWidthToEmUnits()
scaleHeightToEmUnits()
**Properties**
'''
def BackgroundImage__init__(self, path=None):
if path:
self.setImagePath_(path)
self.loadImage()
GSBackgroundImage.__init__ = BackgroundImage__init__
def BackgroundImage__repr__(self):
return "<GSBackgroundImage '%s'>" % self.imagePath()
GSBackgroundImage.__repr__ = python_method(BackgroundImage__repr__)
GSBackgroundImage.mutableCopyWithZone_ = GSObject__copy__
def BackgroundImage_setPath(self, path):
self.setImagePath_(path)
self.loadImage()
GSBackgroundImage.path = property(lambda self: self.pyobjc_instanceMethods.imagePath(),
lambda self, value: BackgroundImage_setPath(self, value))
'''
.. attribute:: path
Path to image file.
:type: unicode
'''
GSBackgroundImage.image = property(lambda self: self.pyobjc_instanceMethods.image())
'''
.. attribute:: image
:class:`NSImage` object of background image, read-only (as in: not settable)
:type: :class:`NSImage`
'''
GSBackgroundImage.crop = property(lambda self: self.pyobjc_instanceMethods.crop(),
lambda self, value: self.setCrop_(value))
'''
.. attribute:: crop
Crop rectangle. This is relative to the image size in pixels, not the font's em units (just in case the image is scaled to something other than 100%).
:type: :class:`NSRect`
.. code-block:: python
# change cropping
layer.backgroundImage.crop = NSRect(NSPoint(0, 0), NSSize(1200, 1200))
'''
GSBackgroundImage.locked = property(lambda self: bool(self.pyobjc_instanceMethods.locked()),
lambda self, value: self.setLocked_(value))
'''
.. attribute:: locked
Defines whether the image is locked for access in the UI.
:type: bool
'''
GSBackgroundImage.alpha = property(lambda self: self.pyobjc_instanceMethods.alpha(),
lambda self, value: self.setAlpha_(value))
'''
.. attribute:: alpha
Defines the transparency of the image in the Edit view. Default is 50%, possible values are 10–100.
To reset it to default, set it to anything other than the allowed values.
:type: int
.. versionadded:: 2.3
'''
def BackgroundImage_getPosition(self):
return NSPoint(self.transform[4], self.transform[5])
def BackgroundImage_setPosition(self, pos):
self.transform = ((self.transform[0], self.transform[1], self.transform[2], self.transform[3], pos.x, pos.y))
GSBackgroundImage.position = property(lambda self: BackgroundImage_getPosition(self),
lambda self, value: BackgroundImage_setPosition(self, value))
'''
.. attribute:: position
Position of image in font units.
:type: :class:`NSPoint`
.. code-block:: python
# change position
layer.backgroundImage.position = NSPoint(50, 50)
'''
def BackgroundImage_getScale(self):
(x, y, r) = self.getScaleX_scaleY_rotation_(None, None, None)
return (x, y)
def BackgroundImage_setScale(self, scale):
(x, y, r) = self.getScaleX_scaleY_rotation_(None, None, None)
if type(scale) == tuple:
self.setScaleX_scaleY_rotation_(scale[0], scale[1], r)
elif type(scale) == int or type(scale) == float:
self.setScaleX_scaleY_rotation_(scale, scale, r)
GSBackgroundImage.scale = property(lambda self: BackgroundImage_getScale(self),
lambda self, value: BackgroundImage_setScale(self, value))
'''
.. attribute:: scale
Scale factor of image.
A scale factor of 1.0 (100%) means that 1 font unit is equal to 1 point.
Set the scale factor for x and y scale simultaneously with an integer or a float value. For separate scale factors, please use a tuple.
.. code-block:: python
# change scale
layer.backgroundImage.scale = 1.2 # changes x and y to 120%
layer.backgroundImage.scale = (1.1, 1.2) # changes x to 110% and y to 120%
:type: tuple
'''
def BackgroundImage_getRotation(self):
(x, y, rotation) = self.getScaleX_scaleY_rotation_(None, None, None)
return rotation
def BackgroundImage_setRotation(self, rotation):
(x, y, r) = self.getScaleX_scaleY_rotation_(None, None, None)
self.setScaleX_scaleY_rotation_(x, y, rotation)
GSBackgroundImage.rotation = property(lambda self: BackgroundImage_getRotation(self),
lambda self, value: BackgroundImage_setRotation(self, value))
'''
.. attribute:: rotation
Rotation angle of image.
:type: float
'''
GSBackgroundImage.transform = property(lambda self: self.pyobjc_instanceMethods.transformStruct(),
lambda self, value: self.setTransformStruct_(value))
'''
.. attribute:: transform
Transformation matrix.
:type: :class:`NSAffineTransformStruct`
.. code-block:: python
# change transformation
layer.backgroundImage.transform = ((
1.0, # x scale factor
0.0, # x skew factor
0.0, # y skew factor
1.0, # y scale factor
0.0, # x position
0.0 # y position
))
**Functions**
'''
def BackgroundImage_resetCrop(self):
self.crop = NSRect(NSPoint(0, 0), self.image.size())
GSBackgroundImage.resetCrop = BackgroundImage_resetCrop
'''
.. function:: resetCrop
Resets the cropping to the image's original dimensions.
'''
def BackgroundImage_scaleWidthToEmUnits(self, value):
self.scale = float(value) / float(self.crop.size.width)
GSBackgroundImage.scaleWidthToEmUnits = BackgroundImage_scaleWidthToEmUnits
'''
.. function:: scaleWidthToEmUnits
Scale the image's cropped width to a certain em unit value, retaining its aspect ratio.
.. code-block:: python
# fit image in layer's width
layer.backgroundImage.scaleWidthToEmUnits(layer.width)
'''
def BackgroundImage_scaleHeightToEmUnits(self, value):
self.scale = float(value) / float(self.crop.size.height)
GSBackgroundImage.scaleHeightToEmUnits = BackgroundImage_scaleHeightToEmUnits
'''
.. function:: scaleHeightToEmUnits
Scale the image's cropped height to a certain em unit value, retaining its aspect ratio.
.. code-block:: python
# position image's origin at descender line
layer.backgroundImage.position = NSPoint(0, font.masters[0].descender)
# scale image to UPM value
layer.backgroundImage.scaleHeightToEmUnits(font.upm)
'''
##################################################################################
#
#
#
# GSEditViewController
#
#
#
##################################################################################
def _______________________________(): pass
def ____GSEditViewController____(): pass
def _______________________________(): pass
'''
:mod:`GSEditViewController`
===============================================================================
Implementation of the GSEditViewController object, which represents Edit tabs in the UI.
For details on how to access them, please look at :class:`GSFont.tabs`
.. class:: GSEditViewController()
Properties
.. autosummary::
parent
text
layers
composedLayers
scale
viewPort
bounds
selectedLayerOrigin
textCursor
textRange
direction
features
previewInstances
previewHeight
bottomToolbarHeight
masterIndex
Functions
.. autosummary::
close()
saveToPDF()
**Properties**
'''
GSEditViewController.parent = property(lambda self: self.representedObject())
'''
.. attribute:: parent
The :class:`GSFont` object that this tab belongs to.
:type: :class:`GSFont`
'''
GSEditViewController.text = property(lambda self: self.graphicView().displayStringSave_(False),
lambda self, value: self.graphicView().setDisplayString_(value))
'''
.. attribute:: text
The text of the tab, either as text, or slash-escaped glyph names, or mixed. OpenType features will be applied after the text has been changed.
:type: Unicode
'''
def __GSEditViewController__repr__(self):
nameString = self.text
if len(nameString) > 30:
nameString = nameString[:30] + '...'
nameString = nameString.replace('\n', '\\n')
import codecs
return codecs.encode("<GSEditViewController %s>" % nameString, 'ascii', 'backslashreplace')
GSEditViewController.__repr__ = python_method(__GSEditViewController__repr__)
GSEditViewController.masterIndex = property(lambda self: self.pyobjc_instanceMethods.masterIndex(), lambda self, value: self.setMasterIndex_(value))
'''
.. attribute:: masterIndex
The index of the active master (selected in the toolbar).
:type: int
.. versionadded:: 2.6.1
'''
class TabLayersProxy (Proxy):
def __getitem__(self, idx):
if type(idx) == slice:
return self.values().__getitem__(idx)
else:
return self.values()[idx]
def deactivateFeatures(self):
self.savedFeatures = copy.copy(self._owner.features)
self._owner.features = []
def activateFeatures(self):
self._owner.features = self.savedFeatures
def setter(self, layers):
self.deactivateFeatures()
if not (type(layers) is list or type(layers) is tuple or "objectAtIndex_" in layers.__class__.__dict__ or type(layers) is type(self)):
raise ValueError
if type(layers) is type(self):
layers = layers.values()
string = NSMutableAttributedString.alloc().init()
Font = self._owner.representedObject()
for l in layers:
if l.className() == "GSLayer":
char = Font.characterForGlyph_(l.parent)
A = NSAttributedString.alloc().initWithString_attributes_(NSString.stringWithChar_(char), {"GSLayerIdAttrib": l.layerId})
elif l.className() == "GSBackgroundLayer":
char = Font.characterForGlyph_(l.parent)
A = NSAttributedString.alloc().initWithString_attributes_(NSString.stringWithChar_(char), {"GSLayerIdAttrib": l.layerId, "GSShowBackgroundAttrib": True})
elif l.className() == "GSControlLayer":
char = l.parent.unicodeChar()
A = NSAttributedString.alloc().initWithString_(NSString.stringWithChar_(char))
else:
raise ValueError
string.appendAttributedString_(A)
self._owner.graphicView().textStorage().setText_(string)
self.activateFeatures()
def composedLayers(self):
return list(self._owner.graphicView().layoutManager().cachedGlyphs())
def values(self):
self.deactivateFeatures()
layers = list(self._owner.graphicView().layoutManager().cachedGlyphs())
self.activateFeatures()
return layers
def append(self, value):
values = copy.copy(self.values())
values.append(value)
self.setter(values)
def remove(self, value):
values = self.values()
values.remove(value)
self.setter(values)
GSEditViewController.layers = property(lambda self: TabLayersProxy(self), lambda self, value: TabLayersProxy(self).setter(value))
'''
.. attribute:: layers
Alternatively, you can set (and read) a list of :class:`GSLayer` objects. These can be any of the layers of a glyph. OpenType features will be applied after the layers have been changed.
:type: list
.. code-block:: python
font.tabs[0].layers = []
# display all layers of one glyph next to each other
for layer in font.glyphs['a'].layers:
font.tabs[0].layers.append(layer)
# append line break
font.tabs[0].layers.append(GSControlLayer(10)) # 10 being the ASCII code of the new line character (\n)
'''
GSEditViewController.composedLayers = property(lambda self: TabLayersProxy(self).composedLayers())
'''
.. attribute:: composedLayers
Similar to the above, but this list contains the :class:`GSLayer` objects after the OpenType features have been applied (see :class:`GSEditViewController.features`). Read-only.
:type: list
.. versionadded:: 2.4
'''
GSEditViewController.scale = property(lambda self: self.graphicView().scale(), lambda self, value: self.graphicView().setScale_(value))
'''
.. attribute:: scale
Scale (zoom factor) of the Edit view. Useful for drawing activity in plugins.
The scale changes with every zoom step of the Edit view. So if you want to draw objects (e.g. text, stroke thickness etc.) into the Edit view at a constant size relative to the UI (e.g. constant text size on screen), you need to calculate the object's size relative to the scale factor. See example below.
.. code-block:: python
print(font.currentTab.scale)
0.414628537193
# Calculate text size
desiredTextSizeOnScreen = 10 #pt
scaleCorrectedTextSize = desiredTextSizeOnScreen / font.currentTab.scale
print(scaleCorrectedTextSize)
24.1179733255
:type: float
.. versionadded:: 2.3
'''
GSEditViewController.viewPort = property(lambda self: self.frameView().visibleRect(), lambda self, value: self.frameView().zoomViewToRect_(value))
'''
.. attribute:: viewPort
The visible area of the Edit view in screen pixel coordinates (view coordinates).
The NSRect’s origin value describes the top-left corner (top-right for RTL, both at ascender height) of the combined glyphs’ bounding box (see :attr:`bounds <GSEditViewController.bounds>`), which also serves as the origin of the view plane.
The NSRect’s size value describes the width and height of the visible area.
When using drawing methods such as the view-coordinate-relative method in the Reporter Plugin, use these coordinates.
.. code-block:: python
# The far corners of the Edit view:
# Lower left corner of the screen
x = font.currentTab.viewPort.origin.x
y = font.currentTab.viewPort.origin.y
# Top left corner of the screen
x = font.currentTab.viewPort.origin.x
y = font.currentTab.viewPort.origin.y + font.currentTab.viewPort.size.height
# Top right corner of the screen
x = font.currentTab.viewPort.origin.x + font.currentTab.viewPort.size.width
y = font.currentTab.viewPort.origin.y + font.currentTab.viewPort.size.height
# Bottom right corner of the screen
x = font.currentTab.viewPort.origin.x + font.currentTab.viewPort.size.width
y = font.currentTab.viewPort.origin.y
:type: NSRect
.. versionadded:: 2.3
'''
GSEditViewController.bounds = property(lambda self: self.frameView().glyphFrame())
'''
.. attribute:: bounds
Bounding box of all glyphs in the Edit view in view coordinate values.
:type: NSRect
.. versionadded:: 2.3
'''
GSEditViewController.selectedLayerOrigin = property(lambda self: self.graphicView().activePosition())
'''
.. attribute:: selectedLayerOrigin
Position of the active layer’s origin (0,0) relative to the origin of the view plane (see :attr:`bounds <GSEditViewController.bounds>`), in view coordinates.
:type: NSPoint
.. versionadded:: 2.3
'''
GSEditViewController.textCursor = property(lambda self: self.graphicView().selectedRange().location,
lambda self, value: self.graphicView().setSelectedRange_(NSRange(value, self.graphicView().selectedRange().length)))
'''
.. attribute:: textCursor
Position of text cursor in text, starting with 0.
:type: integer
.. versionadded:: 2.3
'''
GSEditViewController.textRange = property(lambda self: self.contentView().selectedRange().length,
lambda self, value: self.contentView().setSelectedRange_(NSRange(self.textCursor, value)))
'''
.. attribute:: textRange
Number of selected glyphs in text, starting at cursor position (see above).
:type: integer
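For example, the first three glyphs in the tab could be selected like this (a minimal sketch, assuming an open Edit view):
.. code-block:: python
font.currentTab.textCursor = 0
font.currentTab.textRange = 3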
.. versionadded:: 2.3
'''
GSEditViewController.layersCursor = property(lambda self: self.graphicView().cachedLayerSelectionRange().location)
'''
.. attribute:: layersCursor
Position of cursor in the layers list, starting with 0.
.. seealso:: `GSEditViewController.layers`
:type: integer
.. versionadded:: 2.4
'''
GSEditViewController.direction = property(lambda self: self.writingDirection(), lambda self, value: self.setWritingDirection_(value))
'''
.. attribute:: direction
Writing direction.
Defined constants are: LTR (left to right), RTL (right to left), LTRTTB (left to right, vertical, top to bottom e.g. Mongolian), and RTLTTB (right to left, vertical, top to bottom e.g. Chinese, Japanese, Korean)
:type: integer
.. code-block:: python
font.currentTab.direction = RTL
.. versionadded:: 2.3
'''
class TabSelectedFeaturesProxy (Proxy):
def reflow(self):
self._owner.graphicView().reflow()
self._owner.graphicView().layoutManager().updateActiveLayer()
self._owner._updateFeaturePopup()
def setter(self, values):
if not (type(values) is list or type(values) is tuple or type(values) is type(self)):
raise TypeError
self._owner.pyobjc_instanceMethods.selectedFeatures().removeAllObjects()
if type(values) is type(self):
otherFeaturesProxy = values
values = list(otherFeaturesProxy.values())
for feature in values:
self.append(feature)
self.reflow()
def hasFeature(self, feature):
_hasFeature = False
for featureInFont in self._owner.parent.features:
if featureInFont.name == feature:
_hasFeature = True
if not _hasFeature:
LogError('Info: Feature "%s" not in font.\n' % (feature))
return _hasFeature
def append(self, feature):
if not isString(feature):
raise TypeError
if self.hasFeature(feature):
self._owner.selectedFeatures().append(feature)
self.reflow()
def extend(self, features):
if not isinstance(features, list):
raise TypeError
for feature in features:
if self.hasFeature(feature):
self._owner.selectedFeatures().append(feature)
self.reflow()
def remove(self, feature):
if not isString(feature):
raise TypeError
try:
self._owner.selectedFeatures().remove(feature)
except:
pass
self.reflow()
def values(self):
return self._owner.pyobjc_instanceMethods.selectedFeatures()
GSEditViewController.features = property(lambda self: TabSelectedFeaturesProxy(self), lambda self, value: TabSelectedFeaturesProxy(self).setter(value))
'''
.. attribute:: features
List of OpenType features applied to text in Edit view.
:type: list
.. code-block:: python
font.currentTab.features = ['locl', 'ss01']
.. versionadded:: 2.3
'''
# TODO documentation
class TempDataProxy(Proxy):
def __getitem__(self, Key):
return self._owner.tempData().get(Key, None)
def __setitem__(self, Key, Value):
if self._owner.tempData() is None:
self._owner.setTempData_(NSMutableDictionary.alloc().init())
self._owner.tempData()[Key] = Value
def __delitem__(self, Key):
del(self._owner.tempData()[Key])
def values(self):
if self._owner.tempData() is not None:
return self._owner.tempData().allValues()
return None
def __repr__(self):
return str(self._owner.tempData())
GSEditViewController.userData = property(lambda self: TempDataProxy(self))
def Get_ShowInPreview(self):
value = self.selectedInstance()
if value == -2:
value = 'live'
elif value == -1:
value = 'all'
else:
value = self.parent.instances[value]
return value
def Set_ShowInPreview(self, value):
if value == 'live':
self.setSelectedInstance_(-2)
elif value == 'all':
self.setSelectedInstance_(-1)
else:
self.setSelectedInstance_(self.parent.instances.index(value))
GSEditViewController.previewInstances = property(lambda self: Get_ShowInPreview(self), lambda self, value: Set_ShowInPreview(self, value))
'''
.. attribute:: previewInstances
Instances to show in the Preview area.
Values are ``'live'`` for the preview of the current content of the Edit view, ``'all'`` for interpolations of all instances of the current glyph, or individual GSInstance objects.
:type: string/GSInstance
.. code-block:: python
# Live preview of Edit view
font.currentTab.previewInstances = 'live'
# Text of Edit view shown in particular Instance interpolation (last defined instance)
font.currentTab.previewInstances = font.instances[-1]
# All instances of interpolation
font.currentTab.previewInstances = 'all'
.. versionadded:: 2.3
'''
GSEditViewController.previewHeight = property(lambda self: self.pyobjc_instanceMethods.previewHeight(), lambda self, value: self.setPreviewHeight_(value))
'''
.. attribute:: previewHeight
Height of the preview panel in the Edit view in pixels.
Needs to be set to 16 or higher for the preview panel to be visible at all. Will return 0 for a closed preview panel or the current size when visible.
:type: float
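For instance (a sketch; the pixel values are arbitrary):
.. code-block:: python
# open the preview panel at 100 pixels
font.currentTab.previewHeight = 100
# close it again
font.currentTab.previewHeight = 0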
.. versionadded:: 2.3
'''
GSEditViewController.bottomToolbarHeight = property(lambda self: self.previewSplitView().frame().origin.y)
'''
.. attribute:: bottomToolbarHeight
Height of the little toolbar at the very bottom of the window. Read-only.
:type: float
.. versionadded:: 2.4
'''
'''
**Functions**
'''
def Close_Tab(self):
for i, tab in enumerate(self.parent.tabs):
if tab == self:
break
del self.parent.tabs[i]
GSEditViewController.close = Close_Tab
'''
.. function:: close()
Close this tab.
'''
def GSEditViewController_saveToPDF(self, path, rect=None):
if rect is None:
rect = self.viewPort
pdf = self.graphicView().dataWithPDFInsideRect_(rect)
pdf.writeToFile_atomically_(path, True)
GSEditViewController.saveToPDF = GSEditViewController_saveToPDF
'''
.. function:: saveToPDF(path[, rect])
Save the view to a PDF file.
:param path: Path to the file
:param rect: Optional. NSRect defining the view port. If omitted, :attr:`GSEditViewController.viewPort` will be used.
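A minimal usage sketch (the file path is hypothetical):
.. code-block:: python
font.currentTab.saveToPDF('/Users/me/Desktop/tab.pdf')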
.. versionadded:: 2.4
'''
##################################################################################
#
#
#
# GSGlyphInfo
#
#
#
##################################################################################
def _____________________(): pass
def ____GSGlyphInfo____(): pass
def _____________________(): pass
GSGlyphInfo.__new__ = staticmethod(GSObject__new__)
def GSGlyphInfo__init__(self):
pass
GSGlyphInfo.__init__ = GSGlyphInfo__init__
def GSGlyphInfo__repr__(self):
return "<GSGlyphInfo '%s'>" % (self.name)
GSGlyphInfo.__repr__ = python_method(GSGlyphInfo__repr__)
'''
:mod:`GSGlyphInfo`
===============================================================================
Implementation of the GSGlyphInfo object.
This contains valuable information from the glyph database. See :class:`GSGlyphsInfo` for how to create these objects.
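For instance, a GSGlyphInfo object could be fetched from the glyph database like this (a sketch, assuming the ``glyphInfoForName()`` API of the main application object):
.. code-block:: python
info = Glyphs.glyphInfoForName('adieresis')
print(info.name, info.category, info.unicode)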
.. class:: GSGlyphInfo()
Properties
.. autosummary::
name
productionName
category
subCategory
components
accents
anchors
unicode
unicode2
script
index
sortName
sortNameKeep
desc
altNames
**Properties**
'''
GSGlyphInfo.name = property(lambda self: self.pyobjc_instanceMethods.name())
'''
.. attribute:: name
Human-readable name of glyph ("nice name").
:type: unicode
'''
GSGlyphInfo.productionName = property(lambda self: self.pyobjc_instanceMethods.production())
'''
.. attribute:: productionName
Production name of glyph. Will return a value only if production name differs from nice name, otherwise None.
:type: unicode
'''
GSGlyphInfo.category = property(lambda self: self.pyobjc_instanceMethods.category())
'''
.. attribute:: category
This is mostly from the UnicodeData.txt file from unicode.org. Some corrections have been made (Accents, ...)
e.g: "Letter", "Number", "Punctuation", "Mark", "Separator", "Symbol", "Other"
:type: unicode
'''
GSGlyphInfo.subCategory = property(lambda self: self.pyobjc_instanceMethods.subCategory())
'''
.. attribute:: subCategory
This is mostly from the UnicodeData.txt file from unicode.org. Some corrections and additions have been made (Smallcaps, ...).
e.g: "Uppercase", "Lowercase", "Smallcaps", "Ligature", "Decimal Digit", ...
:type: unicode
'''
GSGlyphInfo.components = property(lambda self: self.pyobjc_instanceMethods.components())
'''
.. attribute:: components
The glyphs this glyph may be composed of, returned as a list of :class:`GSGlyphInfo` objects.
:type: list
'''
GSGlyphInfo.accents = property(lambda self: self.pyobjc_instanceMethods.accents())
'''
.. attribute:: accents
This glyph may be combined with these accents, returned as a list of glyph names.
:type: list
'''
GSGlyphInfo.anchors = property(lambda self: self.pyobjc_instanceMethods.anchors())
'''
.. attribute:: anchors
Anchors defined for this glyph, as a list of anchor names.
:type: list
'''
GSGlyphInfo.unicode = property(lambda self: self.pyobjc_instanceMethods.unicode())
'''
.. attribute:: unicode
Unicode value
:type: unicode
'''
GSGlyphInfo.unicode2 = property(lambda self: self.pyobjc_instanceMethods.unicode2())
'''
.. attribute:: unicode2
A second Unicode value, if present.
:type: unicode
'''
GSGlyphInfo.script = property(lambda self: self.pyobjc_instanceMethods.script())
'''
.. attribute:: script
Script of glyph, e.g: "latin", "cyrillic", "greek".
:type: unicode
'''
GSGlyphInfo.index = property(lambda self: self.pyobjc_instanceMethods.index())
'''
.. attribute:: index
Index of glyph in database. Used for sorting in UI.
:type: unicode
'''
GSGlyphInfo.sortName = property(lambda self: self.pyobjc_instanceMethods.sortName())
'''
.. attribute:: sortName
Alternative name of glyph used for sorting in UI.
:type: unicode
'''
GSGlyphInfo.sortNameKeep = property(lambda self: self.pyobjc_instanceMethods.sortNameKeep())
'''
.. attribute:: sortNameKeep
Alternative name of glyph used for sorting in UI, when using 'Keep Alternates Next to Base Glyph' from Font Info.
:type: unicode
'''
GSGlyphInfo.desc = property(lambda self: self.pyobjc_instanceMethods.desc())
'''
.. attribute:: desc
Unicode description of glyph.
:type: unicode
'''
GSGlyphInfo.altNames = property(lambda self: self.pyobjc_instanceMethods.altNames())
'''
.. attribute:: altNames
Alternative names for glyphs that are not used, but should be recognized (e.g., for conversion to nice names).
:type: unicode
'''
def ____________________(): pass
def ____METHODS____(): pass
def ____________________(): pass
def __GSPathPen_beginPath__(self, identifier=None, **kwargs):
self.beginPath_(identifier)
path = self.currentPath()
path.closed = True
GSPathPen.beginPath = __GSPathPen_beginPath__
def __GSPathPen_moveTo__(self, pt):
self.moveTo_(pt)
GSPathPen.moveTo = __GSPathPen_moveTo__
def __GSPathPen_lineTo__(self, pt):
self.lineTo_(pt)
GSPathPen.lineTo = __GSPathPen_lineTo__
def __GSPathPen_curveTo__(self, off1, off2, pt):
self.curveTo_off1_off2_(pt, off1, off2)
GSPathPen.curveTo = __GSPathPen_curveTo__
def __GSPathPen_addPoint__(self, pt, segmentType=None, smooth=False, name=None, identifier=None, **kwargs):
node = GSNode()
node.position = pt
path = self.currentPath()
if segmentType == "move":
path.closed = False
elif segmentType is not None:
node.type = segmentType
else:
node.type = OFFCURVE
if smooth:
node.smooth = True
if name is not None:
node.name = name
path.nodes.append(node)
GSPathPen.addPoint = __GSPathPen_addPoint__
def __PathOperator_removeOverlap__(paths):
try:
Paths = NSMutableArray.arrayWithArray_(paths)
except:
Paths = NSMutableArray.arrayWithArray_(paths.values())
result = GSPathFinder.alloc().init().removeOverlapPaths_error_(Paths, None)
if result[0] != 1:
return None
return Paths
removeOverlap = __PathOperator_removeOverlap__
def __PathOperator_subtractPaths__(paths, subtract):
try:
Paths = NSMutableArray.arrayWithArray_(paths)
except:
Paths = NSMutableArray.arrayWithArray_(paths.values())
try:
Subtract = NSMutableArray.arrayWithArray_(subtract)
except:
Subtract = NSMutableArray.arrayWithArray_(subtract.values())
result = GSPathFinder.alloc().init().subtractPaths_from_error_(Subtract, Paths, None)
if result[0] != 1:
return None
return Paths
subtractPaths = __PathOperator_subtractPaths__
def __PathOperator_intersectPaths__(paths, otherPaths):
try:
Paths = NSMutableArray.arrayWithArray_(paths)
except:
Paths = NSMutableArray.arrayWithArray_(paths.values())
try:
OtherPaths = NSMutableArray.arrayWithArray_(otherPaths)
except:
OtherPaths = NSMutableArray.arrayWithArray_(otherPaths.values())
result = GSPathFinder.alloc().init().intersectPaths_from_error_(Paths, OtherPaths, None)
if result[0] != 1:
return None
return OtherPaths
intersectPaths = __PathOperator_intersectPaths__
'''
Methods
=======
.. autosummary::
divideCurve()
distance()
addPoints()
subtractPoints()
GetOpenFile()
GetSaveFile()
GetFolder()
Message()
LogToConsole()
LogError()
'''
def divideCurve(P0, P1, P2, P3, t):
Q0x = P0[0] + ((P1[0] - P0[0]) * t)
Q0y = P0[1] + ((P1[1] - P0[1]) * t)
Q1x = P1[0] + ((P2[0] - P1[0]) * t)
Q1y = P1[1] + ((P2[1] - P1[1]) * t)
Q2x = P2[0] + ((P3[0] - P2[0]) * t)
Q2y = P2[1] + ((P3[1] - P2[1]) * t)
R0x = Q0x + ((Q1x - Q0x) * t)
R0y = Q0y + ((Q1y - Q0y) * t)
R1x = Q1x + ((Q2x - Q1x) * t)
R1y = Q1y + ((Q2y - Q1y) * t)
Sx = R0x + ((R1x - R0x) * t)
Sy = R0y + ((R1y - R0y) * t)
return (P0, NSMakePoint(Q0x, Q0y), NSMakePoint(R0x, R0y), NSMakePoint(Sx, Sy), NSMakePoint(R1x, R1y), NSMakePoint(Q2x, Q2y), P3)
'''
.. function:: divideCurve(P0, P1, P2, P3, t)
Divides the curve using De Casteljau's algorithm.
:param P0: The Start point of the Curve (NSPoint)
:param P1: The first off curve point
:param P2: The second off curve point
:param P3: The End point of the Curve
:param t: The time parameter
:return: A list of seven points that represent two curves: (P0, Q0, R0, S, R1, Q2, P3). Note that the "middle" point S is only returned once.
:rtype: list
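A minimal sketch with made-up coordinates:
.. code-block:: python
segments = divideCurve((0, 0), (50, 100), (150, 100), (200, 0), 0.5)
print(segments[3]) # the on-curve point at which the curve was divided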
'''
def distance(P1, P2):
return math.hypot(P1[0] - P2[0], P1[1] - P2[1])
'''
.. function:: distance(P1, P2)
Calculates the distance between two NSPoints.
:param P1: a NSPoint
:param P2: another NSPoint
:return: The distance
:rtype: float
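For example (points can be NSPoints or plain tuples):
.. code-block:: python
print(distance(NSMakePoint(0, 0), NSMakePoint(3, 4)))
5.0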
'''
def addPoints(P1, P2):
return NSMakePoint(P1[0] + P2[0], P1[1] + P2[1])
'''
.. function:: addPoints(P1, P2)
Adds two points.
:param P1: a NSPoint
:param P2: another NSPoint
:return: The sum of both points
:rtype: NSPoint
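For example (points can be NSPoints or plain tuples):
.. code-block:: python
print(addPoints(NSMakePoint(10, 10), NSMakePoint(5, -5)))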
'''
def subtractPoints(P1, P2):
return NSMakePoint(P1[0] - P2[0], P1[1] - P2[1])
'''
.. function:: subtractPoints(P1, P2)
Subtracts P2 from P1.
:param P1: a NSPoint
:param P2: the NSPoint to subtract
:return: The difference of both points
:rtype: NSPoint
'''
def scalePoint(P, scalar):
return NSMakePoint(P[0] * scalar, P[1] * scalar)
'''
.. function:: scalePoint(P, scalar)
Scales a point.
:param P: a NSPoint
:param scalar: The multiplier
:return: The multiplied point
:rtype: NSPoint
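For example (the point can be a NSPoint or a plain tuple):
.. code-block:: python
print(scalePoint(NSMakePoint(10, 20), 0.5))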
'''
def GetSaveFile(message=None, ProposedFileName=None, filetypes=None):
Panel = NSSavePanel.savePanel().retain()
if message is not None:
Panel.setTitle_(message)
Panel.setCanChooseFiles_(True)
Panel.setCanChooseDirectories_(False)
if filetypes is not None:
Panel.setAllowedFileTypes_(filetypes)
if ProposedFileName is not None:
if ProposedFileName.find("/") >= 0:
path, ProposedFileName = os.path.split(ProposedFileName)
Panel.setDirectoryURL_(NSURL.fileURLWithPath_(path))
Panel.setNameFieldStringValue_(ProposedFileName)
pressedButton = Panel.runModal()
if pressedButton == NSOKButton:
return Panel.filename()
return None
'''
.. function:: GetSaveFile(message=None, ProposedFileName=None, filetypes=None)
Opens a save file dialog.
:param message: A message string shown as the dialog title.
:param ProposedFileName: A proposed file name, optionally with a path.
:param filetypes: List of allowed file types, e.g., ["otf", "ttf"]
:return: The selected file or None
:rtype: unicode
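A minimal usage sketch (file name and type are examples):
.. code-block:: python
path = GetSaveFile(message='Save Font', ProposedFileName='MyFont.otf', filetypes=['otf'])
print(path)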
'''
def __allItems__(self):
items = []
for key in self.allKeys():
value = self[key]
items.append((key, value))
return items
MGOrderedDictionary.items = __allItems__
def __allKeys__(self):
return self.allKeys()
MGOrderedDictionary.keys = __allKeys__
def __Dict_removeObjectForKey__(self, key):
if isinstance(key, int):
if key < 0:
key += len(self)
if key < 0:
raise IndexError("list index out of range")
self.removeObjectAtIndex_(key)
return
self.removeObjectForKey_(key)
MGOrderedDictionary.__delitem__ = __Dict_removeObjectForKey__
GSNotifyingDictionary.items = __allItems__
GSNotifyingDictionary.keys = __allKeys__
# This should be possible but the way pyObjc wrapper works does not allow it.
# http://permalink.gmane.org/gmane.comp.python.pyobjc.devel/5493
# def __Dict__objectForKey__(self, key):
# if isinstance(key, int):
# if key < 0:
# key += len(self)
# if key < 0:
# raise IndexError("list index out of range")
# self.objectAtIndex_(key)
# return
# self.objectForKey_(key)
# MGOrderedDictionary.__getitem__ = __Dict__objectForKey__
def __Dict__iter__(self):
Values = self.values()
if Values is not None:
for element in Values:
yield element
MGOrderedDictionary.__iter__ = __Dict__iter__
def __Dict__del__(self, key):
self.removeObjectForKey_(key)
MGOrderedDictionary.__delattr__ = __Dict__del__
def GetFile(message=None, allowsMultipleSelection=False, filetypes=None):
return GetOpenFile(message, allowsMultipleSelection, filetypes)
def GetOpenFile(message=None, allowsMultipleSelection=False, filetypes=None, path=None):
if filetypes is None:
filetypes = []
Panel = NSOpenPanel.openPanel().retain()
Panel.setCanChooseFiles_(True)
Panel.setCanChooseDirectories_(False)
Panel.setAllowsMultipleSelection_(allowsMultipleSelection)
if path is not None:
Panel.setDirectory_(path)
if message is not None:
Panel.setTitle_(message)
if filetypes is not None and len(filetypes) > 0:
Panel.setAllowedFileTypes_(filetypes)
pressedButton = Panel.runModal()
if pressedButton == NSOKButton:
if allowsMultipleSelection:
return Panel.filenames()
else:
return Panel.filename()
return None
'''
.. function:: GetOpenFile(message=None, allowsMultipleSelection=False, filetypes=None, path=None)
Opens a file chooser dialog.
:param message: A message string.
:param allowsMultipleSelection: Boolean, True if user can select more than one file
:param filetypes: list of strings indicating the filetypes, e.g., ["gif", "pdf"]
:param path: The initial directory path
:return: The selected file or a list of file names or None
:rtype: unicode or list
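A minimal usage sketch (the file type is an example):
.. code-block:: python
path = GetOpenFile(message='Choose a .glyphs file', filetypes=['glyphs'])
print(path)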
'''
def GetFolder(message=None, allowsMultipleSelection=False, path=None):
Panel = NSOpenPanel.openPanel().retain()
Panel.setCanChooseFiles_(False)
Panel.setCanChooseDirectories_(True)
Panel.setAllowsMultipleSelection_(allowsMultipleSelection)
if message is not None:
Panel.setTitle_(message)
if path is not None:
Panel.setDirectory_(path)
pressedButton = Panel.runModal()
if pressedButton == NSOKButton:
if allowsMultipleSelection:
return Panel.filenames()
else:
return Panel.filename()
return None
'''
.. function:: GetFolder(message=None, allowsMultipleSelection=False, path=None)
Opens a folder chooser dialog.
:param message: A message string shown as the dialog title.
:param allowsMultipleSelection: Boolean, True if the user can select more than one folder
:param path: The initial directory path
:return: The selected folder, a list of folder names, or None
:rtype: unicode or list
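A minimal usage sketch:
.. code-block:: python
folder = GetFolder(message='Choose a folder')
print(folder)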
'''
def Message(message, title="Alert", OKButton=None):
Glyphs.showAlert_message_OKButton_(title, message, OKButton)
'''
.. function:: Message(message, title="Alert", OKButton=None)
Shows an alert panel.
:param message: The alert text
:param title: Title of the dialog (default: "Alert")
:param OKButton: Title of the OK button; None uses the default
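For example:
.. code-block:: python
Message('No glyph selected.', title='Warning')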
'''
def LogToConsole(message, title=None):
f = sys._getframe(1)
if not title:
title = "<>"
try:
title = f.f_code.co_name + " (line %d)" % f.f_lineno
except:
pass
myLog = "Log message from \"%s\":\n%s" % (title, message)
NSLog(myLog)
'''
.. function:: LogToConsole(message)
Write a message to the Mac's Console.app for debugging.
:param message:
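For example:
.. code-block:: python
LogToConsole('Started processing')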
'''
lastErrorMessage = ''
def LogError(message):
global lastErrorMessage
if message != lastErrorMessage:
lastErrorMessage = message
sys.stderr.write(message)
'''
.. function:: LogError(message)
Log an error message and write it to the Macro window’s output (in red).
:param message:
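For example:
.. code-block:: python
LogError('Could not export instance.\n')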
'''
'''
Constants
=========
Node types
.. data:: LINE
Line node.
.. data:: CURVE
Curve node. Make sure that each curve node is preceded by two off-curve nodes.
.. data:: QCURVE
Quadratic curve node. Make sure that each curve node is preceded by at least one off-curve node.
.. data:: OFFCURVE
Off-curve node
Export formats
==============
.. data:: OTF
Write CFF based font
.. data:: TTF
Write TrueType based font
.. data:: VARIABLE
Write Variable font
.. data:: UFO
Write UFO based font
.. data:: WOFF
Write WOFF
.. data:: WOFF2
Write WOFF2
.. data:: PLAIN
Do not package as webfont
.. data:: EOT
Write EOT
.. versionadded:: 2.5
Hint types
==========
.. data:: TOPGHOST
Top ghost for PS hints
.. data:: STEM
Stem for PS hints
.. data:: BOTTOMGHOST
Bottom ghost for PS hints
.. data:: TTANCHOR
Anchor for TT hints
.. data:: TTSTEM
Stem for TT hints
.. data:: TTALIGN
Align for TT hints
.. data:: TTINTERPOLATE
Interpolation for TT hints
.. data:: TTDIAGONAL
Diagonal for TT hints
.. data:: TTDELTA
Delta TT hints
.. data:: CORNER
Corner Component
.. data:: CAP
Cap Component
Hint Option
===========
This is only used for TrueType hints.
.. data:: TTROUND
Round to grid
.. data:: TTROUNDUP
Round up
.. data:: TTROUNDDOWN
Round down
.. data:: TTDONTROUND
Don’t round at all
.. data:: TRIPLE = 128
Indicates a triple hint group. There must be exactly three horizontal TTStem hints with this setting for it to take effect.
Menu Tags
=========
These are tags for accessing the menu items in the app's main menu. Please see :attr:`GSApplication.menu` for details.
.. data:: APP_MENU
The 'Glyphs' menu
.. data:: FILE_MENU
The File menu
.. data:: EDIT_MENU
The Edit menu
.. data:: GLYPH_MENU
The Glyph menu
.. data:: PATH_MENU
The Path menu
.. data:: FILTER_MENU
The Filter menu
.. data:: VIEW_MENU
The View menu
.. data:: SCRIPT_MENU
The Script menu
.. data:: WINDOW_MENU
The Window menu
.. data:: HELP_MENU
The Help menu
Menu States
===========
.. data:: ONSTATE
The menu entry will have a checkbox
.. data:: OFFSTATE
The menu entry will have no checkbox
.. data:: MIXEDSTATE
The menu entry will have a horizontal line
Callback Keys
=============
These are the available callbacks:
.. data:: DRAWFOREGROUND
to draw in the foreground
.. data:: DRAWBACKGROUND
to draw in the background
.. data:: DRAWINACTIVE
to draw inactive glyphs
.. data:: DOCUMENTOPENED
is called if a new document is opened
.. data:: DOCUMENTACTIVATED
is called when the document becomes the active document
.. data:: DOCUMENTWASSAVED
is called when the document is saved.
The document itself is passed in notification.object()
.. data:: DOCUMENTEXPORTED
is called when a font is exported. This is called for every instance and ``notification.object()`` will contain the path to the final font file.
.. code-block:: python
def exportCallback(info):
try:
print(info.object())
except:
# Error. Print exception.
import traceback
print(traceback.format_exc())
# add your function to the hook
Glyphs.addCallback(exportCallback, DOCUMENTEXPORTED)
.. data:: DOCUMENTCLOSED
is called when the document is closed
.. data:: TABDIDOPEN
is called when a new tab is opened
.. data:: TABWILLCLOSE
is called when a tab is closed
.. data:: UPDATEINTERFACE
is called when something changed in the Edit view, e.g. the selection or the glyph data.
.. data:: MOUSEMOVED
is called if the mouse is moved. If you need to draw something, you need to call `Glyphs.redraw()` and also register to one of the drawing callbacks.
Writing Directions
==================
The writing directions of the Edit View.
.. data:: LTR
Left To Right (e.g. Latin)
.. data:: RTL
Right To Left (e.g. Arabic, Hebrew)
.. data:: LTRTTB
Left To Right, Top To Bottom
.. data:: RTLTTB
Right To Left, Top To Bottom
Annotation types
================
.. data:: TEXT
.. data:: ARROW
.. data:: CIRCLE
.. data:: PLUS
.. data:: MINUS
'''
| schriftgestalt/GlyphsSDK | ObjectWrapper/GlyphsApp/__init__.py | Python | apache-2.0 | 268,731 |
import textwrap
# This dictionary contains the map of the school.
#
# Each entry in the dictionary is a room or other location. The key is the
# room name ("the_office"), and the value is another dictionary containing
# properties of that room, as follows:
#
# description: the text to be displayed when the player walks into this room.
#
# north, south, east, west: optional entries specifying the name of the room
# that the player arrives at when walking out of this room. If not
# specified, then the player cannot go that way. It is possible to
# arrive back at the same room; see maze1 and maze2!
#
school_map={
"the_office": {
"description": "You are standing outside the school office. You are surrounded by gleaming sofas made of solid gold. Paths lead in all directions.",
"north": "east_gate",
"east": "staff_room",
"west": "corridor",
"south": "hall"
},
"east_gate": {
"description": "You are standing outside the main door to the school. Your escape to the road is blocked by a deep muddy trench from The Great War. Beyond the trench you can see barbed wire across no-man's land. The door to the school is south.",
"south": "the_office"
},
"staff_room": {
"description": "You are in the staff room. There is a strong smell of garlic, socks and chocolate digestives. The only door leads west.",
"west": "the_office"
},
"hall": {
"description": "You are in the Great Viking Hall of Stamford Green. Long oak tables with silver goblets are lie across the room ready for a banquet, and Valkyries soar overhead. A sign on the wall says that the value of the month is 'Pillaging'. The office is north. Another doorway leads east.",
"north": "the_office",
"east": "kitchen"
},
"kitchen": {
"description": "You are in a kitchen. Flickering strip lighting illuminates filthy work surfaces. A menu for roast maggots slies next to an overflowing sink. An open doorway leads west.",
"west": "hall"
},
"corridor": {
"description": "You are in a corridor leading towards the West Wing. Cones, sirens, flashing lights and a 'DO NOT ENTER' sign suggest that construction is not quite finished. To the west, where the building should be, there is a currently a deep hole in the ground. You cannot see the bottom of the pit. A path east leads back to the office.",
"east": "the_office",
"west": "tunnel"
},
"tunnel": {
"description": "You are in a tunnel at the bottom of a pit, with a dark passage to the north, and a light to the east. Scratched on the wall are the cryptic letters 'AshLEy wiL nvR WIN'.",
"east": "corridor",
"north": "maze1"
},
"maze1": {
"description": "You are in an underground maze of twisting passages, all alike.",
"east": "tunnel",
"west": "maze2",
"north": "maze1",
"south": "maze2"
},
"maze2": {
"description": "You are in an underground maze of twisting passages, all alike. You can feel a warm gentle breeze.",
"east": "maze1",
"west": "maze2",
"north": "maze1",
"south": "top_field"
},
"top_field": {
"description": "You emerge into daylight at the top field beside a running track. There is a hole to the north.",
"west": "escape",
"north": "maze2"
},
"escape": {
"description": "You have left the school by the west gate."
}
}
objects={
"meat": {
"name": "a piece of uncooked meat",
"synonyms": {"meat", "uncooked meat"},
"can_take": True,
"location": "kitchen"
},
"key": {
"name": "a metal key",
"synonyms": {"key", "metal key"},
"can_take": True,
"location": "staff_room"
},
"dog_awake": {
"description": "A black dog with two heads guards the pit.",
"name": "a dog",
"synonyms": {"dog"},
"can_take": False,
"location": "corridor"
},
"dog_asleep": {
"description": "A black dog is asleep in the corner.",
"name": "a sleeping dog",
"synonyms": {"dog"},
"can_take": False,
"location": "corridor",
"hidden": True
},
"gate_locked": {
"description": "You can see the west gate which is locked.",
"name": "gate",
"synonyms": {"gate", "west gate"},
"can_take": False,
"location": "top_field"
},
"gate_open": {
"description": "The west gate is open.",
"name": "gate",
"synonyms": {"gate", "west gate"},
"can_take": False,
"location": "top_field",
"hidden": True
}
}
def look():
'''
Prints the description of the room named by the global variable 'location'.
'''
description=school_map[location]["description"]
objects_with_descriptions=[]
objects_to_list=[]
for obj_properties in objects.values():
if obj_properties.get("location") == location and obj_properties.get("hidden") != True:
if "description" in obj_properties:
objects_with_descriptions.append(obj_properties)
else:
objects_to_list.append(obj_properties)
for obj in objects_with_descriptions:
description = description + " " + obj["description"]
if len(objects_to_list) > 0:
description = description + " Lying on the ground you can see"
if len(objects_to_list) > 1:
for obj in objects_to_list[0:-1]:
description = description + " " + obj["name"] + ","
description = description[0:-1] + " and"
description = description + " " + objects_to_list[-1]["name"] + "."
formatted_description=textwrap.fill(description)
print(formatted_description)
def object_with_name(name):
for obj_name, obj_properties in objects.items():
if name in obj_properties["synonyms"] and obj_properties.get("hidden") != True:
return (obj_name, obj_properties)
return (None, None)
def take(obj_name):
name, obj=object_with_name(obj_name)
if obj==None:
print("I don't understand '" + obj_name + "'.")
return
# carried_objects holds object names (strings), not property dicts
if name in carried_objects:
print("You are already holding " + obj["name"] + ".")
return
if obj.get("location") != location:
print("You cannot see " + obj["name"] + ".")
return
if obj.get("can_take") == False:
print("You can't take that!")
return
obj["location"] = None
carried_objects.append(name)
print("You take " + obj["name"] + ".");
def drop(obj_name):
name, obj=object_with_name(obj_name)
if obj==None:
print("I don't understand '" + obj_name + "'.")
return
if name not in carried_objects:
print("You are not holding " + obj["name"] + ".")
return
obj["location"] = location
carried_objects.remove(name)
print("You drop " + obj["name"] + ".");
def inventory():
print("")
if len(carried_objects) == 0:
print("You are not carrying anything.")
else:
print("You have clutched in your sweaty hands:")
for obj_name in carried_objects:
print(" " + objects[obj_name]["name"])
def go(direction):
'''
Returns the name of the room in the given direction ('north', 'east', 'south' or 'west')
from the player's current location, or None if the player cannot go that way.
'''
next_location=school_map[location].get(direction)
if next_location == None:
print("You can't go that way.")
return next_location
def help():
print("Instructions:")
print("The aim of the game is to explore and escape from Stamford Green Primary School.")
print(" 1. Use 'north', 'east', 'south' or 'west' (or 'n', 'e', 's' or 'w') to move.")
print(" 2. Type 'look' to see what you can see.")
print(" 3. Use 'take <object>' and 'drop <object>' to take and drop objects.")
print(" 4. Use 'inventory' (or 'invent' or 'i') to see what you are carrying.")
print(" 5. Display this message again by typing 'help'.")
# The main part of the program starts here
print("Escape From Stamford Green!")
print("---------------------------")
print("Type 'help' for instructions.")
location="the_office" # Global variable containing the player's current location
carried_objects=[]
look()
while location != "escape":
print()
command=input("> ").lower()
move_to_location=None
if command=="north" or command=='n':
move_to_location=go("north")
elif command=="south" or command=='s':
move_to_location=go("south")
elif command=="east" or command=='e':
move_to_location=go("east")
elif command=="west" or command=='w':
move_to_location=go("west")
elif command.startswith("take "):
take(command[5:])
elif command.startswith("drop "):
drop(command[5:])
elif command=="inventory" or command=="invent" or command=="i":
inventory()
elif command=="look":
look()
elif command=="help":
help()
else:
print("I don't understand that! Try 'north', 'south', 'east' or 'west', or 'help'.")
if location=="corridor" and move_to_location=="tunnel" and objects["dog_awake"].get("hidden") != True:
print("The dog snarls and blocks your way.")
move_to_location = None
if location=="top_field" and move_to_location=="escape" and objects["gate_locked"].get("hidden") != True:
print("The gate is closed and locked. The lock has a key hole.")
move_to_location = None
if move_to_location != None:
location=move_to_location
look()
if objects["meat"].get("location") == "corridor":
print("The dog eats the piece of meat and falls asleep.")
objects["meat"]["location"] = None
objects["dog_awake"]["hidden"] = True
objects["dog_asleep"]["hidden"] = False
if location=="top_field" and objects["gate_open"].get("hidden") == True and "key" in carried_objects:
print("You unlock the gate with the key and it swings open.")
objects["gate_locked"]["hidden"] = True
objects["gate_open"]["hidden"] = False
print()
print("Congratulations, you have escaped from Stamford Green!")
| chrisglencross/python-lessons | misc/escape_from_stamford_green_v2.py | Python | mit | 10,412 |
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import typing
from federatedml.framework.homo.blocks import paillier_cipher
from federatedml.framework.homo.blocks.paillier_cipher import PaillierCipherTransVar
from federatedml.secureprotol import PaillierEncrypt
from federatedml.secureprotol.fate_paillier import PaillierPublicKey
class Host(object):
def __init__(self, trans_var=None):
if trans_var is None:
trans_var = PaillierCipherTransVar()
self._paillier = paillier_cipher.Client(trans_var=trans_var)
def register_paillier_cipher(self, transfer_variables):
pass
def gen_paillier_pubkey(self, enable, suffix=tuple()) -> typing.Union[PaillierPublicKey, None]:
return self._paillier.gen_paillier_pubkey(enable=enable, suffix=suffix)
def set_re_cipher_time(self, re_encrypt_times, suffix=tuple()):
return self._paillier.set_re_cipher_time(re_encrypt_times=re_encrypt_times, suffix=suffix)
def re_cipher(self, w, iter_num, batch_iter_num, suffix=tuple()):
return self._paillier.re_cipher(w=w, iter_num=iter_num, batch_iter_num=batch_iter_num, suffix=suffix)
class Arbiter(object):
def register_paillier_cipher(self, transfer_variables):
pass
def __init__(self, trans_var=None):
if trans_var is None:
trans_var = PaillierCipherTransVar()
self._paillier = paillier_cipher.Server(trans_var=trans_var)
self._client_parties = trans_var.client_parties
self._party_idx_map = {party: idx for idx, party in enumerate(self._client_parties)}
def paillier_keygen(self, key_length, suffix=tuple()) -> typing.Mapping[int, typing.Union[PaillierEncrypt, None]]:
ciphers = self._paillier.keygen(key_length, suffix)
return {self._party_idx_map[party]: cipher for party, cipher in ciphers.items()}
def set_re_cipher_time(self, ciphers: typing.Mapping[int, typing.Union[PaillierEncrypt, None]],
suffix=tuple()):
_ciphers = {self._client_parties[idx]: cipher for idx, cipher in ciphers.items()}
recipher_times = self._paillier.set_re_cipher_time(_ciphers, suffix)
return {self._party_idx_map[party]: time for party, time in recipher_times.items()}
def re_cipher(self, iter_num, re_encrypt_times, host_ciphers_dict, re_encrypt_batches, suffix=tuple()):
_ciphers = {self._client_parties[idx]: cipher for idx, cipher in host_ciphers_dict.items()}
_re_encrypt_times = {self._client_parties[idx]: time for idx, time in re_encrypt_times.items()}
return self._paillier.re_cipher(iter_num, _re_encrypt_times, _ciphers, re_encrypt_batches, suffix)
| FederatedAI/FATE | python/federatedml/framework/homo/procedure/paillier_cipher.py | Python | apache-2.0 | 3,245 |
from setuptools import setup
req = [
'nose==1.3.0',
'wsgiref==0.1.2',
]
setup(
name='foo',
version='0.1',
url='',
py_modules=[],
scripts=[],
author='John Doe',
description='yadda yadda',
zip_safe=False,
install_requires=req,
)
| sourcegraph/python-deps | testdata/setup_repo_hard/setup.py | Python | bsd-2-clause | 273 |
# -*- coding: utf-8 -*-
"""
werkzeug.contrib.profiler
~~~~~~~~~~~~~~~~~~~~~~~~~
This module provides a simple WSGI profiler middleware for finding
bottlenecks in web applications. It uses the :mod:`profile` or
:mod:`cProfile` module to do the profiling and writes the stats to the
stream provided (defaults to stdout).
Example usage::
from werkzeug.contrib.profiler import ProfilerMiddleware
app = ProfilerMiddleware(app)
:copyright: (c) 2011 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import sys, time, os.path
try:
try:
from cProfile import Profile
except ImportError:
from profile import Profile
from pstats import Stats
available = True
except ImportError:
available = False
class MergeStream(object):
"""An object that redirects `write` calls to multiple streams.
Use this to log to both `sys.stdout` and a file::
f = open('profiler.log', 'w')
stream = MergeStream(sys.stdout, f)
profiler = ProfilerMiddleware(app, stream)
"""
def __init__(self, *streams):
if not streams:
raise TypeError('at least one stream must be given')
self.streams = streams
def write(self, data):
for stream in self.streams:
stream.write(data)
class ProfilerMiddleware(object):
"""Simple profiler middleware. Wraps a WSGI application and profiles
a request. This intentionally buffers the response so that timings are
more exact.
By giving the `profile_dir` argument, pstat.Stats files are saved to that
directory, one file per request. Without it, a summary is printed to
`stream` instead.
For the exact meaning of `sort_by` and `restrictions` consult the
:mod:`profile` documentation.
.. versionadded:: 0.9
Added support for `restrictions` and `profile_dir`.
:param app: the WSGI application to profile.
:param stream: the stream for the profiled stats. defaults to stdout.
:param sort_by: a tuple of columns to sort the result by.
:param restrictions: a tuple of profiling restrictions, not used if dumping
to `profile_dir`.
:param profile_dir: directory name to save pstat files
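A sketch of dumping one pstats file per request (the directory path is an example)::
from werkzeug.contrib.profiler import ProfilerMiddleware
app = ProfilerMiddleware(app, profile_dir='/tmp/profiles')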
"""
def __init__(self, app, stream=None,
sort_by=('time', 'calls'), restrictions=(), profile_dir=None):
if not available:
raise RuntimeError('the profiler is not available because '
'profile or pstat is not installed.')
self._app = app
self._stream = stream or sys.stdout
self._sort_by = sort_by
self._restrictions = restrictions
self._profile_dir = profile_dir
def __call__(self, environ, start_response):
response_body = []
def catching_start_response(status, headers, exc_info=None):
start_response(status, headers, exc_info)
return response_body.append
def runapp():
appiter = self._app(environ, catching_start_response)
response_body.extend(appiter)
if hasattr(appiter, 'close'):
appiter.close()
p = Profile()
start = time.time()
p.runcall(runapp)
body = ''.join(response_body)
elapsed = time.time() - start
if self._profile_dir is not None:
prof_filename = os.path.join(self._profile_dir,
'%s.%s.%06dms.%d.prof' % (
environ['REQUEST_METHOD'],
environ.get('PATH_INFO').strip('/').replace('/', '.') or 'root',
elapsed * 1000.0,
time.time()
))
p.dump_stats(prof_filename)
else:
stats = Stats(p, stream=self._stream)
stats.sort_stats(*self._sort_by)
self._stream.write('-' * 80)
self._stream.write('\nPATH: %r\n' % environ.get('PATH_INFO'))
stats.print_stats(*self._restrictions)
self._stream.write('-' * 80 + '\n\n')
return [body]
def make_action(app_factory, hostname='localhost', port=5000,
threaded=False, processes=1, stream=None,
sort_by=('time', 'calls'), restrictions=()):
"""Return a new callback for :mod:`werkzeug.script` that starts a local
server with the profiler enabled.
::
from werkzeug.contrib import profiler
action_profile = profiler.make_action(make_app)
"""
def action(hostname=('h', hostname), port=('p', port),
threaded=threaded, processes=processes):
"""Start a new development server."""
from werkzeug.serving import run_simple
app = ProfilerMiddleware(app_factory(), stream, sort_by, restrictions)
run_simple(hostname, port, app, False, None, threaded, processes)
return action
| Chitrank-Dixit/werkzeug | werkzeug/contrib/profiler.py | Python | bsd-3-clause | 4,920 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 Google
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# ----------------------------------------------------------------------------
#
# *** AUTO GENERATED CODE *** AUTO GENERATED CODE ***
#
# ----------------------------------------------------------------------------
#
# This file is automatically generated by Magic Modules and manual
# changes will be clobbered when the file is regenerated.
#
# Please read more about how to change this file at
# https://www.github.com/GoogleCloudPlatform/magic-modules
#
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
__metaclass__ = type
################################################################################
# Documentation
################################################################################
ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gcp_compute_router
description:
- Represents a Router resource.
short_description: Creates a GCP Router
version_added: '2.7'
author: Google Inc. (@googlecloudplatform)
requirements:
- python >= 2.6
- requests >= 2.18.4
- google-auth >= 1.3.0
options:
state:
description:
- Whether the given object should exist in GCP
choices:
- present
- absent
default: present
type: str
name:
description:
- Name of the resource. The name must be 1-63 characters long, and comply with
RFC1035. Specifically, the name must be 1-63 characters long and match the regular
expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must
be a lowercase letter, and all following characters must be a dash, lowercase
letter, or digit, except the last character, which cannot be a dash.
required: true
type: str
description:
description:
- An optional description of this resource.
required: false
type: str
network:
description:
- A reference to the network to which this router belongs.
- 'This field represents a link to a Network resource in GCP. It can be specified
in two ways. First, you can place a dictionary with key ''selfLink'' and value
of your resource''s selfLink Alternatively, you can add `register: name-of-resource`
to a gcp_compute_network task and then set this network field to "{{ name-of-resource
}}"'
required: true
type: dict
bgp:
description:
- BGP information specific to this router.
required: false
type: dict
suboptions:
asn:
description:
- Local BGP Autonomous System Number (ASN). Must be an RFC6996 private ASN,
either 16-bit or 32-bit. The value will be fixed for this router resource.
All VPN tunnels that link to this router will have the same local ASN.
required: true
type: int
advertise_mode:
description:
- User-specified flag to indicate which mode to use for advertisement.
- 'Valid values of this enum field are: DEFAULT, CUSTOM .'
- 'Some valid choices include: "DEFAULT", "CUSTOM"'
required: false
default: DEFAULT
type: str
advertised_groups:
description:
- User-specified list of prefix groups to advertise in custom mode.
- This field can only be populated if advertiseMode is CUSTOM and is advertised
to all peers of the router. These groups will be advertised in addition
to any specified prefixes. Leave this field blank to advertise no custom
groups.
- 'This enum field has the one valid value: ALL_SUBNETS .'
required: false
type: list
advertised_ip_ranges:
description:
- User-specified list of individual IP ranges to advertise in custom mode.
This field can only be populated if advertiseMode is CUSTOM and is advertised
to all peers of the router. These IP ranges will be advertised in addition
to any specified groups.
- Leave this field blank to advertise no custom IP ranges.
required: false
type: list
suboptions:
range:
description:
- The IP range to advertise. The value must be a CIDR-formatted string.
required: false
type: str
description:
description:
- User-specified description for the IP range.
required: false
type: str
region:
description:
- Region where the router resides.
required: true
type: str
project:
description:
- The Google Cloud Platform project to use.
type: str
auth_kind:
description:
- The type of credential used.
type: str
required: true
choices:
- application
- machineaccount
- serviceaccount
service_account_contents:
description:
- The contents of a Service Account JSON file, either in a dictionary or as a
JSON string that represents it.
type: jsonarg
service_account_file:
description:
- The path of a Service Account JSON file if serviceaccount is selected as type.
type: path
service_account_email:
description:
- An optional service account email address if machineaccount is selected and
the user does not wish to use the default email.
type: str
scopes:
description:
- Array of scopes to be used
type: list
env_type:
description:
- Specifies which Ansible environment you're running this module within.
- This should not be set unless you know what you're doing.
- This only alters the User Agent string for any API requests.
type: str
notes:
- 'API Reference: U(https://cloud.google.com/compute/docs/reference/rest/v1/routers)'
- 'Google Cloud Router: U(https://cloud.google.com/router/docs/)'
- for authentication, you can set service_account_file using the C(gcp_service_account_file)
env variable.
- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS)
env variable.
- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL)
env variable.
- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable.
- For authentication, you can set scopes using the C(GCP_SCOPES) env variable.
- Environment variables values will only be used if the playbook values are not set.
- The I(service_account_email) and I(service_account_file) options are mutually exclusive.
'''
EXAMPLES = '''
- name: create a network
gcp_compute_network:
name: network-router
project: "{{ gcp_project }}"
auth_kind: "{{ gcp_cred_kind }}"
service_account_file: "{{ gcp_cred_file }}"
state: present
register: network
- name: create a router
gcp_compute_router:
name: test_object
network: "{{ network }}"
bgp:
asn: 64514
advertise_mode: CUSTOM
advertised_groups:
- ALL_SUBNETS
advertised_ip_ranges:
- range: 1.2.3.4
- range: 6.7.0.0/16
region: us-central1
project: test_project
auth_kind: serviceaccount
service_account_file: "/tmp/auth.pem"
state: present
'''
RETURN = '''
id:
description:
- The unique identifier for the resource.
returned: success
type: int
creationTimestamp:
description:
- Creation timestamp in RFC3339 text format.
returned: success
type: str
name:
description:
- Name of the resource. The name must be 1-63 characters long, and comply with RFC1035.
Specifically, the name must be 1-63 characters long and match the regular expression
`[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase
letter, and all following characters must be a dash, lowercase letter, or digit,
except the last character, which cannot be a dash.
returned: success
type: str
description:
description:
- An optional description of this resource.
returned: success
type: str
network:
description:
- A reference to the network to which this router belongs.
returned: success
type: dict
bgp:
description:
- BGP information specific to this router.
returned: success
type: complex
contains:
asn:
description:
- Local BGP Autonomous System Number (ASN). Must be an RFC6996 private ASN,
either 16-bit or 32-bit. The value will be fixed for this router resource.
All VPN tunnels that link to this router will have the same local ASN.
returned: success
type: int
advertiseMode:
description:
- User-specified flag to indicate which mode to use for advertisement.
- 'Valid values of this enum field are: DEFAULT, CUSTOM .'
returned: success
type: str
advertisedGroups:
description:
- User-specified list of prefix groups to advertise in custom mode.
- This field can only be populated if advertiseMode is CUSTOM and is advertised
to all peers of the router. These groups will be advertised in addition to
any specified prefixes. Leave this field blank to advertise no custom groups.
- 'This enum field has the one valid value: ALL_SUBNETS .'
returned: success
type: list
advertisedIpRanges:
description:
- User-specified list of individual IP ranges to advertise in custom mode. This
field can only be populated if advertiseMode is CUSTOM and is advertised to
all peers of the router. These IP ranges will be advertised in addition to
any specified groups.
- Leave this field blank to advertise no custom IP ranges.
returned: success
type: complex
contains:
range:
description:
- The IP range to advertise. The value must be a CIDR-formatted string.
returned: success
type: str
description:
description:
- User-specified description for the IP range.
returned: success
type: str
region:
description:
- Region where the router resides.
returned: success
type: str
'''
################################################################################
# Imports
################################################################################
from ansible.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest, remove_nones_from_dict, replace_resource_dict
import json
import time
################################################################################
# Main
################################################################################
def main():
"""Main function"""
module = GcpModule(
argument_spec=dict(
state=dict(default='present', choices=['present', 'absent'], type='str'),
name=dict(required=True, type='str'),
description=dict(type='str'),
network=dict(required=True, type='dict'),
bgp=dict(
type='dict',
options=dict(
asn=dict(required=True, type='int'),
advertise_mode=dict(default='DEFAULT', type='str'),
advertised_groups=dict(type='list', elements='str'),
advertised_ip_ranges=dict(type='list', elements='dict', options=dict(range=dict(type='str'), description=dict(type='str'))),
),
),
region=dict(required=True, type='str'),
)
)
if not module.params['scopes']:
module.params['scopes'] = ['https://www.googleapis.com/auth/compute']
state = module.params['state']
kind = 'compute#router'
fetch = fetch_resource(module, self_link(module), kind)
changed = False
if fetch:
if state == 'present':
if is_different(module, fetch):
update(module, self_link(module), kind)
fetch = fetch_resource(module, self_link(module), kind)
changed = True
else:
delete(module, self_link(module), kind)
fetch = {}
changed = True
else:
if state == 'present':
fetch = create(module, collection(module), kind)
changed = True
else:
fetch = {}
fetch.update({'changed': changed})
module.exit_json(**fetch)
def create(module, link, kind):
auth = GcpSession(module, 'compute')
return wait_for_operation(module, auth.post(link, resource_to_request(module)))
def update(module, link, kind):
auth = GcpSession(module, 'compute')
return wait_for_operation(module, auth.patch(link, resource_to_request(module)))
def delete(module, link, kind):
auth = GcpSession(module, 'compute')
return wait_for_operation(module, auth.delete(link))
def resource_to_request(module):
request = {
u'kind': 'compute#router',
u'region': module.params.get('region'),
u'name': module.params.get('name'),
u'description': module.params.get('description'),
u'network': replace_resource_dict(module.params.get(u'network', {}), 'selfLink'),
u'bgp': RouterBgp(module.params.get('bgp', {}), module).to_request(),
}
return_vals = {}
for k, v in request.items():
if v or v is False:
return_vals[k] = v
return return_vals
def fetch_resource(module, link, kind, allow_not_found=True):
auth = GcpSession(module, 'compute')
return return_if_object(module, auth.get(link), kind, allow_not_found)
def self_link(module):
return "https://www.googleapis.com/compute/v1/projects/{project}/regions/{region}/routers/{name}".format(**module.params)
def collection(module):
return "https://www.googleapis.com/compute/v1/projects/{project}/regions/{region}/routers".format(**module.params)
def return_if_object(module, response, kind, allow_not_found=False):
# If not found, return nothing.
if allow_not_found and response.status_code == 404:
return None
# If no content, return nothing.
if response.status_code == 204:
return None
try:
module.raise_for_status(response)
result = response.json()
except getattr(json.decoder, 'JSONDecodeError', ValueError):
module.fail_json(msg="Invalid JSON response with error: %s" % response.text)
if navigate_hash(result, ['error', 'errors']):
module.fail_json(msg=navigate_hash(result, ['error', 'errors']))
return result
def is_different(module, response):
request = resource_to_request(module)
response = response_to_hash(module, response)
# Remove all output-only from response.
response_vals = {}
for k, v in response.items():
if k in request:
response_vals[k] = v
request_vals = {}
for k, v in request.items():
if k in response:
request_vals[k] = v
return GcpRequest(request_vals) != GcpRequest(response_vals)
# Remove unnecessary properties from the response.
# This is for doing comparisons with Ansible's current parameters.
def response_to_hash(module, response):
return {
u'id': response.get(u'id'),
u'creationTimestamp': response.get(u'creationTimestamp'),
u'name': module.params.get('name'),
u'description': response.get(u'description'),
u'network': replace_resource_dict(module.params.get(u'network', {}), 'selfLink'),
u'bgp': RouterBgp(response.get(u'bgp', {}), module).from_response(),
}
def async_op_url(module, extra_data=None):
if extra_data is None:
extra_data = {}
url = "https://www.googleapis.com/compute/v1/projects/{project}/regions/{region}/operations/{op_id}"
combined = extra_data.copy()
combined.update(module.params)
return url.format(**combined)
def wait_for_operation(module, response):
op_result = return_if_object(module, response, 'compute#operation')
if op_result is None:
return {}
status = navigate_hash(op_result, ['status'])
wait_done = wait_for_completion(status, op_result, module)
return fetch_resource(module, navigate_hash(wait_done, ['targetLink']), 'compute#router')
def wait_for_completion(status, op_result, module):
op_id = navigate_hash(op_result, ['name'])
op_uri = async_op_url(module, {'op_id': op_id})
while status != 'DONE':
raise_if_errors(op_result, ['error', 'errors'], module)
time.sleep(1.0)
op_result = fetch_resource(module, op_uri, 'compute#operation', False)
status = navigate_hash(op_result, ['status'])
return op_result
def raise_if_errors(response, err_path, module):
errors = navigate_hash(response, err_path)
if errors is not None:
module.fail_json(msg=errors)
class RouterBgp(object):
def __init__(self, request, module):
self.module = module
if request:
self.request = request
else:
self.request = {}
def to_request(self):
return remove_nones_from_dict(
{
u'asn': self.request.get('asn'),
u'advertiseMode': self.request.get('advertise_mode'),
u'advertisedGroups': self.request.get('advertised_groups'),
u'advertisedIpRanges': RouterAdvertisediprangesArray(self.request.get('advertised_ip_ranges', []), self.module).to_request(),
}
)
def from_response(self):
return remove_nones_from_dict(
{
u'asn': self.request.get(u'asn'),
u'advertiseMode': self.request.get(u'advertiseMode'),
u'advertisedGroups': self.request.get(u'advertisedGroups'),
u'advertisedIpRanges': RouterAdvertisediprangesArray(self.request.get(u'advertisedIpRanges', []), self.module).from_response(),
}
)
class RouterAdvertisediprangesArray(object):
def __init__(self, request, module):
self.module = module
if request:
self.request = request
else:
self.request = []
def to_request(self):
items = []
for item in self.request:
items.append(self._request_for_item(item))
return items
def from_response(self):
items = []
for item in self.request:
items.append(self._response_from_item(item))
return items
def _request_for_item(self, item):
return remove_nones_from_dict({u'range': item.get('range'), u'description': item.get('description')})
def _response_from_item(self, item):
return remove_nones_from_dict({u'range': item.get(u'range'), u'description': item.get(u'description')})
if __name__ == '__main__':
main()
| sestrella/ansible | lib/ansible/modules/cloud/google/gcp_compute_router.py | Python | gpl-3.0 | 18,863 |
import os
# toolchains options
ARCH='arm'
CPU='cortex-m3'
CROSS_TOOL='keil'
if os.getenv('RTT_CC'):
CROSS_TOOL = os.getenv('RTT_CC')
#device options
# STM32_TYPE =
# 'STM32F10X_LD','STM32F10X_LD_VL',
# 'STM32F10X_MD','STM32F10X_MD_VL',
# 'STM32F10X_HD','STM32F10X_HD_VL',
# 'STM32F10X_XL','STM32F10X_CL'
#STM32_TYPE = 'STM32F10X_HD'
PART_TYPE = 'STM32F10X_HD'
# lcd panel options
# 'FMT0371','ILI932X', 'SSD1289'
# RT_USING_LCD_TYPE = 'SSD1289'
# cross_tool provides the cross compiler
# EXEC_PATH is the path to the compiler executables, for example, CodeSourcery, Keil MDK, IAR
if CROSS_TOOL == 'gcc':
PLATFORM = 'gcc'
EXEC_PATH = 'D:/SourceryGCC/bin'
elif CROSS_TOOL == 'keil':
PLATFORM = 'armcc'
EXEC_PATH = 'D:/Keil_v5'
elif CROSS_TOOL == 'iar':
PLATFORM = 'iar'
IAR_PATH = 'C:/Program Files/IAR Systems/Embedded Workbench 6.0 Evaluation'
if os.getenv('RTT_EXEC_PATH'):
EXEC_PATH = os.getenv('RTT_EXEC_PATH')
BUILD = 'debug'
if PLATFORM == 'gcc':
# toolchains
PREFIX = 'arm-none-eabi-'
CC = PREFIX + 'gcc'
AS = PREFIX + 'gcc'
AR = PREFIX + 'ar'
LINK = PREFIX + 'gcc'
TARGET_EXT = 'axf'
SIZE = PREFIX + 'size'
OBJDUMP = PREFIX + 'objdump'
OBJCPY = PREFIX + 'objcopy'
DEVICE = ' -mcpu=cortex-m3 -mthumb -ffunction-sections -fdata-sections'
CFLAGS = DEVICE
AFLAGS = ' -c' + DEVICE + ' -x assembler-with-cpp'
LFLAGS = DEVICE + ' -Wl,--gc-sections,-Map=rtthread-stm32.map,-cref,-u,Reset_Handler -T stm32_rom.ld'
CPATH = ''
LPATH = ''
if BUILD == 'debug':
CFLAGS += ' -O0 -gdwarf-2'
AFLAGS += ' -gdwarf-2'
else:
CFLAGS += ' -O2'
POST_ACTION = OBJCPY + ' -O binary $TARGET rtthread.bin\n' + SIZE + ' $TARGET \n'
elif PLATFORM == 'armcc':
# toolchains
CC = 'armcc'
AS = 'armasm'
AR = 'armar'
LINK = 'armlink'
TARGET_EXT = 'axf'
DEVICE = ' --device DARMSTM'
CFLAGS = DEVICE + ' --apcs=interwork'
AFLAGS = DEVICE
LFLAGS = DEVICE + ' --info sizes --info totals --info unused --info veneers --list rtthread-stm32.map --scatter stm32_rom.sct'
CFLAGS += ' --c99'
CFLAGS += ' -I' + EXEC_PATH + '/ARM/RV31/INC'
LFLAGS += ' --libpath ' + EXEC_PATH + '/ARM/RV31/LIB'
EXEC_PATH += '/arm/bin40/'
if BUILD == 'debug':
CFLAGS += ' -g -O0'
AFLAGS += ' -g'
else:
CFLAGS += ' -O2'
POST_ACTION = 'fromelf --bin $TARGET --output rtthread.bin \nfromelf -z $TARGET'
elif PLATFORM == 'iar':
# toolchains
CC = 'iccarm'
AS = 'iasmarm'
AR = 'iarchive'
LINK = 'ilinkarm'
TARGET_EXT = 'out'
DEVICE = ' -D USE_STDPERIPH_DRIVER' + ' -D STM32F10X_HD'
CFLAGS = DEVICE
CFLAGS += ' --diag_suppress Pa050'
CFLAGS += ' --no_cse'
CFLAGS += ' --no_unroll'
CFLAGS += ' --no_inline'
CFLAGS += ' --no_code_motion'
CFLAGS += ' --no_tbaa'
CFLAGS += ' --no_clustering'
CFLAGS += ' --no_scheduling'
CFLAGS += ' --debug'
CFLAGS += ' --endian=little'
CFLAGS += ' --cpu=Cortex-M3'
CFLAGS += ' -e'
CFLAGS += ' --fpu=None'
CFLAGS += ' --dlib_config "' + IAR_PATH + '/arm/INC/c/DLib_Config_Normal.h"'
CFLAGS += ' -Ol'
CFLAGS += ' --use_c++_inline'
AFLAGS = ''
AFLAGS += ' -s+'
AFLAGS += ' -w+'
AFLAGS += ' -r'
AFLAGS += ' --cpu Cortex-M3'
AFLAGS += ' --fpu None'
LFLAGS = ' --config stm32f10x_flash.icf'
LFLAGS += ' --redirect _Printf=_PrintfTiny'
LFLAGS += ' --redirect _Scanf=_ScanfSmall'
LFLAGS += ' --entry __iar_program_start'
EXEC_PATH = IAR_PATH + '/arm/bin/'
POST_ACTION = ''
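# A minimal usage sketch (not part of this file; the SCons keywords below are
# assumptions): RT-Thread's SConstruct typically imports this module and hands
# the toolchain settings to the build environment.
#
#     import rtconfig
#     from SCons.Script import Environment
#
#     env = Environment(CC=rtconfig.CC, CCFLAGS=rtconfig.CFLAGS,
#                       AS=rtconfig.AS, ASFLAGS=rtconfig.AFLAGS,
#                       LINK=rtconfig.LINK, LINKFLAGS=rtconfig.LFLAGS)
#     env.PrependENVPath('PATH', rtconfig.EXEC_PATH)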
| apinggithub/-rtthread2.1.0app | bsp/stm32f10x-curer/rtconfig.py | Python | gpl-2.0 | 3,635 |
from django.shortcuts import render
from django.http import HttpResponse
from datetime import timedelta
import psutil
import subprocess
import re
import time
def home(request):
return render(request, 'home/index.html')
def datetime(request):
return HttpResponse(time.strftime("%a %d %b %Y - %H:%M:%S"))
def uptime(request):
with open('/proc/uptime', 'r') as f:
uptimeSecs = float(f.readline().split()[0])
uptimeStr = str(timedelta(seconds = uptimeSecs))
return HttpResponse(uptimeStr.split('.')[0])
def cpuUsage(request):
return HttpResponse(str(int(psutil.cpu_percent(interval = 1) + 0.5)) + '%')
def memoryUsage(request):
return HttpResponse(str(int(psutil.virtual_memory()[2] + 0.5)) + '%')
def diskUsage(request):
cmdOutput = subprocess.Popen(['df', '-h', '/dev/sda1'], stdout = subprocess.PIPE).communicate()[0]
    p = re.compile(r'\s\d+%\s')
return HttpResponse(p.findall(cmdOutput)[0].strip())
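# A minimal URLconf sketch (an assumption, not part of this module) showing
# how a dashboard page could poll these views:
#
#     from django.conf.urls import url
#     from home import views
#
#     urlpatterns = [
#         url(r'^$', views.home),
#         url(r'^datetime/$', views.datetime),
#         url(r'^uptime/$', views.uptime),
#         url(r'^cpu/$', views.cpuUsage),
#         url(r'^memory/$', views.memoryUsage),
#         url(r'^disk/$', views.diskUsage),
#     ]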
| luiscarlosgph/nas | home/views.py | Python | mit | 916 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# complexity documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 9 22:26:36 2013.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# Get the project root dir, which is the parent dir of this docs directory.
cwd = os.getcwd()
project_root = os.path.dirname(cwd)
# Insert the project root dir as the first element in the PYTHONPATH.
# This lets us ensure that the source package is imported, and that its
# version is used.
sys.path.insert(0, project_root)
import assemble
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'assemble'
copyright = u'2014, Kracekumar Ramaraju'
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = assemble.__version__
# The full version, including alpha/beta/rc tags.
release = assemble.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to
# some non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built
# documents.
#keep_warnings = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as
# html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the
# top of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon
# of the docs. This file should be a Windows icon file (.ico) being
# 16x16 or 32x32 pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets)
# here, relative to this directory. They are copied after the builtin
# static files, so a file named "default.css" will overwrite the builtin
# "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names
# to template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer.
# Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer.
# Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages
# will contain a <link> tag referring to it. The value of this option
# must be the base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'assembledoc'
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index', 'assemble.tex',
u'assemble Documentation',
u'Kracekumar Ramaraju', 'manual'),
]
# The name of an image file (relative to this directory) to place at
# the top of the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings
# are parts, not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'assemble',
u'assemble Documentation',
[u'Kracekumar Ramaraju'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'assemble',
u'assemble Documentation',
u'Kracekumar Ramaraju',
'assemble',
'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| kracekumar/assemble | docs/conf.py | Python | bsd-3-clause | 8,429 |
from __future__ import unicode_literals
from scorched.compat import str
import itertools
import json
import requests
import scorched.compat
import scorched.dates
import scorched.exc
import scorched.response
import scorched.search
import time
import warnings
MAX_LENGTH_GET_URL = 2048
# Jetty default is 4096; Tomcat default is 8192; picking 2048 to be
# conservative.
def is_iter(val):
return isinstance(val, (tuple, list))
class SolrConnection(object):
readable = True
writeable = True
def __init__(self, url, http_connection, mode, retry_timeout,
max_length_get_url, search_timeout=()):
"""
:param url: url to Solr
:type url: str
:param http_connection: existing requests.Session object, or None to
create a new one.
:type http_connection: requests connection
:param mode: mode (readable, writable) Solr
:type mode: str
:param retry_timeout: timeout until retry
:type retry_timeout: int
        :param max_length_get_url: max URL length before switching to POST
:type max_length_get_url: int
:param search_timeout: (optional) How long to wait for the server to
send data before giving up, as a float, or a
(connect timeout, read timeout) tuple.
:type search_timeout: float or tuple
"""
self.http_connection = http_connection or requests.Session()
if mode == 'r':
self.writeable = False
elif mode == 'w':
self.readable = False
self.url = url.rstrip("/") + "/"
self.update_url = self.url + "update/json"
self.select_url = self.url + "select/"
self.mlt_url = self.url + "mlt/"
self.get_url = self.url + "get/"
self.retry_timeout = retry_timeout
self.max_length_get_url = max_length_get_url
self.search_timeout = search_timeout
def request(self, *args, **kwargs):
"""
:param args: arguments
:type args: tuple
:param kwargs: key word arguments
:type kwargs: dict
.. todo::
Make this api more explicit!
"""
try:
return self.http_connection.request(*args, **kwargs)
except (requests.exceptions.ConnectionError,
requests.exceptions.Timeout):
if self.retry_timeout < 0:
raise
time.sleep(self.retry_timeout)
return self.http_connection.request(*args, **kwargs)
def get(self, ids, fl=None):
"""
Perform a RealTime Get
"""
        # We always send the ids parameter to force the standard output format,
        # but use the id parameter for our actual data, as `ids` cannot handle
        # ids containing commas
params = [
("ids", ""),
("wt", "json"),
]
if is_iter(ids):
for id in ids:
params.append(("id", id))
else:
params.append(("id", ids))
if fl:
params.append(("fl", ",".join(fl)))
qs = scorched.compat.urlencode(params)
url = "%s?%s" % (self.get_url, qs)
response = self.request("GET", url)
if response.status_code != 200:
raise scorched.exc.SolrError(response)
return response.text
def update(self, update_doc, **kwargs):
"""
:param update_doc: data send to Solr
:type update_doc: json data
:returns: json -- json string
Send json to Solr
"""
if not self.writeable:
raise TypeError("This Solr instance is only for reading")
body = update_doc
if body:
headers = {"Content-Type": "application/json; charset=utf-8"}
else:
headers = {}
url = self.url_for_update(**kwargs)
response = self.request('POST', url, data=body, headers=headers)
if response.status_code != 200:
raise scorched.exc.SolrError(response)
return response.text
def url_for_update(self, commit=None, commitWithin=None, softCommit=None,
optimize=None, waitSearcher=None, expungeDeletes=None,
maxSegments=None):
"""
:param commit: optional -- commit actions
:type commit: bool
:param commitWithin: optional -- document will be added within that
time
:type commitWithin: int
:param softCommit: optional -- performant commit without "on-disk"
guarantee
:type softCommit: bool
:param optimize: optional -- optimize forces all of the index segments
to be merged into a single segment first.
:type optimze: bool
:param waitSearcher: optional -- block until a new searcher is opened
and registered as the main query searcher,
:type waitSearcher: bool
:param expungeDeletes: optional -- merge segments with deletes away
:type expungeDeletes: bool
:param maxSegments: optional -- optimizes down to at most this number
of segments
:type maxSegments: int
        :returns: str -- url with all extra parameters set
        This function sets all extra parameters for the ``optimize`` and
``commit`` function.
"""
extra_params = {}
if commit is not None:
extra_params['commit'] = "true" if commit else "false"
if commitWithin is not None:
try:
extra_params['commitWithin'] = int(commitWithin)
except (TypeError, ValueError):
raise ValueError(
"commitWithin should be a number in milliseconds")
if extra_params['commitWithin'] < 0:
raise ValueError(
"commitWithin should be a number in milliseconds")
extra_params['commitWithin'] = str(extra_params['commitWithin'])
if softCommit is not None:
extra_params['softCommit'] = "true" if softCommit else "false"
if optimize is not None:
extra_params['optimize'] = "true" if optimize else "false"
if waitSearcher is not None:
extra_params['waitSearcher'] = "true" if waitSearcher else "false"
if expungeDeletes is not None:
extra_params[
'expungeDeletes'] = "true" if expungeDeletes else "false"
if maxSegments is not None:
try:
extra_params['maxSegments'] = int(maxSegments)
except (TypeError, ValueError):
raise ValueError("maxSegments")
if extra_params['maxSegments'] <= 0:
raise ValueError("maxSegments should be a positive number")
extra_params['maxSegments'] = str(extra_params['maxSegments'])
if 'expungeDeletes' in extra_params and 'commit' not in extra_params:
raise ValueError("Can't do expungeDeletes without commit")
if 'maxSegments' in extra_params and 'optimize' not in extra_params:
raise ValueError("Can't do maxSegments without optimize")
if extra_params:
return "%s?%s" % (self.update_url, scorched.compat.urlencode(
sorted(extra_params.items())))
else:
return self.update_url
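    # For example (a sketch): url_for_update(commit=True, waitSearcher=False)
    # returns "<solr-url>/update/json?commit=true&waitSearcher=false" -- the
    # extra parameters are urlencoded in sorted key order.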
def select(self, params):
"""
:param params: LuceneQuery converted to a dictionary with search
queries
:type params: dict
:returns: json -- json string
We perform here a search on the `select` handler of Solr.
"""
if not self.readable:
raise TypeError("This Solr instance is only for writing")
params.append(('wt', 'json'))
qs = scorched.compat.urlencode(params)
url = "%s?%s" % (self.select_url, qs)
if len(url) > self.max_length_get_url:
warnings.warn(
"Long query URL encountered - POSTing instead of "
"GETting. This query will not be cached at the HTTP layer")
url = self.select_url
method = 'POST'
kwargs = {
'data': qs,
'headers': {
"Content-Type": "application/x-www-form-urlencoded"}}
else:
method = 'GET'
kwargs = {}
if self.search_timeout != ():
kwargs['timeout'] = self.search_timeout
response = self.request(method, url, **kwargs)
if response.status_code != 200:
raise scorched.exc.SolrError(response)
return response.text
def mlt(self, params, content=None):
"""
:param params: LuceneQuery converted to a dictionary with search
queries
:type params: dict
:returns: json -- json string
Perform a MoreLikeThis query using the content specified
There may be no content if stream.url is specified in the params.
"""
if not self.readable:
raise TypeError("This Solr instance is only for writing")
params.append(('wt', 'json'))
qs = scorched.compat.urlencode(params)
base_url = "%s?%s" % (self.mlt_url, qs)
method = 'GET'
kwargs = {}
if content is None:
url = base_url
else:
get_url = "%s&stream.body=%s" % (
base_url, scorched.compat.quote_plus(content))
if len(get_url) <= self.max_length_get_url:
url = get_url
else:
url = base_url
method = 'POST'
kwargs = {
'data': content,
'headers': {"Content-Type": "text/plain; charset=utf-8"}}
response = self.request(method, url, **kwargs)
if response.status_code != 200:
raise scorched.exc.SolrError(response.content)
return response.text
class SolrInterface(object):
remote_schema_file = "schema?wt=json"
def __init__(self, url, http_connection=None, mode='',
retry_timeout=-1, max_length_get_url=MAX_LENGTH_GET_URL,
search_timeout=()):
"""
:param url: url to Solr
:type url: str
:param http_connection: optional -- already existing connection
:type http_connection: requests connection
:param mode: optional -- mode (readable, writable) Solr
:type mode: str
:param retry_timeout: optional -- timeout until retry
:type retry_timeout: int
        :param max_length_get_url: optional -- max URL length before switching
                                   to POST
:type max_length_get_url: int
:param search_timeout: (optional) How long to wait for the server to
send data before giving up, as a float, or a
(connect timeout, read timeout) tuple.
:type search_timeout: float or tuple
"""
        self.conn = SolrConnection(
            url, http_connection, mode, retry_timeout, max_length_get_url,
            search_timeout=search_timeout)
self.schema = self.init_schema()
self._datefields = self._extract_datefields(self.schema)
def init_schema(self):
response = self.conn.request(
'GET', scorched.compat.urljoin(self.conn.url,
self.remote_schema_file))
if response.status_code != 200:
raise EnvironmentError(
"Couldn't retrieve schema document - status code %s\n%s" % (
response.status_code, response.content)
)
return response.json()['schema']
def _extract_datefields(self, schema):
ret = [x['name'] for x in
schema['fields'] if x['type'] == 'date']
ret.extend([x['name'] for x in schema['dynamicFields']
if x['type'] == 'date'])
return ret
def _should_skip_value(self, value):
if value is None:
return True
if (
isinstance(value, dict) and
'set' in value and
value['set'] is None
):
return True
return False
def _prepare_date(self, value):
''' Prepare a value of type date
'''
if is_iter(value):
value = [str(scorched.dates.solr_date(v)) for v in
value]
else:
value = str(scorched.dates.solr_date(value))
return value
def _prepare_docs(self, docs):
prepared_docs = []
for doc in docs:
new_doc = {}
for name, value in list(doc.items()):
# XXX remove all None fields this is needed for adding date
# fields
if self._should_skip_value(value):
continue
if scorched.dates.is_datetime_field(name, self._datefields):
if isinstance(value, dict) and 'set' in value:
value['set'] = self._prepare_date(value['set'])
else:
value = self._prepare_date(value)
new_doc[name] = value
prepared_docs.append(new_doc)
return prepared_docs
def add(self, docs, chunk=100, **kwargs):
"""
:param docs: documents to be added
:type docs: dict
:param chunk: optional -- size of chunks in which the add command
should be split
:type chunk: int
        :param kwargs: optional -- additional arguments
:type kwargs: dict
:returns: list of SolrUpdateResponse -- A Solr response object.
Add a document or a list of document to Solr.
"""
if hasattr(docs, "items") or not is_iter(docs):
docs = [docs]
# to avoid making messages too large, we break the message every
# chunk docs.
ret = []
for doc_chunk in grouper(docs, chunk):
update_message = json.dumps(self._prepare_docs(doc_chunk))
ret.append(scorched.response.SolrUpdateResponse.from_json(
self.conn.update(update_message, **kwargs)))
return ret
def delete_by_query(self, query, **kwargs):
"""
        :param query: query selecting which entries should be deleted
:type query: LuceneQuery
:returns: SolrUpdateResponse -- A Solr response object.
Delete entries by a given query
"""
delete_message = json.dumps({"delete": {"query": str(query)}})
ret = scorched.response.SolrUpdateResponse.from_json(
self.conn.update(delete_message, **kwargs))
return ret
def delete_by_ids(self, ids, **kwargs):
"""
:param ids: ids of entries that should be deleted
:type ids: list
:returns: SolrUpdateResponse -- A Solr response object.
Delete entries by a given id
"""
delete_message = json.dumps({"delete": ids})
ret = scorched.response.SolrUpdateResponse.from_json(
self.conn.update(delete_message, **kwargs))
return ret
def commit(self, waitSearcher=None, expungeDeletes=None, softCommit=None):
"""
:param waitSearcher: optional -- block until a new searcher is opened
and registered as the main query searcher, making
the changes visible
:type waitSearcher: bool
:param expungeDeletes: optional -- merge segments with deletes away
:type expungeDeletes: bool
:param softCommit: optional -- perform a soft commit - this will
refresh the 'view' of the index in a more performant
manner, but without "on-disk" guarantees.
:type softCommit: bool
:returns: SolrUpdateResponse -- A Solr response object.
A commit operation makes index changes visible to new search requests.
"""
ret = scorched.response.SolrUpdateResponse.from_json(
self.conn.update('{"commit": {}}', commit=True,
waitSearcher=waitSearcher,
expungeDeletes=expungeDeletes,
softCommit=softCommit))
return ret
def optimize(self, waitSearcher=None, maxSegments=None):
"""
:param waitSearcher: optional -- block until a new searcher is opened
and registered as the main query searcher, making
the changes visible
:type waitSearcher: bool
:param maxSegments: optional -- optimizes down to at most this number
of segments
:type maxSegments: int
:returns: SolrUpdateResponse -- A Solr response object.
An optimize is like a hard commit except that it forces all of the
index segments to be merged into a single segment first.
"""
ret = scorched.response.SolrUpdateResponse.from_json(
self.conn.update('{"optimize": {}}', optimize=True,
waitSearcher=waitSearcher,
maxSegments=maxSegments))
return ret
def rollback(self):
"""
:returns: SolrUpdateResponse -- A Solr response object.
        The rollback command rolls back all adds/deletes made to the index
        since the last commit
"""
ret = scorched.response.SolrUpdateResponse.from_json(
self.conn.update('{"rollback": {}}'))
return ret
def delete_all(self):
"""
:returns: SolrUpdateResponse -- A Solr response object.
Delete everything
"""
return self.delete_by_query(self.Q(**{"*": "*"}))
def get(self, ids, fields=None):
"""
RealTime Get document(s) by id(s)
:param ids: id(s) of the document(s)
:type ids: list, string or int
:param fields: optional -- list of fields to return
        :type fields: list of strings
"""
ret = scorched.response.SolrResponse.from_get_json(
self.conn.get(ids, fields), self._datefields)
return ret
def search(self, **kwargs):
"""
:returns: SolrResponse -- A Solr response object.
Search solr
"""
params = scorched.search.params_from_dict(**kwargs)
ret = scorched.response.SolrResponse.from_json(
self.conn.select(params),
self.schema['uniqueKey'],
self._datefields,
)
return ret
def query(self, *args, **kwargs):
"""
:returns: SolrSearch -- A solrsearch.
Build a Solr query
"""
q = scorched.search.SolrSearch(self)
if len(args) + len(kwargs) > 0:
return q.query(*args, **kwargs)
else:
return q
def mlt_search(self, content=None, **kwargs):
"""
:returns: SolrResponse -- A Solr response object.
More like this search Solr
"""
params = scorched.search.params_from_dict(**kwargs)
ret = scorched.response.SolrResponse.from_json(
self.conn.mlt(params, content=content),
self.schema['uniqueKey'],
self._datefields,
)
return ret
def mlt_query(self, fields, content=None, content_charset=None,
url=None, query_fields=None, **kwargs):
"""
:param fields: field names to compute similarity upon
:type fields: list
        :param content: optional -- string on which to find similar documents
:type content: str
:param content_charset: optional -- charset e.g. (iso-8859-1)
:type content_charset: str
        :param url: optional -- like content but retrieved directly from url
:type url: str
:param query_fields: optional -- adjust boosting values for ``fields``
:type query_fields: dict e.g. ({"a": 0.25, "b": 0.75})
:returns: MltSolrSearch
Perform a similarity query on MoreLikeThisHandler
The MoreLikeThisHandler is expected to be registered at the '/mlt'
endpoint in the solrconfig.xml file of the server.
Other MoreLikeThis specific parameters can be passed as kwargs without
the 'mlt.' prefix.
"""
q = scorched.search.MltSolrSearch(
self, content=content, content_charset=content_charset, url=url)
return q.mlt(fields=fields, query_fields=query_fields, **kwargs)
def extract(self, fh, extractOnly=True, extractFormat='text'):
"""
:param fh: binary file (PDF, MSWord, ODF, ...)
:type fh: open file handle
:returns: SolrExtract
        Extract text and metadata from a binary file.
The ExtractingRequestHandler is expected to be registered at the
'/update/extract' endpoint in the solrconfig.xml file of the server.
"""
url = self.conn.url + 'update/extract'
params = {'wt': 'json'}
if extractOnly:
params['extractOnly'] = 'true'
params['extractFormat'] = extractFormat
files = {'file': fh}
response = self.conn.request('POST', url, params=params, files=files)
if response.status_code != 200:
raise scorched.exc.SolrError(response)
return scorched.response.SolrExtract.from_json(response.json())
def Q(self, *args, **kwargs):
q = scorched.search.LuceneQuery()
q.add(args, kwargs)
return q
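# A minimal usage sketch (assuming a reachable Solr core; names and field
# values are illustrative):
#
#     si = SolrInterface("http://localhost:8983/solr/mycore")
#     si.add({"id": "1", "title": "hello"})
#     si.commit()
#     response = si.query(title="hello").execute()
#     for doc in response.result.docs:
#         print(doc)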
def grouper(iterable, n):
"""
    grouper('ABCDEFG', 3) --> [['A', 'B', 'C'], ['D', 'E', 'F'], ['G']]
"""
i = iter(iterable)
g = list(itertools.islice(i, 0, n))
while g:
yield g
g = list(itertools.islice(i, 0, n))
| lugensa/scorched | scorched/connection.py | Python | mit | 21,848 |
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
from apps.reader.models import MUserStory
class Migration(DataMigration):
def forwards(self, orm):
userstories = MUserStory.objects.all()
print "%s userstories" % userstories.count()
for i, userstory in enumerate(userstories):
try:
if not userstory.story_id and hasattr(userstory.story, 'story_guid'):
print '%s: %s' % (i, userstory.story.story_guid)
userstory.story_id = userstory.story.story_guid
userstory.save()
# else:
# print "%s: skipping" % i
except Exception, e:
print '%s: %s' % (i, e)
print ' ---> %s' % userstory.story
raise e
def backwards(self, orm):
"Write your backwards methods here."
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'reader.feature': {
'Meta': {'ordering': "['-date']", 'object_name': 'Feature'},
'date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'description': ('django.db.models.fields.TextField', [], {'default': "''"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'reader.usersubscription': {
'Meta': {'unique_together': "(('user', 'feed'),)", 'object_name': 'UserSubscription'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'feed': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'subscribers'", 'to': "orm['rss_feeds.Feed']"}),
'feed_opens': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_trained': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_read_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2011, 8, 27, 0, 46, 44, 202076)'}),
'mark_read_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2011, 8, 27, 0, 46, 44, 202076)'}),
'needs_unread_recalc': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'oldest_unread_story_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'unread_count_negative': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'unread_count_neutral': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'unread_count_positive': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'unread_count_updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'subscriptions'", 'to': "orm['auth.User']"}),
'user_title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
'reader.usersubscriptionfolders': {
'Meta': {'object_name': 'UserSubscriptionFolders'},
'folders': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'unique': 'True'})
},
'rss_feeds.feed': {
'Meta': {'ordering': "['feed_title']", 'object_name': 'Feed', 'db_table': "'feeds'"},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'active_subscribers': ('django.db.models.fields.IntegerField', [], {'default': '-1', 'db_index': 'True'}),
'average_stories_per_month': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'creation': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'days_to_trim': ('django.db.models.fields.IntegerField', [], {'default': '90'}),
'etag': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'exception_code': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'favicon_color': ('django.db.models.fields.CharField', [], {'max_length': '6', 'null': 'True', 'blank': 'True'}),
'favicon_not_found': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'feed_address': ('django.db.models.fields.URLField', [], {'unique': 'True', 'max_length': '255'}),
'feed_link': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'feed_link_locked': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'feed_title': ('django.db.models.fields.CharField', [], {'default': "'[Untitled]'", 'max_length': '255', 'null': 'True', 'blank': 'True'}),
'fetched_once': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'has_feed_exception': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'has_page': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'has_page_exception': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_load_time': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'last_update': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'min_to_decay': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'next_scheduled_update': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'num_subscribers': ('django.db.models.fields.IntegerField', [], {'default': '-1'}),
'premium_subscribers': ('django.db.models.fields.IntegerField', [], {'default': '-1'}),
'queued_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'stories_last_month': ('django.db.models.fields.IntegerField', [], {'default': '0'})
}
}
complete_apps = ['reader']
| huihoo/reader | apps/reader/migrations/0011_story_ids_on_userstories.py | Python | mit | 9,767 |
from schematics.models import Model
from schematics.types import StringType
class License(Model):
name = StringType(required=True)
url = StringType()
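# A minimal usage sketch (assuming schematics' standard Model API):
#
#     lic = License({"name": "MIT", "url": "https://opensource.org/licenses/MIT"})
#     lic.validate()             # raises a validation error if "name" is missing
#     print(lic.to_primitive())  # {"name": "MIT", "url": "https://..."}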
| toumorokoshi/swagger-schema | swagger_schema/license.py | Python | mit | 160 |
from .maxflow import *
from .mincost import *
from .boykovkolmogorov import *
from .dinitz_alg import *
from .edmondskarp import *
from .preflowpush import *
from .shortestaugmentingpath import *
from .capacityscaling import *
from .networksimplex import *
from .utils import build_flow_dict, build_residual_network
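# A minimal usage sketch (doctest-style; assumes networkx is importable as nx):
#
#     >>> import networkx as nx
#     >>> G = nx.DiGraph()
#     >>> G.add_edge("s", "t", capacity=3.0)
#     >>> nx.maximum_flow_value(G, "s", "t")
#     3.0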
| JamesClough/networkx | networkx/algorithms/flow/__init__.py | Python | bsd-3-clause | 316 |
from rest_framework import serializers
from .models import Book, Member, Category, Author
class AuthorSerializer(serializers.ModelSerializer):
class Meta:
model = Author
fields = '__all__'
class CategorySerializer(serializers.ModelSerializer):
class Meta:
model = Category
fields = '__all__'
class BookSerializer(serializers.ModelSerializer):
category = CategorySerializer()
author = AuthorSerializer()
class Meta:
model = Book
fields = '__all__'
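# A minimal usage sketch (field names on Book are illustrative): because
# CategorySerializer and AuthorSerializer are nested above, related objects
# are rendered inline instead of as primary keys.
#
#     book = Book.objects.first()
#     BookSerializer(book).data
#     # -> {'id': 1, ..., 'category': {'id': 1, ...}, 'author': {'id': 1, ...}}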
class MemberSerializer(serializers.ModelSerializer):
book = BookSerializer()
class Meta:
model = Member
        fields = '__all__'
| jbuisine/MediaLibrary-RestFullAPI | project/apps/core/serializers.py | Python | mit | 672 |
def is_anagram(token, child):
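    # Case-sensitive check that `token` is a permutation of `child`: consume
    # one matching character of `child` for each character of `token`.
    # Callers pass strings of equal length.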
hits = 0
for t in token:
for i, c in enumerate(child):
if c == t:
child = child[:i] + child[i+1:]
hits += 1
break
return hits == len(token)
def anagram(parent, child):
count = 0
    for start in range(len(parent) - len(child) + 1):  # +1 so the final window is included
end = start + len(child)
token = parent[start:end]
if is_anagram(token, child):
count += 1
return count
print anagram('AdnBndAndBdaBn', 'dAn') # 4 ("Adn", "ndA", "dAn", "And")
print anagram('AbrAcadAbRa', 'cAda') # 2
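# For comparison, a sliding-window variant (a sketch, not from the original
# author; anagram_fast is a name introduced here) that runs in O(len(parent))
# by keeping a running character count instead of re-matching each window.
from collections import Counter

def anagram_fast(parent, child):
    if len(child) > len(parent):
        return 0
    need = Counter(child)
    window = Counter(parent[:len(child)])
    count = 1 if window == need else 0
    for i in range(len(child), len(parent)):
        window[parent[i]] += 1           # character entering the window
        left = parent[i - len(child)]    # character leaving the window
        window[left] -= 1
        if window[left] == 0:
            del window[left]             # drop zero counts so == compares cleanly
        if window == need:
            count += 1
    return count

print anagram_fast('AdnBndAndBdaBn', 'dAn') # 4
print anagram_fast('AbrAcadAbRa', 'cAda') # 2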
| mre/the-coding-interview | problems/anagram-detection/anagram-detection3.py | Python | mit | 545 |
#!/usr/bin/env python3
# Copyright (c) 2015, Ralf Jung <[email protected]>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#==============================================================================
# This is the hook called by git post-commit. It updats all mirrors to the status of the local repository.
import traceback
from git_mirror import *
if __name__ == "__main__":
repo = None # we will try to use this during exception handling
try:
repos = load_repos()
# find the repository we are dealing with
reponame = find_repo_by_directory(repos, os.getcwd())
if reponame is None or reponame not in repos:
raise Exception("Unknown repository {}.".format(reponame))
# now sync this repository
repo = repos[reponame]
# parse the information we get from stdin. we trust this information.
for line in sys.stdin:
line = line.split()
if len(line) == 0: continue
assert len(line) == 3
(oldsha, newsha, ref) = line
repo.update_mirrors(ref, oldsha, newsha)
except Exception as e:
if repo is not None:
repo.mail_owner("There was a problem running the git-mirror git hook:\n\n{}".format(traceback.format_exc()))
# do not print all the details
sys.stderr.write("git-mirror: We have a problem:\n{}".format('\n'.join(traceback.format_exception_only(type(e), e))))
| RalfJung/git-mirror | githook.py | Python | bsd-2-clause | 2,716 |
doctests = """
########### Tests mostly copied from test_listcomps.py ############
Test simple loop with conditional
>>> sum({i*i for i in range(100) if i&1 == 1})
166650
Test simple case
>>> {2*y + x + 1 for x in (0,) for y in (1,)}
set([3])
Test simple nesting
>>> list(sorted({(i,j) for i in range(3) for j in range(4)}))
[(0, 0), (0, 1), (0, 2), (0, 3), (1, 0), (1, 1), (1, 2), (1, 3), (2, 0), (2, 1), (2, 2), (2, 3)]
Test nesting with the inner expression dependent on the outer
>>> list(sorted({(i,j) for i in range(4) for j in range(i)}))
[(1, 0), (2, 0), (2, 1), (3, 0), (3, 1), (3, 2)]
Make sure the induction variable is not exposed
>>> i = 20
>>> sum({i*i for i in range(100)})
328350
>>> i
20
Verify that syntax error's are raised for setcomps used as lvalues
>>> {y for y in (1,2)} = 10 # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
SyntaxError: ...
>>> {y for y in (1,2)} += 10 # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
SyntaxError: ...
Make a nested set comprehension that acts like set(range())
>>> def srange(n):
... return {i for i in range(n)}
>>> list(sorted(srange(10)))
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
Same again, only as a lambda expression instead of a function definition
>>> lrange = lambda n: {i for i in range(n)}
>>> list(sorted(lrange(10)))
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
Generators can call other generators:
>>> def grange(n):
... for x in {i for i in range(n)}:
... yield x
>>> list(sorted(grange(5)))
[0, 1, 2, 3, 4]
Make sure that None is a valid return value
>>> {None for i in range(10)}
set([None])
########### Tests for various scoping corner cases ############
Return lambdas that use the iteration variable as a default argument
>>> items = {(lambda i=i: i) for i in range(5)}
>>> {x() for x in items} == set(range(5))
True
Same again, only this time as a closure variable
>>> items = {(lambda: i) for i in range(5)}
>>> {x() for x in items}
set([4])
Another way to test that the iteration variable is local to the list comp
>>> items = {(lambda: i) for i in range(5)}
>>> i = 20
>>> {x() for x in items}
set([4])
And confirm that a closure can jump over the list comp scope
>>> items = {(lambda: y) for i in range(5)}
>>> y = 2
>>> {x() for x in items}
set([2])
We also repeat each of the above scoping tests inside a function
>>> def test_func():
... items = {(lambda i=i: i) for i in range(5)}
... return {x() for x in items}
>>> test_func() == set(range(5))
True
>>> def test_func():
... items = {(lambda: i) for i in range(5)}
... return {x() for x in items}
>>> test_func()
set([4])
>>> def test_func():
... items = {(lambda: i) for i in range(5)}
... i = 20
... return {x() for x in items}
>>> test_func()
set([4])
>>> def test_func():
... items = {(lambda: y) for i in range(5)}
... y = 2
... return {x() for x in items}
>>> test_func()
set([2])
"""
__test__ = {'doctests' : doctests}
def test_main(verbose=None):
import sys
from test import test_support
from test import test_setcomps
test_support.run_doctest(test_setcomps, verbose)
# verify reference counting
if verbose and hasattr(sys, "gettotalrefcount"):
import gc
counts = [None] * 5
for i in range(len(counts)):
test_support.run_doctest(test_setcomps, verbose)
gc.collect()
counts[i] = sys.gettotalrefcount()
print(counts)
if __name__ == "__main__":
test_main(verbose=True)
| teeple/pns_server | work/install/Python-2.7.4/Lib/test/test_setcomps.py | Python | gpl-2.0 | 3,847 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This experiment was created using PsychoPy2 Experiment Builder (v1.90.1),
on Tue May 15 14:59:08 2018
If you publish work using this script please cite the PsychoPy publications:
Peirce, JW (2007) PsychoPy - Psychophysics software in Python.
Journal of Neuroscience Methods, 162(1-2), 8-13.
Peirce, JW (2009) Generating stimuli for neuroscience using PsychoPy.
Frontiers in Neuroinformatics, 2:10. doi: 10.3389/neuro.11.010.2008
"""
from __future__ import absolute_import, division
from psychopy import locale_setup, sound, gui, visual, core, data, event, logging, clock
from psychopy.constants import (NOT_STARTED, STARTED, PLAYING, PAUSED,
STOPPED, FINISHED, PRESSED, RELEASED, FOREVER)
import numpy as np # whole numpy lib is available, prepend 'np.'
from numpy import (sin, cos, tan, log, log10, pi, average,
sqrt, std, deg2rad, rad2deg, linspace, asarray)
from numpy.random import random, randint, normal, shuffle
import os # handy system and path functions
import sys # to get file system encoding
# Ensure that relative paths start from the same directory as this script
_thisDir = os.path.dirname(os.path.abspath(__file__)).decode(sys.getfilesystemencoding())
os.chdir(_thisDir)
# Store info about the experiment session
expName = 'BostonDots3' # from the Builder filename that created this script
expInfo = {'session': '001', 'participant': ''}
dlg = gui.DlgFromDict(dictionary=expInfo, title=expName)
if dlg.OK == False:
core.quit() # user pressed cancel
expInfo['date'] = data.getDateStr() # add a simple timestamp
expInfo['expName'] = expName
# Data file name stem = absolute path + name; later add .psyexp, .csv, .log, etc
filename = _thisDir + os.sep + u'data/%s_%s_%s' % (expInfo['participant'], expName, expInfo['date'])
# An ExperimentHandler isn't essential but helps with data saving
thisExp = data.ExperimentHandler(name=expName, version='',
extraInfo=expInfo, runtimeInfo=None,
originPath=u'/Users/molfesepj/Desktop/desktop/BostonDots3/BostonDots3.psyexp',
savePickle=True, saveWideText=True,
dataFileName=filename)
# save a log file for detail verbose info
logFile = logging.LogFile(filename+'.log', level=logging.EXP)
logging.console.setLevel(logging.WARNING) # this outputs to the screen, not a file
endExpNow = False # flag for 'escape' or other condition => quit the exp
# Start Code - component code to be run before the window creation
# Setup the Window
win = visual.Window(
size=(1024, 768), fullscr=True, screen=0,
allowGUI=False, allowStencil=False,
monitor='testMonitor', color=[0,0,0], colorSpace='rgb',
blendMode='avg', useFBO=True)
# store frame rate of monitor if we can measure it
expInfo['frameRate'] = win.getActualFrameRate()
if expInfo['frameRate'] != None:
frameDur = 1.0 / round(expInfo['frameRate'])
else:
frameDur = 1.0 / 60.0 # could not measure, so guess
# Initialize components for Routine "welcome"
welcomeClock = core.Clock()
text = visual.TextStim(win=win, name='text',
text='Welcome to the Boston Dots Task!\n\nWaiting for Scanner...',
font='Arial',
pos=(0, 0), height=0.1, wrapWidth=None, ori=0,
color='white', colorSpace='rgb', opacity=1,
depth=0.0);
# Initialize components for Routine "instr1_solid"
instr1_solidClock = core.Clock()
text_2 = visual.TextStim(win=win, name='text_2',
text='We are about to begin!\n\nIn this block, press the same\nside of the solid circle!\n\nWe will give you two examples.',
font='Arial',
pos=(0, 0), height=0.1, wrapWidth=None, ori=0,
color='white', colorSpace='rgb', opacity=1,
depth=0.0);
ex1a = visual.ImageStim(
win=win, name='ex1a',
image='grey_left.bmp', mask=None,
ori=0, pos=(0, 0), size=(0.5, 0.5),
color=[1,1,1], colorSpace='rgb', opacity=1,
flipHoriz=False, flipVert=False,
texRes=128, interpolate=True, depth=-1.0)
ex1_fix = visual.ImageStim(
win=win, name='ex1_fix',
image='fix.bmp', mask=None,
ori=0, pos=(0, 0), size=(0.5, 0.5),
color=[1,1,1], colorSpace='rgb', opacity=1,
flipHoriz=False, flipVert=False,
texRes=128, interpolate=True, depth=-3.0)
ex1b = visual.ImageStim(
win=win, name='ex1b',
image='grey_right.bmp', mask=None,
ori=0, pos=(0, 0), size=(0.5, 0.5),
color=[1,1,1], colorSpace='rgb', opacity=1,
flipHoriz=False, flipVert=False,
texRes=128, interpolate=True, depth=-4.0)
text_3 = visual.TextStim(win=win, name='text_3',
text='Now we will begin the task.\n\nIn 3 seconds.',
font='Arial',
pos=(0, 0), height=0.1, wrapWidth=None, ori=0,
color='white', colorSpace='rgb', opacity=1,
depth=-6.0);
# Initialize components for Routine "trial"
trialClock = core.Clock()
dotimage = visual.ImageStim(
win=win, name='dotimage',
image='sin', mask=None,
ori=0, pos=(0, 0), size=(0.5, 0.5),
color=[1,1,1], colorSpace='rgb', opacity=1,
flipHoriz=False, flipVert=False,
texRes=128, interpolate=True, depth=0.0)
fiximage = visual.ImageStim(
win=win, name='fiximage',
image='fix.bmp', mask=None,
ori=0, pos=(0, 0), size=(0.5, 0.5),
color=[1,1,1], colorSpace='rgb', opacity=1,
flipHoriz=False, flipVert=False,
texRes=128, interpolate=True, depth=-2.0)
# Initialize components for Routine "instr2_striped"
instr2_stripedClock = core.Clock()
instr2_text1 = visual.TextStim(win=win, name='instr2_text1',
text='You have completed one block!\n\nIn this next block, press the opposite\nside of the striped circle!\n\nWe will give you two examples.',
font='Arial',
pos=(0, 0), height=0.1, wrapWidth=None, ori=0,
color='white', colorSpace='rgb', opacity=1,
depth=0.0);
ex_2_1 = visual.ImageStim(
win=win, name='ex_2_1',
image='str_left.bmp', mask=None,
ori=0, pos=(0, 0), size=(0.5, 0.5),
color=[1,1,1], colorSpace='rgb', opacity=1,
flipHoriz=False, flipVert=False,
texRes=128, interpolate=True, depth=-1.0)
ex2_fix = visual.ImageStim(
win=win, name='ex2_fix',
image='fix.bmp', mask=None,
ori=0, pos=(0, 0), size=(0.5, 0.5),
color=[1,1,1], colorSpace='rgb', opacity=1,
flipHoriz=False, flipVert=False,
texRes=128, interpolate=True, depth=-3.0)
ex_2_2 = visual.ImageStim(
win=win, name='ex_2_2',
image='str_right.bmp', mask=None,
ori=0, pos=(0, 0), size=(0.5, 0.5),
color=[1,1,1], colorSpace='rgb', opacity=1,
flipHoriz=False, flipVert=False,
texRes=128, interpolate=True, depth=-4.0)
text_4 = visual.TextStim(win=win, name='text_4',
text='Now we will return to the task.\n\nIn 3 seconds.',
font='Arial',
pos=(0, 0), height=0.1, wrapWidth=None, ori=0,
color='white', colorSpace='rgb', opacity=1,
depth=-6.0);
# Initialize components for Routine "trial"
trialClock = core.Clock()
dotimage = visual.ImageStim(
win=win, name='dotimage',
image='sin', mask=None,
ori=0, pos=(0, 0), size=(0.5, 0.5),
color=[1,1,1], colorSpace='rgb', opacity=1,
flipHoriz=False, flipVert=False,
texRes=128, interpolate=True, depth=0.0)
fiximage = visual.ImageStim(
win=win, name='fiximage',
image='fix.bmp', mask=None,
ori=0, pos=(0, 0), size=(0.5, 0.5),
color=[1,1,1], colorSpace='rgb', opacity=1,
flipHoriz=False, flipVert=False,
texRes=128, interpolate=True, depth=-2.0)
# Initialize components for Routine "instr3_mixed"
instr3_mixedClock = core.Clock()
text_5 = visual.TextStim(win=win, name='text_5',
text='You have completed two blocks!\n\nIn this next block, we will mix\nthe two conditions! \n\nWe will begin in 3 seconds',
font='Arial',
pos=(0, 0), height=0.1, wrapWidth=None, ori=0,
color='white', colorSpace='rgb', opacity=1,
depth=0.0);
# Initialize components for Routine "trial"
trialClock = core.Clock()
dotimage = visual.ImageStim(
win=win, name='dotimage',
image='sin', mask=None,
ori=0, pos=(0, 0), size=(0.5, 0.5),
color=[1,1,1], colorSpace='rgb', opacity=1,
flipHoriz=False, flipVert=False,
texRes=128, interpolate=True, depth=0.0)
fiximage = visual.ImageStim(
win=win, name='fiximage',
image='fix.bmp', mask=None,
ori=0, pos=(0, 0), size=(0.5, 0.5),
color=[1,1,1], colorSpace='rgb', opacity=1,
flipHoriz=False, flipVert=False,
texRes=128, interpolate=True, depth=-2.0)
# Initialize components for Routine "goodbye"
goodbyeClock = core.Clock()
text_6 = visual.TextStim(win=win, name='text_6',
text="You've completed the task!",
font='Arial',
pos=(0, 0), height=0.1, wrapWidth=None, ori=0,
color='white', colorSpace='rgb', opacity=1,
depth=0.0);
# Create some handy timers
globalClock = core.Clock() # to track the time since experiment started
routineTimer = core.CountdownTimer() # to track time remaining of each (non-slip) routine
# ------Prepare to start Routine "welcome"-------
t = 0
welcomeClock.reset() # clock
frameN = -1
continueRoutine = True
# update component parameters for each repeat
startResp1 = event.BuilderKeyResponse()
# keep track of which components have finished
welcomeComponents = [text, startResp1]
for thisComponent in welcomeComponents:
if hasattr(thisComponent, 'status'):
thisComponent.status = NOT_STARTED
# -------Start Routine "welcome"-------
while continueRoutine:
# get current time
t = welcomeClock.getTime()
frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
# update/draw components on each frame
# *text* updates
if t >= 0.0 and text.status == NOT_STARTED:
# keep track of start time/frame for later
text.tStart = t
text.frameNStart = frameN # exact frame index
text.setAutoDraw(True)
# *startResp1* updates
if t >= 0.0 and startResp1.status == NOT_STARTED:
# keep track of start time/frame for later
startResp1.tStart = t
startResp1.frameNStart = frameN # exact frame index
startResp1.status = STARTED
# keyboard checking is just starting
win.callOnFlip(startResp1.clock.reset) # t=0 on next screen flip
event.clearEvents(eventType='keyboard')
if startResp1.status == STARTED:
theseKeys = event.getKeys(keyList=['t'])
# check for quit:
if "escape" in theseKeys:
endExpNow = True
if len(theseKeys) > 0: # at least one key was pressed
startResp1.keys = theseKeys[-1] # just the last key pressed
startResp1.rt = startResp1.clock.getTime()
# a response ends the routine
continueRoutine = False
# check if all components have finished
if not continueRoutine: # a component has requested a forced-end of Routine
break
continueRoutine = False # will revert to True if at least one component still running
for thisComponent in welcomeComponents:
if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
continueRoutine = True
break # at least one component has not yet finished
# check for quit (the Esc key)
if endExpNow or event.getKeys(keyList=["escape"]):
core.quit()
# refresh the screen
if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
win.flip()
# -------Ending Routine "welcome"-------
for thisComponent in welcomeComponents:
if hasattr(thisComponent, "setAutoDraw"):
thisComponent.setAutoDraw(False)
# check responses
if startResp1.keys in ['', [], None]: # No response was made
startResp1.keys=None
thisExp.addData('startResp1.keys',startResp1.keys)
if startResp1.keys != None: # we had a response
thisExp.addData('startResp1.rt', startResp1.rt)
thisExp.nextEntry()
# the Routine "welcome" was not non-slip safe, so reset the non-slip timer
routineTimer.reset()
# ------Prepare to start Routine "instr1_solid"-------
t = 0
instr1_solidClock.reset() # clock
frameN = -1
continueRoutine = True
routineTimer.add(12.500000)
# update component parameters for each repeat
key_resp_3 = event.BuilderKeyResponse()
key_resp_4 = event.BuilderKeyResponse()
# keep track of which components have finished
instr1_solidComponents = [text_2, ex1a, key_resp_3, ex1_fix, ex1b, key_resp_4, text_3]
for thisComponent in instr1_solidComponents:
if hasattr(thisComponent, 'status'):
thisComponent.status = NOT_STARTED
# -------Start Routine "instr1_solid"-------
while continueRoutine and routineTimer.getTime() > 0:
# get current time
t = instr1_solidClock.getTime()
frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
# update/draw components on each frame
# *text_2* updates
if t >= 0.0 and text_2.status == NOT_STARTED:
# keep track of start time/frame for later
text_2.tStart = t
text_2.frameNStart = frameN # exact frame index
text_2.setAutoDraw(True)
frameRemains = 0.0 + 4.0- win.monitorFramePeriod * 0.75 # most of one frame period left
if text_2.status == STARTED and t >= frameRemains:
text_2.setAutoDraw(False)
# *ex1a* updates
if t >= 4.0 and ex1a.status == NOT_STARTED:
# keep track of start time/frame for later
ex1a.tStart = t
ex1a.frameNStart = frameN # exact frame index
ex1a.setAutoDraw(True)
frameRemains = 4.0 + 2.0- win.monitorFramePeriod * 0.75 # most of one frame period left
if ex1a.status == STARTED and t >= frameRemains:
ex1a.setAutoDraw(False)
# *key_resp_3* updates
if t >= 4.0 and key_resp_3.status == NOT_STARTED:
# keep track of start time/frame for later
key_resp_3.tStart = t
key_resp_3.frameNStart = frameN # exact frame index
key_resp_3.status = STARTED
# keyboard checking is just starting
win.callOnFlip(key_resp_3.clock.reset) # t=0 on next screen flip
event.clearEvents(eventType='keyboard')
frameRemains = 4.0 + 2- win.monitorFramePeriod * 0.75 # most of one frame period left
if key_resp_3.status == STARTED and t >= frameRemains:
key_resp_3.status = STOPPED
if key_resp_3.status == STARTED:
theseKeys = event.getKeys(keyList=['g', 'b', '1', '2'])
# check for quit:
if "escape" in theseKeys:
endExpNow = True
if len(theseKeys) > 0: # at least one key was pressed
key_resp_3.keys = theseKeys[-1] # just the last key pressed
key_resp_3.rt = key_resp_3.clock.getTime()
# was this 'correct'?
if (key_resp_3.keys == str("'g'")) or (key_resp_3.keys == "'g'"):
key_resp_3.corr = 1
else:
key_resp_3.corr = 0
# *ex1_fix* updates
if t >= 6.0 and ex1_fix.status == NOT_STARTED:
# keep track of start time/frame for later
ex1_fix.tStart = t
ex1_fix.frameNStart = frameN # exact frame index
ex1_fix.setAutoDraw(True)
frameRemains = 6.0 + 1.0- win.monitorFramePeriod * 0.75 # most of one frame period left
if ex1_fix.status == STARTED and t >= frameRemains:
ex1_fix.setAutoDraw(False)
# *ex1b* updates
if t >= 7.0 and ex1b.status == NOT_STARTED:
# keep track of start time/frame for later
ex1b.tStart = t
ex1b.frameNStart = frameN # exact frame index
ex1b.setAutoDraw(True)
frameRemains = 7.0 + 2.0- win.monitorFramePeriod * 0.75 # most of one frame period left
if ex1b.status == STARTED and t >= frameRemains:
ex1b.setAutoDraw(False)
# *key_resp_4* updates
if t >= 7.0 and key_resp_4.status == NOT_STARTED:
# keep track of start time/frame for later
key_resp_4.tStart = t
key_resp_4.frameNStart = frameN # exact frame index
key_resp_4.status = STARTED
# keyboard checking is just starting
win.callOnFlip(key_resp_4.clock.reset) # t=0 on next screen flip
event.clearEvents(eventType='keyboard')
frameRemains = 7.0 + 2- win.monitorFramePeriod * 0.75 # most of one frame period left
if key_resp_4.status == STARTED and t >= frameRemains:
key_resp_4.status = STOPPED
if key_resp_4.status == STARTED:
theseKeys = event.getKeys(keyList=['b', 'g', '1', '2'])
# check for quit:
if "escape" in theseKeys:
endExpNow = True
if len(theseKeys) > 0: # at least one key was pressed
key_resp_4.keys = theseKeys[-1] # just the last key pressed
key_resp_4.rt = key_resp_4.clock.getTime()
# was this 'correct'?
if (key_resp_4.keys == str("'b'")) or (key_resp_4.keys == "'b'"):
key_resp_4.corr = 1
else:
key_resp_4.corr = 0
# *text_3* updates
if t >= 9.5 and text_3.status == NOT_STARTED:
# keep track of start time/frame for later
text_3.tStart = t
text_3.frameNStart = frameN # exact frame index
text_3.setAutoDraw(True)
frameRemains = 9.5 + 3.0- win.monitorFramePeriod * 0.75 # most of one frame period left
if text_3.status == STARTED and t >= frameRemains:
text_3.setAutoDraw(False)
# check if all components have finished
if not continueRoutine: # a component has requested a forced-end of Routine
break
continueRoutine = False # will revert to True if at least one component still running
for thisComponent in instr1_solidComponents:
if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
continueRoutine = True
break # at least one component has not yet finished
# check for quit (the Esc key)
if endExpNow or event.getKeys(keyList=["escape"]):
core.quit()
# refresh the screen
if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
win.flip()
# -------Ending Routine "instr1_solid"-------
for thisComponent in instr1_solidComponents:
if hasattr(thisComponent, "setAutoDraw"):
thisComponent.setAutoDraw(False)
# check responses
if key_resp_3.keys in ['', [], None]: # No response was made
key_resp_3.keys=None
# was no response the correct answer?!
if str("'g'").lower() == 'none':
key_resp_3.corr = 1 # correct non-response
else:
key_resp_3.corr = 0 # failed to respond (incorrectly)
# store data for thisExp (ExperimentHandler)
thisExp.addData('key_resp_3.keys',key_resp_3.keys)
thisExp.addData('key_resp_3.corr', key_resp_3.corr)
if key_resp_3.keys != None: # we had a response
thisExp.addData('key_resp_3.rt', key_resp_3.rt)
thisExp.nextEntry()
# check responses
if key_resp_4.keys in ['', [], None]: # No response was made
key_resp_4.keys=None
# was no response the correct answer?!
if str("'b'").lower() == 'none':
key_resp_4.corr = 1 # correct non-response
else:
key_resp_4.corr = 0 # failed to respond (incorrectly)
# store data for thisExp (ExperimentHandler)
thisExp.addData('key_resp_4.keys',key_resp_4.keys)
thisExp.addData('key_resp_4.corr', key_resp_4.corr)
if key_resp_4.keys != None: # we had a response
thisExp.addData('key_resp_4.rt', key_resp_4.rt)
thisExp.nextEntry()
# set up handler to look after randomisation of conditions etc
trials = data.TrialHandler(nReps=2, method='sequential',
extraInfo=expInfo, originPath=-1,
trialList=data.importConditions('block1_solid.xlsx'),
seed=None, name='trials')
thisExp.addLoop(trials) # add the loop to the experiment
thisTrial = trials.trialList[0] # so we can initialise stimuli with some values
# abbreviate parameter names if possible (e.g. rgb = thisTrial.rgb)
if thisTrial != None:
for paramName in thisTrial:
exec('{} = thisTrial[paramName]'.format(paramName))
for thisTrial in trials:
currentLoop = trials
# abbreviate parameter names if possible (e.g. rgb = thisTrial.rgb)
if thisTrial != None:
for paramName in thisTrial:
exec('{} = thisTrial[paramName]'.format(paramName))
# ------Prepare to start Routine "trial"-------
t = 0
trialClock.reset() # clock
frameN = -1
continueRoutine = True
routineTimer.add(2.500000)
# update component parameters for each repeat
dotimage.setImage(image)
key_resp_2 = event.BuilderKeyResponse()
# keep track of which components have finished
trialComponents = [dotimage, key_resp_2, fiximage]
for thisComponent in trialComponents:
if hasattr(thisComponent, 'status'):
thisComponent.status = NOT_STARTED
# -------Start Routine "trial"-------
while continueRoutine and routineTimer.getTime() > 0:
# get current time
t = trialClock.getTime()
frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
# update/draw components on each frame
# *dotimage* updates
if t >= 1 and dotimage.status == NOT_STARTED:
# keep track of start time/frame for later
dotimage.tStart = t
dotimage.frameNStart = frameN # exact frame index
dotimage.setAutoDraw(True)
frameRemains = 1 + 1.5- win.monitorFramePeriod * 0.75 # most of one frame period left
if dotimage.status == STARTED and t >= frameRemains:
dotimage.setAutoDraw(False)
# *key_resp_2* updates
if t >= 1 and key_resp_2.status == NOT_STARTED:
# keep track of start time/frame for later
key_resp_2.tStart = t
key_resp_2.frameNStart = frameN # exact frame index
key_resp_2.status = STARTED
# keyboard checking is just starting
win.callOnFlip(key_resp_2.clock.reset) # t=0 on next screen flip
event.clearEvents(eventType='keyboard')
frameRemains = 1 + 1.5- win.monitorFramePeriod * 0.75 # most of one frame period left
if key_resp_2.status == STARTED and t >= frameRemains:
key_resp_2.status = STOPPED
if key_resp_2.status == STARTED:
theseKeys = event.getKeys(keyList=['g', 'b', '1', '2'])
# check for quit:
if "escape" in theseKeys:
endExpNow = True
if len(theseKeys) > 0: # at least one key was pressed
key_resp_2.keys = theseKeys[-1] # just the last key pressed
key_resp_2.rt = key_resp_2.clock.getTime()
# was this 'correct'?
if (key_resp_2.keys == str(corrAns)) or (key_resp_2.keys == corrAns):
key_resp_2.corr = 1
else:
key_resp_2.corr = 0
# *fiximage* updates
if t >= .50 and fiximage.status == NOT_STARTED:
# keep track of start time/frame for later
fiximage.tStart = t
fiximage.frameNStart = frameN # exact frame index
fiximage.setAutoDraw(True)
frameRemains = .50 + .5- win.monitorFramePeriod * 0.75 # most of one frame period left
if fiximage.status == STARTED and t >= frameRemains:
fiximage.setAutoDraw(False)
# check if all components have finished
if not continueRoutine: # a component has requested a forced-end of Routine
break
continueRoutine = False # will revert to True if at least one component still running
for thisComponent in trialComponents:
if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
continueRoutine = True
break # at least one component has not yet finished
# check for quit (the Esc key)
if endExpNow or event.getKeys(keyList=["escape"]):
core.quit()
# refresh the screen
if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
win.flip()
# -------Ending Routine "trial"-------
for thisComponent in trialComponents:
if hasattr(thisComponent, "setAutoDraw"):
thisComponent.setAutoDraw(False)
# check responses
if key_resp_2.keys in ['', [], None]: # No response was made
key_resp_2.keys=None
# was no response the correct answer?!
if str(corrAns).lower() == 'none':
key_resp_2.corr = 1 # correct non-response
else:
key_resp_2.corr = 0 # failed to respond (incorrectly)
# store data for trials (TrialHandler)
trials.addData('key_resp_2.keys',key_resp_2.keys)
trials.addData('key_resp_2.corr', key_resp_2.corr)
if key_resp_2.keys != None: # we had a response
trials.addData('key_resp_2.rt', key_resp_2.rt)
thisExp.nextEntry()
# completed 2 repeats of 'trials'
# ------Prepare to start Routine "instr2_striped"-------
t = 0
instr2_stripedClock.reset() # clock
frameN = -1
continueRoutine = True
routineTimer.add(12.500000)
# update component parameters for each repeat
ex_2_1_keyb = event.BuilderKeyResponse()
ex_2_2_keyb = event.BuilderKeyResponse()
# keep track of which components have finished
instr2_stripedComponents = [instr2_text1, ex_2_1, ex_2_1_keyb, ex2_fix, ex_2_2, ex_2_2_keyb, text_4]
for thisComponent in instr2_stripedComponents:
if hasattr(thisComponent, 'status'):
thisComponent.status = NOT_STARTED
# -------Start Routine "instr2_striped"-------
while continueRoutine and routineTimer.getTime() > 0:
# get current time
t = instr2_stripedClock.getTime()
frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
# update/draw components on each frame
# *instr2_text1* updates
if t >= 0.0 and instr2_text1.status == NOT_STARTED:
# keep track of start time/frame for later
instr2_text1.tStart = t
instr2_text1.frameNStart = frameN # exact frame index
instr2_text1.setAutoDraw(True)
frameRemains = 0.0 + 4- win.monitorFramePeriod * 0.75 # most of one frame period left
if instr2_text1.status == STARTED and t >= frameRemains:
instr2_text1.setAutoDraw(False)
# *ex_2_1* updates
if t >= 4.0 and ex_2_1.status == NOT_STARTED:
# keep track of start time/frame for later
ex_2_1.tStart = t
ex_2_1.frameNStart = frameN # exact frame index
ex_2_1.setAutoDraw(True)
frameRemains = 4.0 + 2.0- win.monitorFramePeriod * 0.75 # most of one frame period left
if ex_2_1.status == STARTED and t >= frameRemains:
ex_2_1.setAutoDraw(False)
# *ex_2_1_keyb* updates
if t >= 4.0 and ex_2_1_keyb.status == NOT_STARTED:
# keep track of start time/frame for later
ex_2_1_keyb.tStart = t
ex_2_1_keyb.frameNStart = frameN # exact frame index
ex_2_1_keyb.status = STARTED
# keyboard checking is just starting
win.callOnFlip(ex_2_1_keyb.clock.reset) # t=0 on next screen flip
event.clearEvents(eventType='keyboard')
frameRemains = 4.0 + 2- win.monitorFramePeriod * 0.75 # most of one frame period left
if ex_2_1_keyb.status == STARTED and t >= frameRemains:
ex_2_1_keyb.status = STOPPED
if ex_2_1_keyb.status == STARTED:
theseKeys = event.getKeys(keyList=['g', 'b', '1', '2'])
# check for quit:
if "escape" in theseKeys:
endExpNow = True
if len(theseKeys) > 0: # at least one key was pressed
ex_2_1_keyb.keys = theseKeys[-1] # just the last key pressed
ex_2_1_keyb.rt = ex_2_1_keyb.clock.getTime()
# was this 'correct'?
if (ex_2_1_keyb.keys == str("'b'")) or (ex_2_1_keyb.keys == "'b'"):
ex_2_1_keyb.corr = 1
else:
ex_2_1_keyb.corr = 0
# *ex2_fix* updates
if t >= 6.0 and ex2_fix.status == NOT_STARTED:
# keep track of start time/frame for later
ex2_fix.tStart = t
ex2_fix.frameNStart = frameN # exact frame index
ex2_fix.setAutoDraw(True)
frameRemains = 6.0 + 1.0- win.monitorFramePeriod * 0.75 # most of one frame period left
if ex2_fix.status == STARTED and t >= frameRemains:
ex2_fix.setAutoDraw(False)
# *ex_2_2* updates
if t >= 7.0 and ex_2_2.status == NOT_STARTED:
# keep track of start time/frame for later
ex_2_2.tStart = t
ex_2_2.frameNStart = frameN # exact frame index
ex_2_2.setAutoDraw(True)
frameRemains = 7.0 + 2.0- win.monitorFramePeriod * 0.75 # most of one frame period left
if ex_2_2.status == STARTED and t >= frameRemains:
ex_2_2.setAutoDraw(False)
# *ex_2_2_keyb* updates
if t >= 7.0 and ex_2_2_keyb.status == NOT_STARTED:
# keep track of start time/frame for later
ex_2_2_keyb.tStart = t
ex_2_2_keyb.frameNStart = frameN # exact frame index
ex_2_2_keyb.status = STARTED
# keyboard checking is just starting
win.callOnFlip(ex_2_2_keyb.clock.reset) # t=0 on next screen flip
event.clearEvents(eventType='keyboard')
frameRemains = 7.0 + 2- win.monitorFramePeriod * 0.75 # most of one frame period left
if ex_2_2_keyb.status == STARTED and t >= frameRemains:
ex_2_2_keyb.status = STOPPED
if ex_2_2_keyb.status == STARTED:
theseKeys = event.getKeys(keyList=['b', 'g', '1', '2'])
# check for quit:
if "escape" in theseKeys:
endExpNow = True
if len(theseKeys) > 0: # at least one key was pressed
ex_2_2_keyb.keys = theseKeys[-1] # just the last key pressed
ex_2_2_keyb.rt = ex_2_2_keyb.clock.getTime()
# was this 'correct'?
if (ex_2_2_keyb.keys == str("'g'")) or (ex_2_2_keyb.keys == "'g'"):
ex_2_2_keyb.corr = 1
else:
ex_2_2_keyb.corr = 0
# *text_4* updates
if t >= 9.5 and text_4.status == NOT_STARTED:
# keep track of start time/frame for later
text_4.tStart = t
text_4.frameNStart = frameN # exact frame index
text_4.setAutoDraw(True)
frameRemains = 9.5 + 3.0- win.monitorFramePeriod * 0.75 # most of one frame period left
if text_4.status == STARTED and t >= frameRemains:
text_4.setAutoDraw(False)
# check if all components have finished
if not continueRoutine: # a component has requested a forced-end of Routine
break
continueRoutine = False # will revert to True if at least one component still running
for thisComponent in instr2_stripedComponents:
if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
continueRoutine = True
break # at least one component has not yet finished
# check for quit (the Esc key)
if endExpNow or event.getKeys(keyList=["escape"]):
core.quit()
# refresh the screen
if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
win.flip()
# -------Ending Routine "instr2_striped"-------
for thisComponent in instr2_stripedComponents:
if hasattr(thisComponent, "setAutoDraw"):
thisComponent.setAutoDraw(False)
# check responses
if ex_2_1_keyb.keys in ['', [], None]: # No response was made
ex_2_1_keyb.keys=None
# was no response the correct answer?!
if str("'b'").lower() == 'none':
ex_2_1_keyb.corr = 1 # correct non-response
else:
ex_2_1_keyb.corr = 0 # failed to respond (incorrectly)
# store data for thisExp (ExperimentHandler)
thisExp.addData('ex_2_1_keyb.keys',ex_2_1_keyb.keys)
thisExp.addData('ex_2_1_keyb.corr', ex_2_1_keyb.corr)
if ex_2_1_keyb.keys != None: # we had a response
thisExp.addData('ex_2_1_keyb.rt', ex_2_1_keyb.rt)
thisExp.nextEntry()
# check responses
if ex_2_2_keyb.keys in ['', [], None]: # No response was made
ex_2_2_keyb.keys=None
# was no response the correct answer?!
if str("'g'").lower() == 'none':
ex_2_2_keyb.corr = 1 # correct non-response
else:
ex_2_2_keyb.corr = 0 # failed to respond (incorrectly)
# store data for thisExp (ExperimentHandler)
thisExp.addData('ex_2_2_keyb.keys',ex_2_2_keyb.keys)
thisExp.addData('ex_2_2_keyb.corr', ex_2_2_keyb.corr)
if ex_2_2_keyb.keys != None: # we had a response
thisExp.addData('ex_2_2_keyb.rt', ex_2_2_keyb.rt)
thisExp.nextEntry()
# set up handler to look after randomisation of conditions etc
trials_2 = data.TrialHandler(nReps=2, method='sequential',
extraInfo=expInfo, originPath=-1,
trialList=data.importConditions('block2_stripe.xlsx'),
seed=None, name='trials_2')
thisExp.addLoop(trials_2) # add the loop to the experiment
thisTrial_2 = trials_2.trialList[0] # so we can initialise stimuli with some values
# abbreviate parameter names if possible (e.g. rgb = thisTrial_2.rgb)
if thisTrial_2 != None:
for paramName in thisTrial_2:
exec('{} = thisTrial_2[paramName]'.format(paramName))
for thisTrial_2 in trials_2:
currentLoop = trials_2
# abbreviate parameter names if possible (e.g. rgb = thisTrial_2.rgb)
if thisTrial_2 != None:
for paramName in thisTrial_2:
exec('{} = thisTrial_2[paramName]'.format(paramName))
# ------Prepare to start Routine "trial"-------
t = 0
trialClock.reset() # clock
frameN = -1
continueRoutine = True
routineTimer.add(2.500000)
# update component parameters for each repeat
dotimage.setImage(image)
key_resp_2 = event.BuilderKeyResponse()
# keep track of which components have finished
trialComponents = [dotimage, key_resp_2, fiximage]
for thisComponent in trialComponents:
if hasattr(thisComponent, 'status'):
thisComponent.status = NOT_STARTED
# -------Start Routine "trial"-------
while continueRoutine and routineTimer.getTime() > 0:
# get current time
t = trialClock.getTime()
frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
# update/draw components on each frame
# *dotimage* updates
if t >= 1 and dotimage.status == NOT_STARTED:
# keep track of start time/frame for later
dotimage.tStart = t
dotimage.frameNStart = frameN # exact frame index
dotimage.setAutoDraw(True)
frameRemains = 1 + 1.5- win.monitorFramePeriod * 0.75 # most of one frame period left
if dotimage.status == STARTED and t >= frameRemains:
dotimage.setAutoDraw(False)
# *key_resp_2* updates
if t >= 1 and key_resp_2.status == NOT_STARTED:
# keep track of start time/frame for later
key_resp_2.tStart = t
key_resp_2.frameNStart = frameN # exact frame index
key_resp_2.status = STARTED
# keyboard checking is just starting
win.callOnFlip(key_resp_2.clock.reset) # t=0 on next screen flip
event.clearEvents(eventType='keyboard')
frameRemains = 1 + 1.5- win.monitorFramePeriod * 0.75 # most of one frame period left
if key_resp_2.status == STARTED and t >= frameRemains:
key_resp_2.status = STOPPED
if key_resp_2.status == STARTED:
theseKeys = event.getKeys(keyList=['g', 'b', '1', '2'])
# check for quit:
if "escape" in theseKeys:
endExpNow = True
if len(theseKeys) > 0: # at least one key was pressed
key_resp_2.keys = theseKeys[-1] # just the last key pressed
key_resp_2.rt = key_resp_2.clock.getTime()
# was this 'correct'?
if (key_resp_2.keys == str(corrAns)) or (key_resp_2.keys == corrAns):
key_resp_2.corr = 1
else:
key_resp_2.corr = 0
# *fiximage* updates
if t >= .50 and fiximage.status == NOT_STARTED:
# keep track of start time/frame for later
fiximage.tStart = t
fiximage.frameNStart = frameN # exact frame index
fiximage.setAutoDraw(True)
frameRemains = .50 + .5- win.monitorFramePeriod * 0.75 # most of one frame period left
if fiximage.status == STARTED and t >= frameRemains:
fiximage.setAutoDraw(False)
# check if all components have finished
if not continueRoutine: # a component has requested a forced-end of Routine
break
continueRoutine = False # will revert to True if at least one component still running
for thisComponent in trialComponents:
if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
continueRoutine = True
break # at least one component has not yet finished
# check for quit (the Esc key)
if endExpNow or event.getKeys(keyList=["escape"]):
core.quit()
# refresh the screen
if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
win.flip()
# -------Ending Routine "trial"-------
for thisComponent in trialComponents:
if hasattr(thisComponent, "setAutoDraw"):
thisComponent.setAutoDraw(False)
# check responses
if key_resp_2.keys in ['', [], None]: # No response was made
key_resp_2.keys=None
# was no response the correct answer?!
if str(corrAns).lower() == 'none':
key_resp_2.corr = 1 # correct non-response
else:
key_resp_2.corr = 0 # failed to respond (incorrectly)
# store data for trials_2 (TrialHandler)
trials_2.addData('key_resp_2.keys',key_resp_2.keys)
trials_2.addData('key_resp_2.corr', key_resp_2.corr)
if key_resp_2.keys != None: # we had a response
trials_2.addData('key_resp_2.rt', key_resp_2.rt)
thisExp.nextEntry()
# completed 2 repeats of 'trials_2'
# ------Prepare to start Routine "instr3_mixed"-------
t = 0
instr3_mixedClock.reset() # clock
frameN = -1
continueRoutine = True
routineTimer.add(3.000000)
# update component parameters for each repeat
# keep track of which components have finished
instr3_mixedComponents = [text_5]
for thisComponent in instr3_mixedComponents:
if hasattr(thisComponent, 'status'):
thisComponent.status = NOT_STARTED
# -------Start Routine "instr3_mixed"-------
while continueRoutine and routineTimer.getTime() > 0:
# get current time
t = instr3_mixedClock.getTime()
frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
# update/draw components on each frame
# *text_5* updates
if t >= 0.0 and text_5.status == NOT_STARTED:
# keep track of start time/frame for later
text_5.tStart = t
text_5.frameNStart = frameN # exact frame index
text_5.setAutoDraw(True)
frameRemains = 0.0 + 3.0- win.monitorFramePeriod * 0.75 # most of one frame period left
if text_5.status == STARTED and t >= frameRemains:
text_5.setAutoDraw(False)
# check if all components have finished
if not continueRoutine: # a component has requested a forced-end of Routine
break
continueRoutine = False # will revert to True if at least one component still running
for thisComponent in instr3_mixedComponents:
if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
continueRoutine = True
break # at least one component has not yet finished
# check for quit (the Esc key)
if endExpNow or event.getKeys(keyList=["escape"]):
core.quit()
# refresh the screen
if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
win.flip()
# -------Ending Routine "instr3_mixed"-------
for thisComponent in instr3_mixedComponents:
if hasattr(thisComponent, "setAutoDraw"):
thisComponent.setAutoDraw(False)
# set up handler to look after randomisation of conditions etc
trials_3 = data.TrialHandler(nReps=1, method='sequential',
extraInfo=expInfo, originPath=-1,
trialList=data.importConditions('block3_mixed.xlsx'),
seed=None, name='trials_3')
thisExp.addLoop(trials_3) # add the loop to the experiment
thisTrial_3 = trials_3.trialList[0] # so we can initialise stimuli with some values
# abbreviate parameter names if possible (e.g. rgb = thisTrial_3.rgb)
if thisTrial_3 != None:
for paramName in thisTrial_3:
exec('{} = thisTrial_3[paramName]'.format(paramName))
for thisTrial_3 in trials_3:
currentLoop = trials_3
# abbreviate parameter names if possible (e.g. rgb = thisTrial_3.rgb)
if thisTrial_3 != None:
for paramName in thisTrial_3:
exec('{} = thisTrial_3[paramName]'.format(paramName))
# ------Prepare to start Routine "trial"-------
t = 0
trialClock.reset() # clock
frameN = -1
continueRoutine = True
routineTimer.add(2.500000)
# update component parameters for each repeat
dotimage.setImage(image)
key_resp_2 = event.BuilderKeyResponse()
# keep track of which components have finished
trialComponents = [dotimage, key_resp_2, fiximage]
for thisComponent in trialComponents:
if hasattr(thisComponent, 'status'):
thisComponent.status = NOT_STARTED
# -------Start Routine "trial"-------
while continueRoutine and routineTimer.getTime() > 0:
# get current time
t = trialClock.getTime()
frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
# update/draw components on each frame
# *dotimage* updates
if t >= 1 and dotimage.status == NOT_STARTED:
# keep track of start time/frame for later
dotimage.tStart = t
dotimage.frameNStart = frameN # exact frame index
dotimage.setAutoDraw(True)
frameRemains = 1 + 1.5- win.monitorFramePeriod * 0.75 # most of one frame period left
if dotimage.status == STARTED and t >= frameRemains:
dotimage.setAutoDraw(False)
# *key_resp_2* updates
if t >= 1 and key_resp_2.status == NOT_STARTED:
# keep track of start time/frame for later
key_resp_2.tStart = t
key_resp_2.frameNStart = frameN # exact frame index
key_resp_2.status = STARTED
# keyboard checking is just starting
win.callOnFlip(key_resp_2.clock.reset) # t=0 on next screen flip
event.clearEvents(eventType='keyboard')
frameRemains = 1 + 1.5- win.monitorFramePeriod * 0.75 # most of one frame period left
if key_resp_2.status == STARTED and t >= frameRemains:
key_resp_2.status = STOPPED
if key_resp_2.status == STARTED:
theseKeys = event.getKeys(keyList=['g', 'b', '1', '2'])
# check for quit:
if "escape" in theseKeys:
endExpNow = True
if len(theseKeys) > 0: # at least one key was pressed
key_resp_2.keys = theseKeys[-1] # just the last key pressed
key_resp_2.rt = key_resp_2.clock.getTime()
# was this 'correct'?
if (key_resp_2.keys == str(corrAns)) or (key_resp_2.keys == corrAns):
key_resp_2.corr = 1
else:
key_resp_2.corr = 0
# *fiximage* updates
if t >= .50 and fiximage.status == NOT_STARTED:
# keep track of start time/frame for later
fiximage.tStart = t
fiximage.frameNStart = frameN # exact frame index
fiximage.setAutoDraw(True)
frameRemains = .50 + .5- win.monitorFramePeriod * 0.75 # most of one frame period left
if fiximage.status == STARTED and t >= frameRemains:
fiximage.setAutoDraw(False)
# check if all components have finished
if not continueRoutine: # a component has requested a forced-end of Routine
break
continueRoutine = False # will revert to True if at least one component still running
for thisComponent in trialComponents:
if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
continueRoutine = True
break # at least one component has not yet finished
# check for quit (the Esc key)
if endExpNow or event.getKeys(keyList=["escape"]):
core.quit()
# refresh the screen
if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
win.flip()
# -------Ending Routine "trial"-------
for thisComponent in trialComponents:
if hasattr(thisComponent, "setAutoDraw"):
thisComponent.setAutoDraw(False)
# check responses
if key_resp_2.keys in ['', [], None]: # No response was made
key_resp_2.keys=None
# was no response the correct answer?!
if str(corrAns).lower() == 'none':
key_resp_2.corr = 1 # correct non-response
else:
key_resp_2.corr = 0 # failed to respond (incorrectly)
# store data for trials_3 (TrialHandler)
trials_3.addData('key_resp_2.keys',key_resp_2.keys)
trials_3.addData('key_resp_2.corr', key_resp_2.corr)
if key_resp_2.keys != None: # we had a response
trials_3.addData('key_resp_2.rt', key_resp_2.rt)
thisExp.nextEntry()
# completed 1 repeats of 'trials_3'
# ------Prepare to start Routine "goodbye"-------
t = 0
goodbyeClock.reset() # clock
frameN = -1
continueRoutine = True
routineTimer.add(4.000000)
# update component parameters for each repeat
# keep track of which components have finished
goodbyeComponents = [text_6]
for thisComponent in goodbyeComponents:
if hasattr(thisComponent, 'status'):
thisComponent.status = NOT_STARTED
# -------Start Routine "goodbye"-------
while continueRoutine and routineTimer.getTime() > 0:
# get current time
t = goodbyeClock.getTime()
frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
# update/draw components on each frame
# *text_6* updates
if t >= 0.0 and text_6.status == NOT_STARTED:
# keep track of start time/frame for later
text_6.tStart = t
text_6.frameNStart = frameN # exact frame index
text_6.setAutoDraw(True)
frameRemains = 0.0 + 4.0- win.monitorFramePeriod * 0.75 # most of one frame period left
if text_6.status == STARTED and t >= frameRemains:
text_6.setAutoDraw(False)
# check if all components have finished
if not continueRoutine: # a component has requested a forced-end of Routine
break
continueRoutine = False # will revert to True if at least one component still running
for thisComponent in goodbyeComponents:
if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
continueRoutine = True
break # at least one component has not yet finished
# check for quit (the Esc key)
if endExpNow or event.getKeys(keyList=["escape"]):
core.quit()
# refresh the screen
if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
win.flip()
# -------Ending Routine "goodbye"-------
for thisComponent in goodbyeComponents:
if hasattr(thisComponent, "setAutoDraw"):
thisComponent.setAutoDraw(False)
# these shouldn't be strictly necessary (should auto-save)
thisExp.saveAsWideText(filename+'.csv')
thisExp.saveAsPickle(filename)
logging.flush()
# make sure everything is closed down
thisExp.abort() # or data files will save again on exit
win.close()
core.quit()
| djangraw/PsychoPyParadigms | EegFmriExperiments/BostonDots3.1/BostonDots3_lastrun.py | Python | mit | 47,905 |
#
# camera.py: for collecting training data
#
import time
from picamera import PiCamera
camera = PiCamera()
while True:
time.sleep(0.1)
filename = str(time.time()) + '.jpg'
camera.capture(filename)
print(filename)
| kazunori279/ml-misc | toilet/camera.py | Python | apache-2.0 | 226 |
#!/usr/bin/env python
"""
See the file 'LICENSE' for copying permission
"""
from core.common import retrieve_content
__url__ = "https://dataplane.org/*.txt"
__check__ = "DataPlane.org"
__info__ = "known attacker"
__reference__ = "dataplane.org"
def fetch():
retval = {}
for url in ("https://dataplane.org/dnsrd.txt", "https://dataplane.org/dnsrdany.txt", "https://dataplane.org/dnsversion.txt", "https://dataplane.org/sipinvitation.txt", "https://dataplane.org/sipquery.txt", "https://dataplane.org/sipregistration.txt", "https://dataplane.org/smtpdata.txt", "https://dataplane.org/smtpgreet.txt", "https://dataplane.org/sshclient.txt", "https://dataplane.org/sshpwauth.txt", "https://dataplane.org/telnetlogin.txt", "https://dataplane.org/vncrfb.txt"):
content = retrieve_content(url)
if __check__ in content:
for line in content.split('\n'):
line = line.strip()
if not line or line.startswith('#') or '.' not in line or '|' not in line:
continue
retval[line.split('|')[2].strip()] = (__info__, __reference__)
return retval
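# Hedged usage sketch (appended for illustration, not part of the feed): run
# the fetcher standalone and print a few harvested entries. Requires network
# access and maltrail's core.common on the import path; the slice of five
# entries is arbitrary.
if __name__ == '__main__':
    harvested = fetch()
    for address in list(harvested)[:5]:
        print("%s => %s" % (address, harvested[address]))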
| stamparm/maltrail | trails/feeds/dataplane.py | Python | mit | 1,139 |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__all__ = ['LARS']
from typing import List
import jax.numpy as jn
from objax.module import Module, ModuleList
from objax.typing import JaxArray
from objax.util import class_name
from objax.variable import TrainRef, StateVar, TrainVar, VarCollection
class LARS(Module):
"""Layerwise adaptive rate scaling (LARS) optimizer.
See https://arxiv.org/abs/1708.03888
"""
def __init__(self, vc: VarCollection,
momentum: float = 0.9,
weight_decay: float = 1e-4,
tc: float = 1e-3,
eps: float = 1e-5):
"""Constructor for LARS optimizer.
Args:
vc: collection of variables to optimize.
momentum: coefficient used for the moving average of the gradient.
weight_decay: weight decay coefficient.
tc: trust coefficient eta ( < 1) for trust ratio computation.
eps: epsilon used for trust ratio computation.
"""
self.momentum = momentum
self.weight_decay = weight_decay
self.tc = tc
self.eps = eps
self.train_vars = ModuleList(TrainRef(x) for x in vc.subset(TrainVar))
self.m = ModuleList(StateVar(jn.zeros_like(x.value)) for x in self.train_vars)
def __call__(self, lr: float, grads: List[JaxArray]):
"""Updates variables based on LARS algorithm.
Args:
lr: learning rate. The LARS paper suggests using lr = lr_0 * (1 -t/T)**2,
where t is the current epoch number and T the maximum number of epochs.
grads: the gradients to apply.
"""
assert len(grads) == len(self.train_vars), 'Expecting as many gradients as trainable variables'
for g, p, m in zip(grads, self.train_vars, self.m):
p_norm = jn.linalg.norm(p.value)
g_norm = jn.linalg.norm(g)
trust_ratio = self.tc * p_norm / (g_norm + self.weight_decay * p_norm + self.eps)
local_lr = lr * jn.maximum(jn.logical_or(p_norm == 0, g_norm == 0), trust_ratio)
m.value = self.momentum * m.value + local_lr * (g + self.weight_decay * p.value)
p.value -= m.value
def __repr__(self):
return f'{class_name(self)}(momentum={self.momentum}, weight_decay={self.weight_decay}, ' \
f'tc={self.tc}, eps={self.eps})'
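# Hedged usage sketch (appended for illustration, not part of objax): one LARS
# step on a small linear model. Assumes objax's public API (objax.nn.Linear,
# objax.GradValues); the shapes and the lr value are arbitrary.
if __name__ == '__main__':
    import objax
    model = objax.nn.Linear(4, 1)
    def loss_fn(x, y):
        # Simple mean-squared-error loss over the batch.
        return ((model(x) - y) ** 2).mean()
    gv = objax.GradValues(loss_fn, model.vars())
    opt = LARS(model.vars())
    grads, _ = gv(jn.ones((8, 4)), jn.zeros((8, 1)))
    opt(lr=0.1, grads=grads)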
| google/objax | objax/optimizer/lars.py | Python | apache-2.0 | 2,914 |
import unittest
from itertools import chain
from board import Board
class BoardTest(unittest.TestCase):
def test_board_width(self):
board = Board(6,5)
self.assertEqual(len(board.blocks), 5)
def test_board_height(self):
board = Board(6,5)
self.assertTrue(all(map(lambda x: len(x) == 6, board.blocks)))
def test_board_elements_all_none(self):
board = Board(6,5)
self.assertTrue(all(map(lambda x: x is None, chain(*board.blocks))))
if __name__ == '__main__':
    unittest.main()
| velinakd/candy-crush-2048 | tests/board_test.py | Python | mit | 541 |
""" Investable Server """
from flask import Flask, render_template, redirect, flash, request, jsonify, json
from flask_debugtoolbar import DebugToolbarExtension
import jinja2
import os
import geocoder
from zillow_utilities import *
from account_utilities import *
from mortgage_calculator import *
from db_queries import *
from model import *
app = Flask(__name__)
app.secret_key = os.environ['APP_KEY']
app.zwsid = os.environ['ZWSID']
app.gmaps = os.environ['GMAPS_JS']
# jinja debugger
app.jinja_env.undefined = jinja2.StrictUndefined
app.jinja_env.auto_reload = True
##############################################################################
# Route definitions
@app.route('/')
def homepage():
""" Brings user to the homepage. """
return render_template('index.html', GMAPS_JS=app.gmaps)
@app.route('/search.json')
def search():
""" Returns user search results from Zillow's API and PostgreSQL. """
# Search address entered by the user
full_address = {}
full_address.update(request.args.items())
# Gets API response data from zillow_utilities
response_code, price, message, hoa = get_unit_price(full_address)
# If the location is found in Zillow's API
if response_code == 100:
unit_details = get_zillow_unit_details(full_address)
# Returns the response code and unit details from Zillow's API and PostgreSQL
listing = { 'response': response_code,
'price': price,
'message': message,
'neighborhood': unit_details['neighborhood'],
'street': unit_details['street'],
'city': unit_details['city'],
'state': unit_details['state'],
'zipcode': unit_details['zipcode'],
'bedrooms': unit_details['bedrooms'],
'bathrooms': unit_details['bathrooms'],
'sqft': unit_details['sqft'],
'hoa': hoa,
'latitude': unit_details['latitude'],
'longitude': unit_details['longitude'],
'latlng_point': unit_details['latlng_point'],
'zpid': unit_details['zpid']
}
# Adds a listing to the database
add_listing_to_db(listing)
else:
listing = { 'response': response_code, 'price': price, 'message': message }
return jsonify(listing)
@app.route('/avgrent.json')
def get_rent_avgs():
""" Gets average rent data from db_queries. """
listing = request.args.get('listing')
listing_dict = json.loads(listing)
# If source data contains a latlng_point
if listing_dict.get('latlng_point'):
latlng_point = listing_dict['latlng_point']
# Otherwise, make a latlng point
else:
latlng_point = 'POINT({} {})'.format(listing_dict['latitude'],listing_dict['longitude'])
rent_avgs = get_avg_rent(listing_dict['bedrooms'], listing_dict['bathrooms'], listing_dict['sqft'], latlng_point)
return jsonify(rent_avgs)
@app.route('/listings.json')
def get_listings():
""" Finds listings in the area filtered by bedrooms, bathrooms, and/or prices. """
bounds = json.loads(request.args.get('geoBounds'))
bedrooms = float(request.args.get('bedroomFilter'))
bathrooms = float(request.args.get('bathroomFilter'))
low_price = int(request.args.get('lowPrice'))
high_price = int(request.args.get('highPrice'))
# Retrieves listings from db_queries
filtered_listings = find_all_listings(bounds, bedrooms, bathrooms, low_price, high_price)
return jsonify(filtered_listings)
@app.route('/calculator')
def calculate_monthly_payment():
""" Calculates the monthly mortgage payment based on user's details. """
# User mortgage data pulled from AJAX
mortgage_details = {}
mortgage_details.update(request.args.items())
mortgage, total_mortgage = calculate_mortgage(mortgage_details)
return jsonify({ 'mortgage': mortgage, 'total_mortgage': total_mortgage })
##############################################################################
# Helper functions
if __name__ == "__main__":
app.debug = False
# Use the DebugToolbar
DebugToolbarExtension(app)
# Connect DB to Flask before running app
connect_to_db_flask(app, os.environ.get("DATABASE_URL"))
PORT = int(os.environ.get('PORT', 5000))
app.run(host="0.0.0.0", port=PORT)
| jttyeung/investable | server.py | Python | mit | 4,455 |
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Driver of srpcgen for ppapi_proxy sources.
This must be run after modifying, adding, or removing .srpc files.
The lists of .srpc files are in this source file.
"""
import filecmp
import optparse
import os
import shutil
import subprocess
import sys
import tempfile
# This lists all the units (one header file, one source file) to generate.
# There is a trusted pair of files and an untrusted pair of files for each.
# Each element has these keys:
# trusted_is_client True if the trusted side is the client
# and the untrusted side is the server;
# False if the untrusted side is the client
# and the trusted side is the server.
# name Prefix of the main class name, suffixed
# by Server or Client.
# file_basename Prefix of the output file names, suffixed
# by .h, _server.cc, _client.cc.
# srpc_files List of .srpc input file names.
# client_thread_check (optional) True if the client side should
# be generated with --thread-check.
# This asserts that calls are on the main thread.
all_units = [
{'trusted_is_client': True,
'name': 'PppRpcs',
'file_basename': 'ppp_rpc',
'srpc_files': [
'completion_callback.srpc',
'ppp.srpc',
'ppp_audio.srpc',
'ppp_find.srpc',
'ppp_input_event.srpc',
'ppp_instance.srpc',
'ppp_messaging.srpc',
'ppp_mouse_lock.srpc',
'ppp_printing.srpc',
'ppp_scrollbar.srpc',
'ppp_selection.srpc',
'ppp_widget.srpc',
'ppp_zoom.srpc',
]},
{'trusted_is_client': False,
'client_thread_check': True,
'name': 'PpbRpcs',
'file_basename': 'ppb_rpc',
'srpc_files': [
'ppb.srpc',
'ppb_audio.srpc',
'ppb_audio_config.srpc',
'ppb_core.srpc',
'ppb_file_io.srpc',
'ppb_file_ref.srpc',
'ppb_file_system.srpc',
'ppb_find.srpc',
'ppb_font.srpc',
'ppb_fullscreen.srpc',
'ppb_gamepad.srpc',
'ppb_graphics_2d.srpc',
'ppb_graphics_3d.srpc',
'ppb_host_resolver_private.srpc',
'ppb_image_data.srpc',
'ppb_input_event.srpc',
'ppb_instance.srpc',
'ppb_messaging.srpc',
'ppb_mouse_cursor.srpc',
'ppb_mouse_lock.srpc',
'ppb_net_address_private.srpc',
'ppb_pdf.srpc',
'ppb_scrollbar.srpc',
'ppb_tcp_server_socket_private.srpc',
'ppb_tcp_socket_private.srpc',
'ppb_testing.srpc',
'ppb_udp_socket_private.srpc',
'ppb_url_loader.srpc',
'ppb_url_request_info.srpc',
'ppb_url_response_info.srpc',
'ppb_websocket.srpc',
'ppb_widget.srpc',
'ppb_zoom.srpc',
]},
{'trusted_is_client': False,
'name': 'PpbUpcalls',
'file_basename': 'upcall',
'srpc_files': [
'upcall.srpc',
]},
]
def GeneratorForUnit(options, unit, is_trusted, output_dir):
header_file_name = unit['file_basename'] + '.h'
server_file_name = unit['file_basename'] + '_server.cc'
client_file_name = unit['file_basename'] + '_client.cc'
header_guard = 'GEN_PPAPI_PROXY_%s_H_' % unit['file_basename'].upper()
is_client = is_trusted == unit['trusted_is_client']
thread_check = unit.get('client_thread_check', False) == is_client
header_dir = 'trusted' if is_trusted else 'untrusted'
header = os.path.join(header_dir, 'srpcgen', header_file_name)
source = client_file_name if is_client else server_file_name
command = [
sys.executable, options.srpcgen,
'--ppapi',
'--include=' + '/'.join([header_dir, 'srpcgen', header_file_name]),
'-c' if is_client else '-s',
]
if thread_check:
command.append('--thread-check')
command += [
unit['name'],
header_guard,
os.path.join(output_dir, header),
os.path.join(output_dir, source),
]
command += [os.path.join(options.source_dir, file)
for file in unit['srpc_files']]
return command, [header, source]
def RunOneUnit(options, unit, is_trusted):
result = 0
if options.diff_mode:
temp_dir = tempfile.mkdtemp(prefix='srpcdiff')
try:
command, files = GeneratorForUnit(options, unit, is_trusted, temp_dir)
result = subprocess.call(command)
if result != 0:
print 'Command failed: ' + ' '.join(command)
else:
for file in files:
output_file = os.path.join(options.output_dir, file)
generated_file = os.path.join(temp_dir, file)
if not filecmp.cmp(output_file, generated_file, shallow=False):
print '%s is out of date' % output_file
result = 1
finally:
shutil.rmtree(temp_dir, ignore_errors=True)
else:
command, _ = GeneratorForUnit(options, unit, is_trusted, options.output_dir)
print 'Run: ' + ' '.join(command)
if not options.dry_run:
result = subprocess.call(command)
return result
def RunUnits(options, is_trusted):
result = 0
for unit in all_units:
this_result = RunOneUnit(options, unit, is_trusted)
if this_result != 0:
print 'Error %d on %s.' % (this_result, unit['name'])
result = this_result
return result
def RunAll(options):
trusted_result = RunUnits(options, True)
untrusted_result = RunUnits(options, False)
return trusted_result or untrusted_result
def Main(argv):
parser = optparse.OptionParser(usage='Usage: %prog [options]')
parser.add_option('-o', '--output_dir',
help='Directory to receive output files.')
parser.add_option('-d', '--source_dir',
help='Directory containing .srpc files.',
default=os.path.dirname(argv[0]))
parser.add_option('-s', '--srpcgen',
help='Path to the srpcgen.py script.')
parser.add_option('-n', '--dry_run', action='store_true',
help='Just print commands instead of running them.',
default=False)
parser.add_option('-c', '--diff_mode', action='store_true',
help='Touch no files, but complain if they would differ',
default=False)
options, args = parser.parse_args(argv[1:])
# Get rid of any excess ../ elements so the directory names are
# shorter and more obvious to the eye.
options.source_dir = os.path.normpath(options.source_dir)
if options.output_dir is None:
options.output_dir = options.source_dir
if options.srpcgen is None:
options.srpcgen = os.path.normpath(os.path.join(options.source_dir,
'..', '..', 'tools',
'srpcgen.py'))
if args:
parser.print_help()
return 1
return RunAll(options)
if __name__ == '__main__':
sys.exit(Main(sys.argv))
| keishi/chromium | ppapi/native_client/src/shared/ppapi_proxy/run_srpcgen.py | Python | bsd-3-clause | 7,278 |
from django.apps import AppConfig
from django.db.models import ForeignKey
from django.utils.translation import gettext_lazy as _
from . import checks, get_image_model # NOQA
from .signal_handlers import register_signal_handlers
class WagtailImagesAppConfig(AppConfig):
name = "wagtail.images"
label = "wagtailimages"
verbose_name = _("Wagtail images")
default_auto_field = "django.db.models.AutoField"
def ready(self):
register_signal_handlers()
# Set up model forms to use AdminImageChooser for any ForeignKey to the image model
from wagtail.admin.forms.models import register_form_field_override
from .widgets import AdminImageChooser
Image = get_image_model()
register_form_field_override(
ForeignKey, to=Image, override={"widget": AdminImageChooser}
)
# Set up image ForeignKeys to use ImageFieldComparison as the comparison class
# when comparing page revisions
from wagtail.admin.compare import register_comparison_class
from .edit_handlers import ImageFieldComparison
register_comparison_class(
ForeignKey, to=Image, comparison_class=ImageFieldComparison
)
| wagtail/wagtail | wagtail/images/apps.py | Python | bsd-3-clause | 1,223 |
import asyncio
import logging
from notaol.p3.packet import Packet, HEADER_LENGTH, HEADER_SIZE_OFFSET
_logger = logging.getLogger(__name__)
class Stream(object):
'''Connection handler.'''
def __init__(self, host='AmericaOnline.aol.com', port='5190'):
self._host = host
self._port = port
self._reader = None
self._writer = None
def closed(self):
'''Return whether the connection is closed.'''
        return not self._reader or self._reader.at_eof()
def close(self):
'''Close the connection.'''
if self._reader:
_logger.debug('Close connection.')
self._reader = None
self._writer.close()
self._writer = None
@asyncio.coroutine
def connect(self):
'''Connect to the service.
Coroutine.
'''
_logger.debug('Connect.')
self._reader, self._writer = yield from asyncio.open_connection(
self._host, self._port)
_logger.debug('Connected.')
@asyncio.coroutine
def write_packet(self, packet):
'''Send a packet.
Coroutine.
'''
_logger.debug('Write packet %s', packet)
self._writer.write(packet.to_bytes())
yield from self._writer.drain()
@asyncio.coroutine
def read_packet(self):
'''Receive a packet.'''
_logger.debug('Begin read packet.')
packet = Packet()
header = yield from self._reader.readexactly(HEADER_LENGTH)
packet.parse_header(header)
_logger.debug('Got header %s', packet)
# header is 8 bytes + data + stop byte
bytes_to_read = packet.length - HEADER_SIZE_OFFSET + 1
_logger.debug('Need to read %d bytes', bytes_to_read)
data = yield from self._reader.readexactly(bytes_to_read)
packet.parse_body(data)
_logger.debug('Got packet %s', packet)
return packet
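# Hedged usage sketch (appended for illustration, not part of the module):
# connect, read one packet, and close. Requires a reachable P3 host; uses the
# same pre-3.5 coroutine style as the class above.
if __name__ == '__main__':
    @asyncio.coroutine
    def _demo():
        stream = Stream()
        yield from stream.connect()
        packet = yield from stream.read_packet()
        print(packet)
        stream.close()
    logging.basicConfig(level=logging.DEBUG)
    asyncio.get_event_loop().run_until_complete(_demo())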
| chfoo/notaol | notaol/p3/stream.py | Python | gpl-3.0 | 1,932 |
#
# This source file is part of the EdgeDB open source project.
#
# Copyright 2012-present MagicStack Inc. and the EdgeDB authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import annotations
from .pidfile import PidFile # NOQA
from .exceptions import DaemonError # NOQA
from .daemon import DaemonContext # NOQA
| edgedb/edgedb | edb/server/daemon/__init__.py | Python | apache-2.0 | 840 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import os
import unittest
import requests
sys.path.append(os.path.abspath('../nematus'))
from translate import main as translate
from settings import TranslationSettings
def load_wmt16_model(src, target):
path = os.path.join('models', '{0}-{1}'.format(src,target))
try:
os.makedirs(path)
except OSError:
pass
for filename in ['model.npz', 'model.npz.json', 'vocab.{0}.json'.format(src), 'vocab.{0}.json'.format(target)]:
if not os.path.exists(os.path.join(path, filename)):
r = requests.get('http://data.statmt.org/rsennrich/wmt16_systems/{0}-{1}/'.format(src,target) + filename, stream=True)
with open(os.path.join(path, filename), 'wb') as f:
for chunk in r.iter_content(1024**2):
f.write(chunk)
class TestTranslate(unittest.TestCase):
"""
Regression tests for translation with WMT16 models
"""
def setUp(self):
"""
Download pre-trained models
"""
load_wmt16_model('en','de')
load_wmt16_model('en','ro')
def outputEqual(self, output1, output2):
"""given two translation outputs, check that output string is identical,
and probabilities are equal within rounding error.
"""
for i, (line, line2) in enumerate(zip(open(output1).readlines(), open(output2).readlines())):
if not i % 2:
self.assertEqual(line, line2)
else:
probs = map(float, line.split())
                probs2 = map(float, line2.split())
for p, p2 in zip(probs, probs2):
self.assertAlmostEqual(p, p2, 5)
def get_settings(self):
"""
Initialize and customize settings.
"""
translation_settings = TranslationSettings()
translation_settings.models = ["model.npz"]
translation_settings.num_processes = 1
translation_settings.beam_width = 12
translation_settings.normalization_alpha = 1.0
translation_settings.suppress_unk = True
translation_settings.get_word_probs = True
return translation_settings
# English-German WMT16 system, no dropout
def test_ende(self):
os.chdir('models/en-de/')
translation_settings = self.get_settings()
translate(
input_file=open('../../en-de/in'),
output_file=open('../../en-de/out','w'),
translation_settings=translation_settings
)
os.chdir('../..')
self.outputEqual('en-de/ref','en-de/out')
# English-Romanian WMT16 system, dropout
def test_enro(self):
os.chdir('models/en-ro/')
translation_settings = self.get_settings()
translate(
input_file=open('../../en-ro/in'),
output_file=open('../../en-ro/out','w'),
translation_settings=translation_settings
)
os.chdir('../..')
self.outputEqual('en-ro/ref','en-ro/out')
if __name__ == '__main__':
unittest.main()
| Proyag/nematus | test/test_translate.py | Python | bsd-3-clause | 3,188 |
import json
from django.conf import settings
from django.contrib.messages.storage.base import BaseStorage
from django.contrib.messages.storage.cookie import (
MessageDecoder, MessageEncoder,
)
class SessionStorage(BaseStorage):
"""
Stores messages in the session (that is, django.contrib.sessions).
"""
session_key = '_messages'
def __init__(self, request, *args, **kwargs):
assert hasattr(request, 'session'), "The session-based temporary "\
"message storage requires session middleware to be installed, "\
"and come before the message middleware in the "\
"MIDDLEWARE%s list." % ("_CLASSES" if settings.MIDDLEWARE is None else "")
super().__init__(request, *args, **kwargs)
def _get(self, *args, **kwargs):
"""
Retrieves a list of messages from the request's session. This storage
always stores everything it is given, so return True for the
all_retrieved flag.
"""
return self.deserialize_messages(self.request.session.get(self.session_key)), True
def _store(self, messages, response, *args, **kwargs):
"""
Stores a list of messages to the request's session.
"""
if messages:
self.request.session[self.session_key] = self.serialize_messages(messages)
else:
self.request.session.pop(self.session_key, None)
return []
def serialize_messages(self, messages):
encoder = MessageEncoder(separators=(',', ':'))
return encoder.encode(messages)
def deserialize_messages(self, data):
if data and isinstance(data, str):
return json.loads(data, cls=MessageDecoder)
return data
| auready/django | django/contrib/messages/storage/session.py | Python | bsd-3-clause | 1,733 |
from mongoengine import connect, fields
from mongoengine.document import Document
from wtforms import fields as form_fields, validators
from wtforms.form import Form
from flask_views.tests.functional.base import BaseTestCase
class BaseMongoTestCase(BaseTestCase):
"""
Base test-case class for MongoDB tests.
"""
def setUp(self):
super(BaseMongoTestCase, self).setUp()
self.db = connect('brocaar_flask_views_test')
class TestDocument(Document):
username = fields.StringField(
verbose_name='Username',
required=True,
)
name = fields.StringField(
verbose_name='Name',
required=True,
)
class TestForm(Form):
username = form_fields.TextField(
'Username', [validators.required()])
name = form_fields.TextField(
'Name', [validators.required()])
self.TestDocument = TestDocument
self.TestForm = TestForm
def tearDown(self):
self.TestDocument.drop_collection()
| brocaar/flask-views | flask_views/tests/functional/db/mongoengine/base.py | Python | bsd-3-clause | 1,104 |
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2018 Compassion CH (http://www.compassion.ch)
# @author: Nicolas Badoux <[email protected]>
#
# The licence is in the file __manifest__.py
#
##############################################################################
from . import models
from . import controllers
| ecino/compassion-switzerland | payment_ogone_compassion/__init__.py | Python | agpl-3.0 | 401 |
from flask import request
from flask_restplus import Resource
from skf.api.security import security_headers, validate_privilege
from skf.api.kb.business import update_kb_item
from skf.api.kb.serializers import kb_update, message
from skf.api.kb.parsers import authorization
from skf.api.restplus import api
from skf.api.security import log, val_num, val_alpha, val_alpha_num, val_alpha_num_special
ns = api.namespace('kb', description='Operations related to kb items')
@ns.route('/update/<int:kb_id>')
@api.doc(params={'id': 'The kb item id'})
@api.response(404, 'Validation error', message)
class KBItemUpdate(Resource):
@api.expect(authorization, kb_update)
@api.marshal_with(message, 'Success')
@api.response(400, 'No results found', message)
def put(self, kb_id):
"""
Update a kb item.
* Privileges required: **edit**
"""
data = request.json
val_num(kb_id)
val_alpha_num_special(data.get('title'))
validate_privilege(self, 'edit')
result = update_kb_item(kb_id, data)
return result, 200, security_headers()
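# Hedged client-side sketch (not part of the endpoint): how a consumer might
# exercise this route with `requests`. The host, port, API prefix, and token
# handling are assumptions about the deployment, not taken from this file.
if __name__ == '__main__':
    import requests
    response = requests.put(
        'http://localhost:8888/api/kb/update/1',
        json={'title': 'Example item'},
        headers={'Authorization': '<JWT token>'})
    print(response.status_code, response.text)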
| blabla1337/skf-flask | skf/api/kb/endpoints/kb_item_update.py | Python | agpl-3.0 | 1,112 |
# Copyright (C) 2017 Alexander Kuvaev ([email protected])
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import pygame
import common
from game import Game
from camera import Camera
from uicontroller import UiController
class Visualizer:
@staticmethod
def draw(surface):
surface.fill(pygame.Color(Game.background_color))
tiles = Game.level.get_visible(Camera.pos, common.WIDTH, common.HEIGHT)
for tile in tiles:
surface.blit(tile.image, (tile.topleft - Camera.pos).as_tuple())
surface.blit(Game.player_image(), (Game.level.get_tile_coord(Game.player_tile) - Camera.pos).as_tuple())
UiController.draw_screen(surface)
return
| Vinatorul/Codpled | src/visualizer.py | Python | gpl-3.0 | 1,296 |
from collections import OrderedDict
from os.path import join as pjoin
from . import utils
import theano
from .status import Status
from .stopping_criteria import TrainingExit
class Trainer(object):
def __init__(self, optimizer, batch_scheduler, status=None):
self.status = status if status is not None else Status(self)
self._optimizer = optimizer
self._batch_scheduler = batch_scheduler
# Gather updates from the optimizer and the batch scheduler.
self._graph_updates = OrderedDict()
self._graph_updates.update(self._optimizer.updates)
self._graph_updates.update(self._batch_scheduler.updates)
# Gather tasks from the optimizer and the batch scheduler.
self._tasks = []
self._tasks.extend(self._optimizer.tasks)
self._tasks.extend(self._batch_scheduler.tasks)
self._learn = None
def train(self):
self._pre_learning()
self._learning()
self._post_learning()
def append_task(self, *tasks):
self._tasks.extend(tasks)
def build_theano_graph(self):
# Get updates from tasks.
for task in self._tasks:
self._graph_updates.update(task.updates)
self._learn = theano.function([],
updates=self._graph_updates,
givens=self._batch_scheduler.givens,
name="learn")
#theano.printing.pydotprint(self._learn, '{0}_learn_{1}'.format(self._optimizer.loss.model.__class__.__name__, theano.config.device), with_ids=True)
def save(self, path):
savedir = utils.create_folder(pjoin(path, "training"))
self.status.save(savedir)
self._optimizer.save(savedir)
self._batch_scheduler.save(savedir)
tasks_dir = utils.create_folder(pjoin(savedir, 'tasks'))
for task in self._tasks:
task.save(tasks_dir)
def load(self, path):
loaddir = pjoin(path, "training")
self.status.load(loaddir)
self._optimizer.load(loaddir)
self._batch_scheduler.load(loaddir)
tasks_dir = pjoin(loaddir, 'tasks')
for task in self._tasks:
task.load(tasks_dir)
def _pre_learning(self):
if self._learn is None:
self.build_theano_graph()
# Only initialize tasks if not resuming
if self.status.current_update == 0:
self._init_tasks()
def _learning(self):
# Learning
try:
while True: # Runs until a TrainingExit exception is raised (usually inside a Task)
self.status.increment_epoch()
self._pre_epoch_tasks()
for _ in self._batch_scheduler:
self.status.increment_update()
self._pre_update_tasks()
self._learn()
self._post_update_tasks()
self._post_epoch_tasks()
except TrainingExit:
pass
def _post_learning(self):
self._finished_tasks()
self.status.done = True
def _init_tasks(self):
for task in self._tasks:
task.init(self.status)
def _pre_epoch_tasks(self):
for task in self._tasks:
task.pre_epoch(self.status)
def _pre_update_tasks(self):
for task in self._tasks:
task.pre_update(self.status)
def _post_update_tasks(self):
for task in self._tasks:
task.post_update(self.status)
def _post_epoch_tasks(self):
for task in self._tasks:
task.post_epoch(self.status)
def _finished_tasks(self):
for task in self._tasks:
task.finished(self.status)
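# Usage sketch (illustrative, not part of the library): `optimizer`,
# `batch_scheduler` and `stopping_task` stand for any concrete objects
# exposing the interfaces used above (`updates`, `tasks`, `givens`, and a
# task that raises TrainingExit when training should stop).
#   trainer = Trainer(optimizer, batch_scheduler)
#   trainer.append_task(stopping_task)
#   trainer.train()
#   trainer.save("/path/to/experiment")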
| ASalvail/smartlearner | smartlearner/trainer.py | Python | bsd-3-clause | 3,751 |
from airy.core.conf import settings
from mongoengine import *
connect(getattr(settings, 'database_name', 'airy'))
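# With the connection above in place, application models can subclass
# mongoengine documents directly (illustrative, not part of airy):
#   class Message(Document):
#       text = StringField()
#   Message(text='hello').save()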
| letolab/airy | airy/core/db.py | Python | bsd-2-clause | 116 |
"""
This script runs sanity checks all datasets in a directory.
Usage:
python check_antmaze_datasets.py <dirname>
"""
import numpy as np
import scipy as sp
import scipy.spatial
import h5py
import os
import argparse
def check_identical_values(dset):
""" Check that values are not identical """
check_keys = ['actions', 'observations', 'infos/qpos', 'infos/qvel']
for k in check_keys:
values = dset[k][:]
values_0 = values[0]
values_mid = values[values.shape[0]//2]
values_last = values[-1]
values = np.c_[values_0, values_mid, values_last].T
dists = sp.spatial.distance.pdist(values)
not_same = dists > 0
assert np.all(not_same)
def check_num_samples(dset):
""" Check that all keys have the same # samples """
check_keys = ['actions', 'observations', 'rewards', 'timeouts', 'terminals', 'infos/qpos', 'infos/qvel']
N = None
for k in check_keys:
values = dset[k]
if N is None:
N = values.shape[0]
else:
assert values.shape[0] == N
def check_reset_nonterminal(dataset):
""" Check if a reset occured on a non-terminal state."""
positions = dataset['observations'][:-1,0:2]
next_positions = dataset['observations'][1:,0:2]
diffs = np.linalg.norm(positions-next_positions, axis=1)
terminal = ((dataset['terminals'][:] + dataset['timeouts'][:]) > 0)[:-1]
num_resets = np.sum(diffs > 5.0)
num_nonterminal_reset = np.sum( (diffs > 5.0) * (1-terminal))
    print('num resets:', num_resets)
    print('num non-terminal resets:', num_nonterminal_reset)
assert num_nonterminal_reset == 0
def print_avg_returns(dset):
""" Print returns for manual sanity checking. """
rew = dset['rewards'][:]
terminals = dset['terminals'][:]
timeouts = dset['timeouts'][:]
end_episode = (timeouts + terminals) > 0
all_returns = []
returns = 0
for i in range(rew.shape[0]):
returns += float(rew[i])
if end_episode[i]:
all_returns.append(returns)
returns = 0
print('Avg returns:', np.mean(all_returns))
print('# timeout:', np.sum(timeouts))
print('# terminals:', np.sum(terminals))
CHECK_FNS = [print_avg_returns, check_reset_nonterminal, check_identical_values, check_num_samples]
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('dirname', type=str, help='Directory containing HDF5 datasets')
args = parser.parse_args()
dirname = args.dirname
for fname in os.listdir(dirname):
if fname.endswith('.hdf5'):
            hfile = h5py.File(os.path.join(dirname, fname), 'r')
print('Checking:', fname)
for check_fn in CHECK_FNS:
try:
check_fn(hfile)
except AssertionError as e:
print('Failed test:', check_fn.__name__)
#raise e
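# Example run (illustrative; the directory is a placeholder):
#   python check_antmaze_datasets.py /path/to/antmaze/hdf5s
# Each check prints its diagnostics; failed assertions are reported per
# file without aborting the whole sweep.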
| rail-berkeley/d4rl | scripts/check_antmaze_datasets.py | Python | apache-2.0 | 2,930 |
""" Generic Unification algorithm for expression trees with lists of children
This implementation is a direct translation of
Artificial Intelligence: A Modern Approach by Stuart Russel and Peter Norvig
Second edition, section 9.2, page 276
It is modified in the following ways:
1. We allow associative and commutative Compound expressions. This results in
combinatorial blowup.
2. We explore the tree lazily.
3. We provide generic interfaces to symbolic algebra libraries in Python.
A more traditional version can be found here
http://aima.cs.berkeley.edu/python/logic.html
"""
from __future__ import print_function, division
from sympy.utilities.iterables import kbins
from sympy.core.compatibility import range
class Compound(object):
""" A little class to represent an interior node in the tree
    This is analogous to SymPy.Basic for non-Atoms
"""
def __init__(self, op, args):
self.op = op
self.args = args
def __eq__(self, other):
return (type(self) == type(other) and self.op == other.op and
self.args == other.args)
def __hash__(self):
return hash((type(self), self.op, self.args))
def __str__(self):
return "%s[%s]" % (str(self.op), ', '.join(map(str, self.args)))
class Variable(object):
""" A Wild token """
def __init__(self, arg):
self.arg = arg
def __eq__(self, other):
return type(self) == type(other) and self.arg == other.arg
def __hash__(self):
return hash((type(self), self.arg))
def __str__(self):
return "Variable(%s)" % str(self.arg)
class CondVariable(object):
""" A wild token that matches conditionally
arg - a wild token
valid - an additional constraining function on a match
"""
def __init__(self, arg, valid):
self.arg = arg
self.valid = valid
def __eq__(self, other):
return (type(self) == type(other) and
self.arg == other.arg and
self.valid == other.valid)
def __hash__(self):
return hash((type(self), self.arg, self.valid))
def __str__(self):
return "CondVariable(%s)" % str(self.arg)
def unify(x, y, s=None, **fns):
""" Unify two expressions
inputs:
x, y - expression trees containing leaves, Compounds and Variables
s - a mapping of variables to subtrees
outputs:
lazy sequence of mappings {Variable: subtree}
Examples
========
>>> from sympy.unify.core import unify, Compound, Variable
>>> expr = Compound("Add", ("x", "y"))
>>> pattern = Compound("Add", ("x", Variable("a")))
>>> next(unify(expr, pattern, {}))
{Variable(a): 'y'}
"""
s = s or {}
if x == y:
yield s
elif isinstance(x, (Variable, CondVariable)):
for match in unify_var(x, y, s, **fns):
yield match
elif isinstance(y, (Variable, CondVariable)):
for match in unify_var(y, x, s, **fns):
yield match
elif isinstance(x, Compound) and isinstance(y, Compound):
is_commutative = fns.get('is_commutative', lambda x: False)
is_associative = fns.get('is_associative', lambda x: False)
for sop in unify(x.op, y.op, s, **fns):
if is_associative(x) and is_associative(y):
a, b = (x, y) if len(x.args) < len(y.args) else (y, x)
if is_commutative(x) and is_commutative(y):
combs = allcombinations(a.args, b.args, 'commutative')
else:
combs = allcombinations(a.args, b.args, 'associative')
for aaargs, bbargs in combs:
aa = [unpack(Compound(a.op, arg)) for arg in aaargs]
bb = [unpack(Compound(b.op, arg)) for arg in bbargs]
for match in unify(aa, bb, sop, **fns):
yield match
elif len(x.args) == len(y.args):
for match in unify(x.args, y.args, sop, **fns):
yield match
elif is_args(x) and is_args(y) and len(x) == len(y):
if len(x) == 0:
yield s
else:
for shead in unify(x[0], y[0], s, **fns):
for match in unify(x[1:], y[1:], shead, **fns):
yield match
def unify_var(var, x, s, **fns):
if var in s:
for match in unify(s[var], x, s, **fns):
yield match
elif occur_check(var, x):
pass
elif isinstance(var, CondVariable) and var.valid(x):
yield assoc(s, var, x)
elif isinstance(var, Variable):
yield assoc(s, var, x)
def occur_check(var, x):
""" var occurs in subtree owned by x? """
if var == x:
return True
elif isinstance(x, Compound):
return occur_check(var, x.args)
elif is_args(x):
if any(occur_check(var, xi) for xi in x): return True
return False
def assoc(d, key, val):
""" Return copy of d with key associated to val """
d = d.copy()
d[key] = val
return d
def is_args(x):
""" Is x a traditional iterable? """
return type(x) in (tuple, list, set)
def unpack(x):
if isinstance(x, Compound) and len(x.args) == 1:
return x.args[0]
else:
return x
def allcombinations(A, B, ordered):
"""
Restructure A and B to have the same number of elements
ordered must be either 'commutative' or 'associative'
A and B can be rearranged so that the larger of the two lists is
reorganized into smaller sublists.
>>> from sympy.unify.core import allcombinations
>>> for x in allcombinations((1, 2, 3), (5, 6), 'associative'): print(x)
(((1,), (2, 3)), ((5,), (6,)))
(((1, 2), (3,)), ((5,), (6,)))
>>> for x in allcombinations((1, 2, 3), (5, 6), 'commutative'): print(x)
(((1,), (2, 3)), ((5,), (6,)))
(((1, 2), (3,)), ((5,), (6,)))
(((1,), (3, 2)), ((5,), (6,)))
(((1, 3), (2,)), ((5,), (6,)))
(((2,), (1, 3)), ((5,), (6,)))
(((2, 1), (3,)), ((5,), (6,)))
(((2,), (3, 1)), ((5,), (6,)))
(((2, 3), (1,)), ((5,), (6,)))
(((3,), (1, 2)), ((5,), (6,)))
(((3, 1), (2,)), ((5,), (6,)))
(((3,), (2, 1)), ((5,), (6,)))
(((3, 2), (1,)), ((5,), (6,)))
"""
if ordered == "commutative":
ordered = 11
if ordered == "associative":
ordered = None
sm, bg = (A, B) if len(A) < len(B) else (B, A)
for part in kbins(list(range(len(bg))), len(sm), ordered=ordered):
if bg == B:
yield tuple((a,) for a in A), partition(B, part)
else:
yield partition(A, part), tuple((b,) for b in B)
def partition(it, part):
""" Partition a tuple/list into pieces defined by indices
>>> from sympy.unify.core import partition
>>> partition((10, 20, 30, 40), [[0, 1, 2], [3]])
((10, 20, 30), (40,))
"""
return type(it)([index(it, ind) for ind in part])
def index(it, ind):
""" Fancy indexing into an indexable iterable (tuple, list)
>>> from sympy.unify.core import index
>>> index([10, 20, 30], (1, 2, 0))
[20, 30, 10]
"""
return type(it)([it[i] for i in ind])
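# Illustrative example (not part of the module): unifying under an
# associative/commutative operator via the keyword hooks documented in
# `unify`. The operator name "Add" and the variable names are arbitrary.
#   expr = Compound("Add", ("x", "y", "z"))
#   pattern = Compound("Add", (Variable("a"), "y", Variable("b")))
#   matches = unify(expr, pattern, {},
#                   is_associative=lambda e: e.op == "Add",
#                   is_commutative=lambda e: e.op == "Add")
# Among the yielded mappings are {Variable(a): 'x', Variable(b): 'z'} and
# the symmetric {Variable(a): 'z', Variable(b): 'x'}.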
| ahhda/sympy | sympy/unify/core.py | Python | bsd-3-clause | 7,189 |
#!/usr/bin/env python
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Consul specific configuration."""
import json
import os
import server
class ConsulTopoServer(server.TopoServer):
"""Implementation of TopoServer for consul."""
def setup(self):
import environment # pylint: disable=g-import-not-at-top
import utils # pylint: disable=g-import-not-at-top
self.port_base = environment.reserve_ports(4)
self.server_addr = 'localhost:%d' % (self.port_base + 1)
# Write our config file.
self.config_file = os.path.join(environment.vtdataroot, 'consul.json')
config = {
'ports': {
'dns': self.port_base,
'http': self.port_base + 1,
'serf_lan': self.port_base + 2,
'serf_wan': self.port_base + 3,
},
}
with open(self.config_file, 'w') as fd:
fd.write(json.dumps(config))
log_base = os.path.join(environment.vtlogroot, 'consul')
self.proc = utils.run_bg([
'consul', 'agent',
'-dev',
'-config-file', self.config_file],
stdout=open(log_base + '.stdout', 'a'),
stderr=open(log_base + '.stderr', 'a'))
# Wait until the daemon is ready.
utils.curl(
'http://' + self.server_addr + '/v1/kv/?keys', retry_timeout=10)
# Create the cell configurations using 'vtctl AddCellInfo'
for cell in ['test_nj', 'test_ny', 'test_ca']:
utils.run_vtctl_vtctl(['AddCellInfo',
'-root', cell,
'-server_address', self.server_addr,
cell])
def teardown(self):
import utils # pylint: disable=g-import-not-at-top
utils.kill_sub_process(self.proc)
self.proc.wait()
def flags(self):
return [
'-topo_implementation', 'consul',
'-topo_global_server_address', self.server_addr,
'-topo_global_root', 'global',
]
def wipe(self):
import utils # pylint: disable=g-import-not-at-top
utils.curl('http://' + self.server_addr + '/v1/kv/global/keyspaces?recurse',
request='DELETE')
for cell in ['test_nj', 'test_ny', 'test_ca']:
utils.curl('http://' + self.server_addr + '/v1/kv/' + cell + '?recurse',
request='DELETE')
def update_addr(self, cell, keyspace, shard, tablet_index, port):
pass
server.flavor_map['consul'] = ConsulTopoServer()
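# Illustrative (the exact call sites live in the test harness and are
# assumptions here): a test selects this implementation through the flavor
# map, e.g.
#   topo = server.flavor_map['consul']
#   topo.setup()
#   extra_flags = topo.flags()
#   topo.teardown()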
| dcadevil/vitess | test/topo_flavor/consul.py | Python | apache-2.0 | 2,965 |
"""
Grid manipulation related command line interface.
Access this with something like:
python -m stompy.grid.cli <args>
"""
# why has python gone through three iterations of argument parsing modules?
from __future__ import print_function
import sys
import argparse
import logging as log
from . import unstructured_grid
ops=[]
stack=[]
class Op(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
#print '%r %r %r' % (namespace, values, option_string)
ops.append( (self,values) )
class ReadGrid(Op):
formats={}
def run(self,args):
fmt,path=args[0].split(':')
log.info("Reading %s as %s"%(path,fmt))
if fmt in self.formats:
g=self.formats[fmt][1](fmt,path)
else:
log.error("Did not understand format %s"%fmt)
log.error("Read formats are: %s"%(self.format_list()))
sys.exit(1)
stack.append(g)
log.info("Read grid (%d cells, %d edges, %d nodes)"%(g.Ncells(),g.Nedges(),g.Nnodes()))
@classmethod
def format_list(cls):
fmt_names=list(cls.formats.keys())
fmt_names.sort()
return ", ".join(fmt_names)
ReadGrid.formats['suntans_classic']=['SUNTANS (classic)',
lambda fmt,path: unstructured_grid.UnstructuredGrid.read_suntans_classic(path)]
ReadGrid.formats['suntans_hybrid']=['SUNTANS (hybrid)',
lambda fmt,path: unstructured_grid.UnstructuredGrid.read_suntans_hybrid(path)]
ReadGrid.formats['suntans']=['SUNTANS (auto)',
lambda fmt,path: unstructured_grid.UnstructuredGrid.read_suntans(path)]
ReadGrid.formats['ugrid']=['UGRID netCDF',
lambda fmt,path: unstructured_grid.UnstructuredGrid.from_ugrid(path)]
ReadGrid.formats['untrim']=['UnTRIM',
lambda fmt,path: unstructured_grid.UnTRIM08Grid(path)]
ReadGrid.formats['dfm']=['DFM netCDF (*_net.nc)',
lambda fmt,path: unstructured_grid.UnstructuredGrid.read_dfm(fn=path)]
ReadGrid.formats['sms']=['SMS grd',
lambda fmt,path: unstructured_grid.UnstructuredGrid.read_sms(path)]
class Dualify(Op):
def run(self,args):
g=stack.pop()
gd=g.create_dual(center='circumcenter',create_cells=True,remove_disconnected=True)
stack.append(gd)
# TODO: switch format handling to be more like ReadGrid
class WriteGrid(Op):
clobber=False
def run(self,args):
fmt,path=args[0].split(':')
log.info("Writing %s as %s"%(path,fmt))
g=stack[-1] # by default, do not pop from stack
if fmt in ['suntans_classic','suntans']:
g.write_suntans(path,overwrite=self.clobber)
elif fmt=='suntans_hybrid':
g.write_suntans_hybrid(path,overwrite=self.clobber)
elif fmt=='ugrid':
g.write_ugrid(path,overwrite=self.clobber)
elif fmt=='untrim':
g.write_untrim08(path,overwrite=self.clobber)
elif fmt=='cell_shp':
g.write_cells_shp(path,overwrite=self.clobber)
elif fmt=='boundary_shp':
g.write_shore_shp(path,overwrite=self.clobber)
elif fmt=='edge_shp':
g.write_edges_shp(path,overwrite=self.clobber)
elif fmt=='node_shp':
g.write_node_shp(path,overwrite=self.clobber)
elif fmt=='fishptm':
g.write_ptm_gridfile(path,overwrite=self.clobber)
elif fmt=='dfm':
from stompy.model.delft import dfm_grid
if not path.endswith('_net.nc'):
log.warning("Writing DFM grid to filename not ending in '_net.nc'")
dfm_grid.write_dfm(g,path,overwrite=self.clobber)
else:
log.error("Did not understand format %s"%fmt)
log.error("Possible formats are: %s"%self.format_list())
sys.exit(1)
@classmethod
def format_list(cls):
return "suntans_classic, suntans_hybrid, ugrid, untrim, cell_shp, boundary_shp, edge_shp, node_shp, fishptm, dfm"
class SetClobber(Op):
def run(self,args):
WriteGrid.clobber=True
parser = argparse.ArgumentParser(description='Manipulate unstructured grids.')
parser.add_argument("-i", "--input", help="Read a grid, fmt one of %s"%ReadGrid.format_list(),
metavar="fmt:path",
nargs=1,action=ReadGrid)
parser.add_argument("-o", "--output", help="Write a grid, fmt one of %s"%WriteGrid.format_list(),
metavar="fmt:path",
nargs=1,action=WriteGrid)
parser.add_argument("-d", "--dual", help="Convert to dual of grid",
nargs=0,action=Dualify)
parser.add_argument("-c", "--clobber", help="Allow overwriting of existing grids",
nargs=0,action=SetClobber)
def parse_and_run(cmd=None):
# In case there are repeated calls from a script:
del ops[:]
del stack[:]
if cmd is not None:
# allows for calling cli.py in a script, but with the exact same commandline.
# Except that this doesn't work? It gives an error related to ipython arguments
# and the real sys.argv, rather than the argv I just specified.
argv=cmd.split()
print("Parsing %r"%argv)
args=parser.parse_args(argv)
else:
args=parser.parse_args()
for impl,args in ops:
impl.run(args)
if __name__ == '__main__':
parse_and_run()
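# Example invocation (illustrative; the paths are placeholders):
#   python -m stompy.grid.cli -c -i suntans:/path/to/sun_grid -d -o ugrid:dual_net.nc
# reads a SUNTANS grid, replaces it with its dual, and writes the result as
# UGRID netCDF; -c/--clobber comes first so the write may overwrite output.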
| rustychris/stompy | stompy/grid/cli.py | Python | mit | 5,508 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010 KenTyde
# All rights reserved.
#
# This software is licensed as described in the file LICENSE,
# which you should have received as part of this distribution.
from setuptools import setup
import os
desc = open(os.path.join(os.path.dirname(__file__), 'README')).read()
setup(
name='fixlib',
version='0.5',
description='Pythonic library for dealing with the FIX protocol',
long_description=desc,
author='Dirkjan Ochtman',
author_email='[email protected]',
license='BSD',
url='http://source.kentyde.com/fixlib',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Financial and Insurance Industry',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
],
packages=['fixlib'],
test_suite='fixlib.tests.suite',
)
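# Standard setuptools commands apply (illustrative, not specific to fixlib):
#   python setup.py test    # runs fixlib.tests.suite via test_suite above
#   python setup.py sdist   # builds a source distribution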
| jvirtanen/fixlib | setup.py | Python | bsd-3-clause | 886 |
""" Tests for journals marketing views. """
from __future__ import absolute_import
import uuid
import mock
from django.conf import settings
from django.core.urlresolvers import reverse
from openedx.core.djangoapps.site_configuration.tests.mixins import SiteMixin
from openedx.core.djangolib.testing.utils import CacheIsolationTestCase
from openedx.features.journals.api import JOURNAL_INTEGRATION
from openedx.features.journals.tests.utils import (
get_mocked_journal_bundles,
get_mocked_journals,
get_mocked_pricing_data,
override_switch
)
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
@mock.patch.dict(settings.FEATURES, {"JOURNALS_ENABLED": True})
class JournalBundleViewTest(CacheIsolationTestCase, SiteMixin):
""" Tests for journals marketing views. """
@override_switch(JOURNAL_INTEGRATION, True)
@mock.patch('openedx.features.journals.api.DiscoveryApiClient.get_journal_bundles')
def test_journal_bundle_with_empty_data(self, mock_bundles):
"""
Test the marketing page without journal bundle data.
"""
mock_bundles.return_value = []
response = self.client.get(
path=reverse(
"openedx.journals.bundle_about",
kwargs={'bundle_uuid': str(uuid.uuid4())}
)
)
self.assertEqual(response.status_code, 404)
@override_switch(JOURNAL_INTEGRATION, True)
@mock.patch('openedx.features.journals.views.marketing.get_pricing_data')
@mock.patch('openedx.features.journals.api.DiscoveryApiClient.get_journal_bundles')
def test_journal_bundle_with_valid_data(self, mock_bundles, mock_pricing_data):
"""
Test the marketing page with journal bundle data.
"""
journal_bundles = get_mocked_journal_bundles()
journal_bundle = journal_bundles[0]
mock_pricing_data.return_value = get_mocked_pricing_data()
mock_bundles.return_value = journal_bundles
response = self.client.get(
path=reverse(
"openedx.journals.bundle_about",
kwargs={'bundle_uuid': str(uuid.uuid4())}
)
)
self.assertEqual(response.status_code, 200)
self.assertContains(response, "Purchase the Bundle")
self.assertContains(response, journal_bundle["title"])
self.assertContains(response, journal_bundle["courses"][0]["short_description"])
self.assertContains(response, journal_bundle["courses"][0]["course_runs"][0]["title"])
@mock.patch.dict(settings.FEATURES, {"JOURNALS_ENABLED": True})
class JournalIndexViewTest(SiteMixin, ModuleStoreTestCase):
"""
Tests for Journals Listing in Marketing Pages.
"""
def setUp(self):
super(JournalIndexViewTest, self).setUp()
self.journal_bundles = get_mocked_journal_bundles()
self.journal_bundle = self.journal_bundles[0]
self.journals = get_mocked_journals()
def assert_journal_data(self, response):
"""
Checks the journal data in given response
"""
self.assertEqual(response.status_code, 200)
self.assertContains(response, "Bundle")
self.assertContains(response, self.journal_bundle["uuid"])
self.assertContains(response, self.journal_bundle["title"])
self.assertContains(response, self.journal_bundle["organization"])
for journal in self.journals:
self.assertContains(response, "Journal")
self.assertContains(response, journal["title"])
self.assertContains(response, journal["organization"])
@override_switch(JOURNAL_INTEGRATION, True)
@mock.patch('student.views.management.get_journals_context')
def test_journals_index_page(self, mock_journals_context):
"""
Test the journal data on index page.
"""
mock_journals_context.return_value = {'journal_bundles': self.journal_bundles, 'journals': self.journals}
response = self.client.get(reverse('root'))
self.assert_journal_data(response)
@override_switch(JOURNAL_INTEGRATION, False)
def test_journals_index_page_disabled(self):
"""
Test the index page can load with journals disabled
"""
response = self.client.get(reverse('root'))
self.assertEqual(response.status_code, 200)
@override_switch(JOURNAL_INTEGRATION, True)
@mock.patch('openedx.features.journals.api.DiscoveryApiClient.get_journals')
@mock.patch('openedx.features.journals.api.DiscoveryApiClient.get_journal_bundles')
def test_journals_courses_page(self, mock_journal_bundles, mock_journals):
"""
Test the journal data on courses page.
"""
mock_journal_bundles.return_value = self.journal_bundles
mock_journals.return_value = self.journals
response = self.client.get(reverse('courses'))
self.assert_journal_data(response)
@override_switch(JOURNAL_INTEGRATION, False)
def test_journals_courses_page_disabled(self):
"""
Test the courses pages can load with journals disabled
"""
response = self.client.get(reverse('courses'))
self.assertEqual(response.status_code, 200)
| jolyonb/edx-platform | openedx/features/journals/tests/test_marketing_views.py | Python | agpl-3.0 | 5,227 |
"""Tools for setting up printing in interactive sessions. """
from __future__ import print_function, division
import sys
from distutils.version import LooseVersion as V
from io import BytesIO
from sympy import latex as default_latex
from sympy import preview
from sympy.core.compatibility import integer_types
from sympy.utilities.misc import debug, find_executable
def _init_python_printing(stringify_func):
"""Setup printing in Python interactive session. """
import sys
from sympy.core.compatibility import builtins
def _displayhook(arg):
"""Python's pretty-printer display hook.
This function was adapted from:
http://www.python.org/dev/peps/pep-0217/
"""
if arg is not None:
builtins._ = None
print(stringify_func(arg))
builtins._ = arg
sys.displayhook = _displayhook
def _init_ipython_printing(ip, stringify_func, use_latex, euler, forecolor,
backcolor, fontsize, latex_mode, print_builtin,
latex_printer):
"""Setup printing in IPython interactive session. """
try:
from IPython.lib.latextools import latex_to_png
except ImportError:
pass
preamble = "\\documentclass[%s]{article}\n" \
"\\pagestyle{empty}\n" \
"\\usepackage{amsmath,amsfonts}%s\\begin{document}"
if euler:
addpackages = '\\usepackage{euler}'
else:
addpackages = ''
preamble = preamble % (fontsize, addpackages)
imagesize = 'tight'
offset = "0cm,0cm"
resolution = 150
dvi = r"-T %s -D %d -bg %s -fg %s -O %s" % (
imagesize, resolution, backcolor, forecolor, offset)
dvioptions = dvi.split()
debug("init_printing: DVIOPTIONS:", dvioptions)
debug("init_printing: PREAMBLE:", preamble)
latex = latex_printer or default_latex
def _print_plain(arg, p, cycle):
"""caller for pretty, for use in IPython 0.11"""
if _can_print_latex(arg):
p.text(stringify_func(arg))
else:
p.text(IPython.lib.pretty.pretty(arg))
def _preview_wrapper(o):
exprbuffer = BytesIO()
try:
preview(o, output='png', viewer='BytesIO',
outputbuffer=exprbuffer, preamble=preamble,
dvioptions=dvioptions)
except Exception as e:
# IPython swallows exceptions
debug("png printing:", "_preview_wrapper exception raised:",
repr(e))
raise
return exprbuffer.getvalue()
def _matplotlib_wrapper(o):
# mathtext does not understand certain latex flags, so we try to
# replace them with suitable subs
o = o.replace(r'\operatorname', '')
o = o.replace(r'\overline', r'\bar')
# mathtext can't render some LaTeX commands. For example, it can't
# render any LaTeX environments such as array or matrix. So here we
# ensure that if mathtext fails to render, we return None.
try:
return latex_to_png(o)
except ValueError as e:
debug('matplotlib exception caught:', repr(e))
return None
def _can_print_latex(o):
"""Return True if type o can be printed with LaTeX.
If o is a container type, this is True if and only if every element of
o can be printed with LaTeX.
"""
from sympy import Basic
from sympy.matrices import MatrixBase
from sympy.physics.vector import Vector, Dyadic
if isinstance(o, (list, tuple, set, frozenset)):
return all(_can_print_latex(i) for i in o)
elif isinstance(o, dict):
return all(_can_print_latex(i) and _can_print_latex(o[i]) for i in o)
elif isinstance(o, bool):
return False
# TODO : Investigate if "elif hasattr(o, '_latex')" is more useful
# to use here, than these explicit imports.
elif isinstance(o, (Basic, MatrixBase, Vector, Dyadic)):
return True
elif isinstance(o, (float, integer_types)) and print_builtin:
return True
return False
def _print_latex_png(o):
"""
A function that returns a png rendered by an external latex
distribution, falling back to matplotlib rendering
"""
if _can_print_latex(o):
s = latex(o, mode=latex_mode)
try:
return _preview_wrapper(s)
except RuntimeError as e:
debug('preview failed with:', repr(e),
' Falling back to matplotlib backend')
if latex_mode != 'inline':
s = latex(o, mode='inline')
return _matplotlib_wrapper(s)
def _print_latex_matplotlib(o):
"""
A function that returns a png rendered by mathtext
"""
if _can_print_latex(o):
s = latex(o, mode='inline')
return _matplotlib_wrapper(s)
def _print_latex_text(o):
"""
A function to generate the latex representation of sympy expressions.
"""
if _can_print_latex(o):
s = latex(o, mode='plain')
s = s.replace(r'\dag', r'\dagger')
s = s.strip('$')
return '$$%s$$' % s
def _result_display(self, arg):
"""IPython's pretty-printer display hook, for use in IPython 0.10
This function was adapted from:
ipython/IPython/hooks.py:155
"""
if self.rc.pprint:
out = stringify_func(arg)
if '\n' in out:
                print()
print(out)
else:
print(repr(arg))
import IPython
if V(IPython.__version__) >= '0.11':
from sympy.core.basic import Basic
from sympy.matrices.matrices import MatrixBase
from sympy.physics.vector import Vector, Dyadic
printable_types = [Basic, MatrixBase, float, tuple, list, set,
frozenset, dict, Vector, Dyadic] + list(integer_types)
plaintext_formatter = ip.display_formatter.formatters['text/plain']
for cls in printable_types:
plaintext_formatter.for_type(cls, _print_plain)
png_formatter = ip.display_formatter.formatters['image/png']
if use_latex in (True, 'png'):
debug("init_printing: using png formatter")
for cls in printable_types:
png_formatter.for_type(cls, _print_latex_png)
elif use_latex == 'matplotlib':
debug("init_printing: using matplotlib formatter")
for cls in printable_types:
png_formatter.for_type(cls, _print_latex_matplotlib)
else:
debug("init_printing: not using any png formatter")
for cls in printable_types:
# Better way to set this, but currently does not work in IPython
#png_formatter.for_type(cls, None)
if cls in png_formatter.type_printers:
png_formatter.type_printers.pop(cls)
latex_formatter = ip.display_formatter.formatters['text/latex']
if use_latex in (True, 'mathjax'):
debug("init_printing: using mathjax formatter")
for cls in printable_types:
latex_formatter.for_type(cls, _print_latex_text)
else:
debug("init_printing: not using text/latex formatter")
for cls in printable_types:
# Better way to set this, but currently does not work in IPython
#latex_formatter.for_type(cls, None)
if cls in latex_formatter.type_printers:
latex_formatter.type_printers.pop(cls)
else:
ip.set_hook('result_display', _result_display)
def _is_ipython(shell):
"""Is a shell instance an IPython shell?"""
# shortcut, so we don't import IPython if we don't have to
if 'IPython' not in sys.modules:
return False
try:
from IPython.core.interactiveshell import InteractiveShell
except ImportError:
# IPython < 0.11
try:
from IPython.iplib import InteractiveShell
except ImportError:
# Reaching this points means IPython has changed in a backward-incompatible way
# that we don't know about. Warn?
return False
return isinstance(shell, InteractiveShell)
def init_printing(pretty_print=True, order=None, use_unicode=None,
use_latex=None, wrap_line=None, num_columns=None,
no_global=False, ip=None, euler=False, forecolor='Black',
backcolor='Transparent', fontsize='10pt',
latex_mode='equation*', print_builtin=True,
str_printer=None, pretty_printer=None,
latex_printer=None):
"""
Initializes pretty-printer depending on the environment.
Parameters
==========
pretty_print: boolean
If True, use pretty_print to stringify or the provided pretty
printer; if False, use sstrrepr to stringify or the provided string
printer.
order: string or None
There are a few different settings for this parameter:
        lex (default), which is lexicographic order;
        grlex, which is graded lexicographic order;
        grevlex, which is reversed graded lexicographic order;
old, which is used for compatibility reasons and for long expressions;
None, which sets it to lex.
use_unicode: boolean or None
If True, use unicode characters;
if False, do not use unicode characters.
use_latex: string, boolean, or None
If True, use default latex rendering in GUI interfaces (png and
mathjax);
if False, do not use latex rendering;
if 'png', enable latex rendering with an external latex compiler,
falling back to matplotlib if external compilation fails;
if 'matplotlib', enable latex rendering with matplotlib;
if 'mathjax', enable latex text generation, for example MathJax
rendering in IPython notebook or text rendering in LaTeX documents
wrap_line: boolean
If True, lines will wrap at the end; if False, they will not wrap
but continue as one line. This is only relevant if `pretty_print` is
True.
num_columns: int or None
If int, number of columns before wrapping is set to num_columns; if
None, number of columns before wrapping is set to terminal width.
This is only relevant if `pretty_print` is True.
no_global: boolean
If True, the settings become system wide;
if False, use just for this console/session.
ip: An interactive console
This can either be an instance of IPython,
or a class that derives from code.InteractiveConsole.
euler: boolean, optional, default=False
Loads the euler package in the LaTeX preamble for handwritten style
fonts (http://www.ctan.org/pkg/euler).
forecolor: string, optional, default='Black'
DVI setting for foreground color.
backcolor: string, optional, default='Transparent'
DVI setting for background color.
fontsize: string, optional, default='10pt'
A font size to pass to the LaTeX documentclass function in the
preamble.
latex_mode: string, optional, default='equation*'
The mode used in the LaTeX printer. Can be one of:
{'inline'|'plain'|'equation'|'equation*'}.
print_builtin: boolean, optional, default=True
If true then floats and integers will be printed. If false the
printer will only print SymPy types.
str_printer: function, optional, default=None
A custom string printer function. This should mimic
sympy.printing.sstrrepr().
pretty_printer: function, optional, default=None
A custom pretty printer. This should mimic sympy.printing.pretty().
latex_printer: function, optional, default=None
        A custom LaTeX printer. This should mimic sympy.printing.latex().
Examples
========
>>> from sympy.interactive import init_printing
>>> from sympy import Symbol, sqrt
>>> from sympy.abc import x, y
>>> sqrt(5)
sqrt(5)
>>> init_printing(pretty_print=True) # doctest: +SKIP
>>> sqrt(5) # doctest: +SKIP
___
\/ 5
>>> theta = Symbol('theta') # doctest: +SKIP
>>> init_printing(use_unicode=True) # doctest: +SKIP
>>> theta # doctest: +SKIP
\u03b8
>>> init_printing(use_unicode=False) # doctest: +SKIP
>>> theta # doctest: +SKIP
theta
>>> init_printing(order='lex') # doctest: +SKIP
>>> str(y + x + y**2 + x**2) # doctest: +SKIP
x**2 + x + y**2 + y
>>> init_printing(order='grlex') # doctest: +SKIP
>>> str(y + x + y**2 + x**2) # doctest: +SKIP
x**2 + x + y**2 + y
>>> init_printing(order='grevlex') # doctest: +SKIP
>>> str(y * x**2 + x * y**2) # doctest: +SKIP
x**2*y + x*y**2
>>> init_printing(order='old') # doctest: +SKIP
>>> str(x**2 + y**2 + x + y) # doctest: +SKIP
x**2 + x + y**2 + y
>>> init_printing(num_columns=10) # doctest: +SKIP
>>> x**2 + x + y**2 + y # doctest: +SKIP
x + y +
x**2 + y**2
"""
import sys
from sympy.printing.printer import Printer
if pretty_print:
if pretty_printer is not None:
stringify_func = pretty_printer
else:
from sympy.printing import pretty as stringify_func
else:
if str_printer is not None:
stringify_func = str_printer
else:
from sympy.printing import sstrrepr as stringify_func
# Even if ip is not passed, double check that not in IPython shell
in_ipython = False
if ip is None:
try:
ip = get_ipython()
except NameError:
pass
else:
in_ipython = (ip is not None)
if ip and not in_ipython:
in_ipython = _is_ipython(ip)
if in_ipython and pretty_print:
try:
import IPython
# IPython 1.0 deprecates the frontend module, so we import directly
# from the terminal module to prevent a deprecation message from being
# shown.
if V(IPython.__version__) >= '1.0':
from IPython.terminal.interactiveshell import TerminalInteractiveShell
else:
from IPython.frontend.terminal.interactiveshell import TerminalInteractiveShell
from code import InteractiveConsole
except ImportError:
pass
else:
# This will be True if we are in the qtconsole or notebook
if not isinstance(ip, (InteractiveConsole, TerminalInteractiveShell)) \
and 'ipython-console' not in ''.join(sys.argv):
if use_unicode is None:
debug("init_printing: Setting use_unicode to True")
use_unicode = True
if use_latex is None:
if find_executable('latex') and find_executable('dvipng'):
debug("init_printing: Setting use_latex to True")
use_latex = True
else:
try:
import matplotlib
except ImportError:
debug("init_printing: Setting use_latex to False")
use_latex = False
else:
debug("init_printing: Setting use_latex to 'matplotlib'")
use_latex = 'matplotlib'
if not no_global:
Printer.set_global_settings(order=order, use_unicode=use_unicode,
wrap_line=wrap_line, num_columns=num_columns)
else:
_stringify_func = stringify_func
if pretty_print:
stringify_func = lambda expr: \
_stringify_func(expr, order=order,
use_unicode=use_unicode,
wrap_line=wrap_line,
num_columns=num_columns)
else:
stringify_func = lambda expr: _stringify_func(expr, order=order)
if in_ipython:
_init_ipython_printing(ip, stringify_func, use_latex, euler,
forecolor, backcolor, fontsize, latex_mode,
print_builtin, latex_printer)
else:
_init_python_printing(stringify_func)
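# Illustrative (the arguments mirror options documented in the docstring
# above; this is not the only sensible combination):
#   init_printing(use_latex='mathjax', latex_mode='equation*')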
| maniteja123/sympy | sympy/interactive/printing.py | Python | bsd-3-clause | 16,667 |
from django.test.testcases import TestCase
from lti_auth.auth import LTIBackend
from lti_auth.lti import LTI
from lti_auth.tests.factories import BASE_LTI_PARAMS, UserFactory
class LTIBackendTest(TestCase):
def setUp(self):
self.backend = LTIBackend()
self.lti = LTI('initial', 'any')
self.lti.lti_params = BASE_LTI_PARAMS.copy()
def test_create_user(self):
user = self.backend.create_user(self.lti, '12345')
self.assertFalse(user.has_usable_password())
self.assertEquals(user.email, '[email protected]')
self.assertEquals(user.get_full_name(), 'Foo Baz')
def test_create_user_no_full_name(self):
self.lti.lti_params.pop('lis_person_name_full')
user = self.backend.create_user(self.lti, '12345')
self.assertEquals(user.get_full_name(), 'student')
def test_create_user_empty_full_name(self):
self.lti.lti_params['lis_person_name_full'] = ''
user = self.backend.create_user(self.lti, '12345')
self.assertEquals(user.get_full_name(), 'student')
def test_create_user_long_name(self):
self.lti.lti_params['lis_person_name_full'] = (
'Pneumonoultramicroscopicsilicovolcanoconiosis '
'Supercalifragilisticexpialidocious')
user = self.backend.create_user(self.lti, '12345')
self.assertEquals(
user.get_full_name(),
'Pneumonoultramicroscopicsilico Supercalifragilisticexpialidoc')
def test_find_or_create_user1(self):
# via email
user = UserFactory(email='[email protected]')
self.assertEquals(self.backend.find_or_create_user(self.lti), user)
def test_find_or_create_user2(self):
# via hashed username
self.lti.lti_params['oauth_consumer_key'] = '1234567890'
username = self.backend.get_hashed_username(self.lti)
user = UserFactory(username=username)
self.assertEquals(self.backend.find_or_create_user(self.lti), user)
def test_find_or_create_user3(self):
# new user
self.lti.lti_params['oauth_consumer_key'] = '1234567890'
user = self.backend.find_or_create_user(self.lti)
self.assertFalse(user.has_usable_password())
self.assertEquals(user.email, '[email protected]')
self.assertEquals(user.get_full_name(), 'Foo Baz')
username = self.backend.get_hashed_username(self.lti)
self.assertEquals(user.username, username)
def test_get_user(self):
user = UserFactory()
self.assertIsNone(self.backend.get_user(1234))
self.assertEquals(self.backend.get_user(user.id), user)
| c0cky/mediathread | lti_auth/tests/test_auth.py | Python | gpl-2.0 | 2,618 |
import urllib
import time
import RPi.GPIO as GPIO
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
GPIO.setup(17, GPIO.IN)
while True:
    # Notify the server whenever the GPIO 17 input reads high.
    if GPIO.input(17):
        print('blue')
        urllib.urlopen("http://queuemeleon.herokuapp.com/blue")
    # Sleep on every iteration, not only after a press, so the polling
    # loop does not spin at full CPU while the input is low.
    time.sleep(.5)
| radiolarian/clover | raspi-files/send_color.py | Python | mit | 297 |
"""
The xml.py script is an import translator plugin to get a carving from an Art of Illusion xml file.
An import plugin is a script in the import_plugins folder which has the function getCarving. It is meant to be run from the
interpret tool. To ensure that the plugin works on platforms which do not handle file capitalization properly, give the plugin
a lower case name.
The getCarving function takes the file name of an xml file and returns the carving.
This example gets a triangle mesh for the xml file boolean.xml. This example is run in a terminal in the folder which contains
boolean.xml and xml.py.
> python
Python 2.5.1 (r251:54863, Sep 22 2007, 01:43:31)
[GCC 4.2.1 (SUSE Linux)] on linux2
Type "help", "copyright", "credits" or "license" for more information.
>>> import xml
>>> xml.getCarving().getCarveRotatedBoundaryLayers()
[-1.159765625, None, [[(-18.925000000000001-2.4550000000000001j), (-18.754999999999981-2.4550000000000001j)
..
many more lines of the carving
..
An xml file can be exported from Art of Illusion by going to the "File" menu, then going into the "Export" menu item, then
picking the XML choice. This will bring up the XML file chooser window, choose a place to save the file then click "OK".
Leave the "compressFile" checkbox unchecked. All the objects from the scene will be exported, this plugin will ignore
the light and camera. If you want to fabricate more than one object at a time, you can have multiple objects in the Art of
Illusion scene and they will all be carved, then fabricated together.
"""
from __future__ import absolute_import
#Init has to be imported first because it has code to workaround the python bug where relative imports don't work if the module is imported as a main module.
import __init__
from skeinforge_tools.skeinforge_utilities.vector3 import Vector3
from skeinforge_tools.skeinforge_utilities.xml_simple_parser import XMLSimpleParser
from skeinforge_tools.skeinforge_utilities import euclidean
from skeinforge_tools.skeinforge_utilities import gcodec
from skeinforge_tools.skeinforge_utilities import intercircle
from skeinforge_tools.skeinforge_utilities import triangle_mesh
import math
import sys
__author__ = "Enrique Perez ([email protected])"
__credits__ = 'Nophead <http://hydraraptor.blogspot.com/>\nArt of Illusion <http://www.artofillusion.org/>'
__date__ = "$Date: 2008/21/04 $"
__license__ = "GPL 3.0"
#check that matrices & bridge are working, see how to handle a list of objects in Art of Illusion for subtracting
def addCarvableObjectInfo( carvableObjectInfos, objectInfoElement ):
"Add the object info if it is carvable."
carvableObjectInfo = getCarvableObjectInfo( objectInfoElement )
if carvableObjectInfo == None:
return
if objectInfoElement.attributeTable[ 'visible' ] == 'false':
return
carvableObjectInfo.setShape( carvableObjectInfo.matrix4By4 )
carvableObjectInfos.append( carvableObjectInfo )
def addCarvableObjectInfoWithMatrix( carvableObjectInfos, matrix4By4, objectInfoElement ):
"Add the object info if it is carvable."
carvableObjectInfo = getCarvableObjectInfo( objectInfoElement )
if carvableObjectInfo == None:
return
newMatrix4By4 = matrix4By4.getMultiplied( carvableObjectInfo.matrix4By4.matrix )
carvableObjectInfo.setShape( newMatrix4By4 )
carvableObjectInfos.append( carvableObjectInfo )
def addLineLoopsIntersections( loopLoopsIntersections, loops, pointBegin, pointEnd ):
"Add intersections of the line with the loops."
normalizedSegment = pointEnd - pointBegin
normalizedSegmentLength = abs( normalizedSegment )
if normalizedSegmentLength <= 0.0:
return
lineLoopsIntersections = []
normalizedSegment /= normalizedSegmentLength
segmentYMirror = complex( normalizedSegment.real, - normalizedSegment.imag )
pointBeginRotated = segmentYMirror * pointBegin
pointEndRotated = segmentYMirror * pointEnd
addLoopsXSegmentIntersections( lineLoopsIntersections, loops, pointBeginRotated.real, pointEndRotated.real, segmentYMirror, pointBeginRotated.imag )
for lineLoopsIntersection in lineLoopsIntersections:
point = complex( lineLoopsIntersection, pointBeginRotated.imag ) * normalizedSegment
loopLoopsIntersections.append( point )
def addLineXSegmentIntersection( lineLoopsIntersections, segmentFirstX, segmentSecondX, vector3First, vector3Second, y ):
"Add intersections of the line with the x segment."
isYAboveFirst = y > vector3First.imag
isYAboveSecond = y > vector3Second.imag
if isYAboveFirst == isYAboveSecond:
return
xIntersection = euclidean.getXIntersection( vector3First, vector3Second, y )
if xIntersection <= min( segmentFirstX, segmentSecondX ):
return
if xIntersection >= max( segmentFirstX, segmentSecondX ):
return
lineLoopsIntersections.append( xIntersection )
def addLoopLoopsIntersections( loop, loopsLoopsIntersections, otherLoops ):
"Add intersections of the loop with the other loops."
for pointIndex in xrange( len( loop ) ):
pointBegin = loop[ pointIndex ]
pointEnd = loop[ ( pointIndex + 1 ) % len( loop ) ]
addLineLoopsIntersections( loopsLoopsIntersections, otherLoops, pointBegin, pointEnd )
def addLoopsXSegmentIntersections( lineLoopsIntersections, loops, segmentFirstX, segmentSecondX, segmentYMirror, y ):
"Add intersections of the loops with the x segment."
for loop in loops:
addLoopXSegmentIntersections( lineLoopsIntersections, loop, segmentFirstX, segmentSecondX, segmentYMirror, y )
def addLoopXSegmentIntersections( lineLoopsIntersections, loop, segmentFirstX, segmentSecondX, segmentYMirror, y ):
"Add intersections of the loop with the x segment."
rotatedLoop = euclidean.getPointsRoundZAxis( segmentYMirror, loop )
for pointIndex in xrange( len( rotatedLoop ) ):
pointFirst = rotatedLoop[ pointIndex ]
pointSecond = rotatedLoop[ ( pointIndex + 1 ) % len( rotatedLoop ) ]
addLineXSegmentIntersection( lineLoopsIntersections, segmentFirstX, segmentSecondX, pointFirst, pointSecond, y )
def getBottom( points ):
"Get the bottom of the points."
bottom = 999999999.9
for point in points:
bottom = min( bottom, point.z )
return bottom
def getCarvableObjectInfo( objectInfoElement ):
"Get the object info if it is carvable."
if objectInfoElement == None:
return
object = objectInfoElement.getFirstChildWithClassName( 'object' )
shapeType = object.attributeTable[ 'bf:type' ]
if shapeType not in globalCarvableClassObjectInfoTable:
return
carvableClassObjectInfo = globalCarvableClassObjectInfoTable[ shapeType ]
newCarvableObjectInfo = carvableClassObjectInfo.getNewCarvableObjectInfo( objectInfoElement )
return newCarvableObjectInfo
def getCarvableClassObjectInfoTable():
"Get the carvable class object info table."
carvableClassObjectInfos = [ CSGObjectObjectInfo(), CubeObjectInfo(), CylinderObjectInfo(), SphereObjectInfo(), TriangleMeshObjectInfo() ]
carvableClassObjectInfoTable = {}
for carvableClassObjectInfo in carvableClassObjectInfos:
className = carvableClassObjectInfo.__class__.__name__
truncatedClassName = className[ : className.find( 'ObjectInfo' ) ]
carvableClassObjectInfoTable[ truncatedClassName ] = carvableClassObjectInfo
return carvableClassObjectInfoTable
def getCarving( fileName = '' ):
"Get the carving for the xml file."
if fileName == '':
unmodified = gcodec.getFilesWithFileTypeWithoutWords( 'xml' )
if len( unmodified ) == 0:
print( "There is no xml file in this folder." )
return None
fileName = unmodified[ 0 ]
carving = XMLCarving()
carving.parseXML( gcodec.getFileText( fileName ) )
return carving
def getInBetweenPointsFromLoops( importRadius, loops ):
"Get the in between points from loops."
inBetweenPoints = []
for loop in loops:
for pointIndex in xrange( len( loop ) ):
pointBegin = loop[ pointIndex ]
pointEnd = loop[ ( pointIndex + 1 ) % len( loop ) ]
intercircle.addPointsFromSegment( inBetweenPoints, importRadius, pointBegin, pointEnd, 0.2123 )
return inBetweenPoints
def getInBetweenPointsFromLoopsBoundarySideOtherLoops( inside, importRadius, loops, otherLoops, radiusSide ):
"Get the in between points from loops."
inBetweenPoints = []
for loop in loops:
for pointIndex in xrange( len( loop ) ):
pointBegin = loop[ pointIndex ]
pointEnd = loop[ ( pointIndex + 1 ) % len( loop ) ]
inBetweenSegmentPoints = []
intercircle.addPointsFromSegment( inBetweenSegmentPoints, importRadius, pointBegin, pointEnd, 0.2123 )
for inBetweenSegmentPoint in inBetweenSegmentPoints:
if isPointOrEitherLineBoundarySideInsideLoops( inside, otherLoops, pointBegin, inBetweenSegmentPoint, pointEnd, radiusSide ):
inBetweenPoints.append( inBetweenSegmentPoint )
return inBetweenPoints
def getJoinedList( originalLists ):
"Get the lists as one joined list."
concatenatedList = []
for originalList in originalLists:
concatenatedList += originalList
return concatenatedList
def getLoopsListsIntersections( loopsList ):
"Get intersections betweens the loops lists."
loopsListsIntersections = []
for loopsIndex in xrange( len( loopsList ) ):
loops = loopsList[ loopsIndex ]
for otherLoops in loopsList[ : loopsIndex ]:
loopsListsIntersections += getLoopsLoopsIntersections( loops, otherLoops )
return loopsListsIntersections
def getLoopsLoopsIntersections( loops, otherLoops ):
"Get all the intersections of the loops with the other loops."
loopsLoopsIntersections = []
for loop in loops:
addLoopLoopsIntersections( loop, loopsLoopsIntersections, otherLoops )
return loopsLoopsIntersections
def getPointsBoundarySideLoops( inside, loops, points, radius ):
"Get the points inside the loops."
pointsInsideLoops = []
for pointIndex in xrange( len( points ) ):
pointBegin = points[ ( pointIndex + len( points ) - 1 ) % len( points ) ]
pointCenter = points[ pointIndex ]
pointEnd = points[ ( pointIndex + 1 ) % len( points ) ]
if isPointOrEitherBoundarySideInsideLoops( inside, loops, pointBegin, pointCenter, pointEnd, radius ):
pointsInsideLoops.append( pointCenter )
return pointsInsideLoops
def getSubObjectInfoLoopsList( importRadius, subObjectInfos, z ):
"Get subObjectInfo loops list."
subObjectInfoLoopsList = []
for subObjectInfo in subObjectInfos:
subObjectInfoLoops = subObjectInfo.getLoops( importRadius, z )
subObjectInfoLoopsList.append( subObjectInfoLoops )
return subObjectInfoLoopsList
def getTop( points ):
"Get the top of the points."
top = - 999999999.9
for point in points:
top = max( top, point.z )
return top
def getTransformedByList( floatList, point ):
"Get the point transformed by the array."
return floatList[ 0 ] * point.x + floatList[ 1 ] * point.y + floatList[ 2 ] * point.z + floatList[ 3 ]
def getValueInQuotes( name, text, value ):
"Get value in quotes after the name."
nameAndQuote = name + '="'
nameIndexStart = text.find( nameAndQuote )
if nameIndexStart == - 1:
return value
valueStartIndex = nameIndexStart + len( nameAndQuote )
nameIndexEnd = text.find( '"', valueStartIndex )
if nameIndexEnd == - 1:
return value
return float( text[ valueStartIndex : nameIndexEnd ] )
def getVector3TransformedByMatrix( matrix, vector3 ):
"Get the vector3 multiplied by a vector3."
vector3Transformed = Vector3()
vector3Transformed.x = getTransformedByList( matrix[ 0 ], vector3 )
vector3Transformed.y = getTransformedByList( matrix[ 1 ], vector3 )
vector3Transformed.z = getTransformedByList( matrix[ 2 ], vector3 )
return vector3Transformed
def isPointOrEitherBoundarySideInsideLoops( inside, loops, pointBegin, pointCenter, pointEnd, radius ):
"Determine if the point or a point on either side of the point, is inside the loops."
if euclidean.isPointInsideLoops( loops, pointCenter ) != inside:
return False
segmentBegin = pointBegin - pointCenter
segmentEnd = pointEnd - pointCenter
segmentBeginLength = abs( segmentBegin )
segmentEndLength = abs( segmentEnd )
if segmentBeginLength <= 0.0 or segmentEndLength <= 0.0:
return False
segmentBegin /= segmentBeginLength
segmentEnd /= segmentEndLength
addedSegment = segmentBegin + segmentEnd
addedSegmentLength = abs( addedSegment )
if addedSegmentLength > 0.0:
addedSegment *= radius / addedSegmentLength
else:
addedSegment = radius * complex( segmentEnd.imag, - segmentEnd.real )
if euclidean.isPointInsideLoops( loops, pointCenter + addedSegment ) != inside:
return False
return euclidean.isPointInsideLoops( loops, pointCenter - addedSegment ) == inside
def isPointOrEitherLineBoundarySideInsideLoops( inside, loops, pointBegin, pointCenter, pointEnd, radius ):
"Determine if the point or a point on either side of the point, is inside the loops."
if euclidean.isPointInsideLoops( loops, pointCenter ) != inside:
return False
segment = pointEnd - pointBegin
segmentLength = abs( segment )
if segmentLength <= 0.0:
return False
segment /= segmentLength
addedSegment = radius * complex( segment.imag, - segment.real )
if euclidean.isPointInsideLoops( loops, pointCenter + addedSegment ) != inside:
return False
return euclidean.isPointInsideLoops( loops, pointCenter - addedSegment ) == inside
class Matrix4By4:
"A four by four matrix."
def __init__( self ):
"Add empty lists."
self.matrix = None
def __repr__( self ):
"Get the string representation of this four by four matrix."
return str( self.matrix )
def getFromAttributeTable( self, attributeTable ):
"Get the from row column attribute strings, counting from one."
for column in xrange( 4 ):
for row in xrange( 4 ):
columnString = str( column + 1 )
rowString = str( row + 1 )
key = 'm' + columnString + rowString
if key in attributeTable:
if self.matrix == None:
self.setMatrixToZero()
self.matrix[ column ][ row ] = float( attributeTable[ key ] )
else:
self.matrix = None
return self
return self
def getMultiplied( self, otherMatrix ):
"Get this matrix multiplied by the other matrix."
if otherMatrix == None or self.matrix == None:
return None
#A down, B right from http://en.wikipedia.org/wiki/Matrix_multiplication
newMatrix4By4 = Matrix4By4()
newMatrix4By4.setMatrixToZero()
for column in xrange( 4 ):
for row in xrange( 4 ):
matrixColumn = self.matrix[ column ]
dotProduct = 0
for elementIndex in xrange( 4 ):
dotProduct += matrixColumn[ elementIndex ] * otherMatrix[ elementIndex ][ row ]
newMatrix4By4.matrix[ column ][ row ] = dotProduct
return newMatrix4By4
def setMatrixToZero( self ):
"Get the matrix elements to zero."
self.matrix = [ [ 0.0, 0.0, 0.0, 0.0 ], [ 0.0, 0.0, 0.0, 0.0 ], [ 0.0, 0.0, 0.0, 0.0 ], [ 0.0, 0.0, 0.0, 0.0 ] ]
class XMLCarving:
"An svg carving."
def __init__( self ):
"Add empty lists."
self.belowLoops = []
self.bridgeLayerThickness = None
self.carvableObjectInfos = []
self.importRadius = 0.3
self.layerThickness = 0.4
self.rotatedBoundaryLayers = []
def __repr__( self ):
"Get the string representation of this carving."
return str( self.rotatedBoundaryLayers )
def getCarveCornerMaximum( self ):
"Get the corner maximum of the vertices."
return self.cornerMaximum
def getCarveCornerMinimum( self ):
"Get the corner minimum of the vertices."
return self.cornerMinimum
def getCarveLayerThickness( self ):
"Get the layer thickness."
return self.layerThickness
def getCarveRotatedBoundaryLayers( self ):
"Get the rotated boundary layers."
if len( self.carvableObjectInfos ) < 1:
return []
self.cornerMaximum = Vector3( - 999999999.0, - 999999999.0, - 9999999999.9 )
self.cornerMinimum = Vector3( 999999999.0, 999999999.0, 9999999999.9 )
for carvableObjectInfo in self.carvableObjectInfos:
self.cornerMaximum.z = max( self.cornerMaximum.z, carvableObjectInfo.top )
self.cornerMinimum.z = min( self.cornerMinimum.z, carvableObjectInfo.bottom )
halfHeight = 0.5 * self.layerThickness
layerTop = self.cornerMaximum.z - halfHeight
self.setActualMinimumZ( halfHeight, layerTop )
self.zZoneInterval = triangle_mesh.getZoneInterval( self.layerThickness )
z = self.cornerMinimum.z + halfHeight
while z < layerTop:
z = self.getZAddExtruderPaths( z )
for rotatedBoundaryLayer in self.rotatedBoundaryLayers:
for loop in rotatedBoundaryLayer.loops:
for point in loop:
pointVector3 = Vector3( point.real, point.imag, rotatedBoundaryLayer.z )
self.cornerMaximum = euclidean.getPointMaximum( self.cornerMaximum, pointVector3 )
self.cornerMinimum = euclidean.getPointMinimum( self.cornerMinimum, pointVector3 )
self.cornerMaximum.z = layerTop + halfHeight
for rotatedBoundaryLayerIndex in xrange( len( self.rotatedBoundaryLayers ) - 1, - 1, - 1 ):
rotatedBoundaryLayer = self.rotatedBoundaryLayers[ rotatedBoundaryLayerIndex ]
if len( rotatedBoundaryLayer.loops ) > 0:
return self.rotatedBoundaryLayers[ : rotatedBoundaryLayerIndex + 1 ]
return []
def getExtruderPaths( self, z ):
"Get extruder loops."
rotatedBoundaryLayer = euclidean.RotatedLoopLayer( z )
for carvableObjectInfo in self.carvableObjectInfos:
rotatedBoundaryLayer.loops += carvableObjectInfo.getLoops( self.importRadius, z )
return rotatedBoundaryLayer
def getZAddExtruderPaths( self, z ):
"Get next z and add extruder loops."
zoneArray = []
vertices = []
for carvableObjectInfo in self.carvableObjectInfos:
vertices += carvableObjectInfo.getVertices()
for point in vertices:
triangle_mesh.addToZoneArray( point, z, zoneArray, self.zZoneInterval )
lowestZoneIndex = triangle_mesh.getLowestZoneIndex( zoneArray, z )
halfAround = int( math.ceil( float( lowestZoneIndex ) / 2.0 ) )
zAround = float( halfAround ) * self.zZoneInterval
if lowestZoneIndex % 2 == 1:
zAround = - zAround
zPlusAround = z + zAround
rotatedBoundaryLayer = self.getExtruderPaths( zPlusAround )
self.rotatedBoundaryLayers.append( rotatedBoundaryLayer )
if self.bridgeLayerThickness == None:
return z + self.layerThickness
allExtrudateLoops = []
for loop in rotatedBoundaryLayer.loops:
allExtrudateLoops += triangle_mesh.getBridgeLoops( self.layerThickness, loop )
rotatedBoundaryLayer.rotation = triangle_mesh.getBridgeDirection( self.belowLoops, allExtrudateLoops, self.layerThickness )
self.belowLoops = allExtrudateLoops
		if rotatedBoundaryLayer.rotation is None:
return z + self.layerThickness
return z + self.bridgeLayerThickness
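	# A note on the zone search above (our reading of the original code): the
	# lowest zone index walks outward from z in alternating directions
	# (0 keeps z, 1 moves one interval down, 2 one interval up, 3 two down, ...),
	# which is why odd indexes negate zAround; the slice height is thereby
	# nudged to a level crossed by as few vertices as possible.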
def parseXML( self, xmlText ):
"Parse XML text and store the layers."
if xmlText == '':
return None
xmlParser = XMLSimpleParser( xmlText )
artOfIllusionElement = xmlParser.rootElement.getFirstChildWithClassName( 'ArtOfIllusion' )
sceneElement = artOfIllusionElement.getFirstChildWithClassName( 'Scene' )
rootElement = sceneElement.getFirstChildWithClassName( 'objects' )
objectInfoElements = rootElement.getChildrenWithClassName( 'bf:Elem' )
for objectInfoElement in objectInfoElements:
addCarvableObjectInfo( self.carvableObjectInfos, objectInfoElement )
def setActualMinimumZ( self, halfHeight, layerTop ):
"Get the actual minimum z at the lowest rotated boundary layer."
while self.cornerMinimum.z < layerTop:
if len( self.getExtruderPaths( self.cornerMinimum.z ).loops ) > 0:
increment = - halfHeight
while abs( increment ) > 0.001 * halfHeight:
self.cornerMinimum.z += increment
increment = 0.5 * abs( increment )
if len( self.getExtruderPaths( self.cornerMinimum.z ).loops ) > 0:
increment = - increment
return
self.cornerMinimum.z += self.layerThickness
def setCarveBridgeLayerThickness( self, bridgeLayerThickness ):
"Set the bridge layer thickness. If the infill is not in the direction of the bridge, the bridge layer thickness should be given as None or not set at all."
self.bridgeLayerThickness = bridgeLayerThickness
def setCarveLayerThickness( self, layerThickness ):
"Set the layer thickness."
self.layerThickness = layerThickness
def setCarveImportRadius( self, importRadius ):
"Set the import radius."
self.importRadius = importRadius
def setCarveIsCorrectMesh( self, isCorrectMesh ):
"Set the is correct mesh flag."
self.isCorrectMesh = isCorrectMesh
class TriangleMeshObjectInfo:
"An Art of Illusion object info."
def __init__( self ):
"Set name to None."
self.name = None
def __repr__( self ):
"Get the string representation of this object info."
		if self.name is None:
return self.__class__.__name__
return "%s %s\n%s" % ( self.name, self.__class__.__name__, self.triangleMesh )
def getLoops( self, importRadius, z ):
"Get loops sliced through shape."
self.triangleMesh.importRadius = importRadius
return self.triangleMesh.getLoopsFromMesh( z )
def getNewCarvableObjectInfo( self, objectInfoElement ):
"Get new carvable object info."
newCarvableObjectInfo = self.__class__()
newCarvableObjectInfo.name = objectInfoElement.getFirstChildWithClassName( 'name' ).text
newCarvableObjectInfo.object = objectInfoElement.getFirstChildWithClassName( 'object' )
coords = objectInfoElement.getFirstChildWithClassName( 'coords' )
transformAttributeTable = self.getTransformAttributeTable( coords, 'transformFrom' )
if len( transformAttributeTable ) < 16:
transformAttributeTable = self.getTransformAttributeTable( coords, 'transformTo' )
newCarvableObjectInfo.matrix4By4 = Matrix4By4().getFromAttributeTable( transformAttributeTable )
return newCarvableObjectInfo
def getTransformAttributeTable( self, coords, transformName ):
"Get the transform attributes."
transformAttributeTable = coords.getFirstChildWithClassName( transformName ).attributeTable
if len( transformAttributeTable ) < 16:
if 'bf:ref' in transformAttributeTable:
idReference = transformAttributeTable[ 'bf:ref' ]
return coords.rootElement.getSubChildWithID( idReference ).attributeTable
return transformAttributeTable
def getVertices( self ):
"Get all vertices."
return self.triangleMesh.vertices
def setShape( self, matrix4By4 ):
"Set the shape of this carvable object info."
self.triangleMesh = triangle_mesh.TriangleMesh()
vertexElement = self.object.getFirstChildWithClassName( 'vertex' )
vertexPointElements = vertexElement.getChildrenWithClassName( 'bf:Elem' )
for vertexPointElement in vertexPointElements:
coordinateElement = vertexPointElement.getFirstChildWithClassName( 'r' )
vertex = Vector3( float( coordinateElement.attributeTable[ 'x' ] ), float( coordinateElement.attributeTable[ 'y' ] ), float( coordinateElement.attributeTable[ 'z' ] ) )
self.triangleMesh.vertices.append( getVector3TransformedByMatrix( matrix4By4.matrix, vertex ) )
edgeElement = self.object.getFirstChildWithClassName( 'edge' )
edgeSubelements = edgeElement.getChildrenWithClassName( 'bf:Elem' )
for edgeSubelementIndex in xrange( len( edgeSubelements ) ):
edgeSubelement = edgeSubelements[ edgeSubelementIndex ]
vertexIndexes = [ int( edgeSubelement.attributeTable[ 'v1' ] ), int( edgeSubelement.attributeTable[ 'v2' ] ) ]
edge = triangle_mesh.Edge().getFromVertexIndexes( edgeSubelementIndex, vertexIndexes )
self.triangleMesh.edges.append( edge )
faceElement = self.object.getFirstChildWithClassName( 'face' )
faceSubelements = faceElement.getChildrenWithClassName( 'bf:Elem' )
for faceSubelementIndex in xrange( len( faceSubelements ) ):
faceSubelement = faceSubelements[ faceSubelementIndex ]
edgeIndexes = [ int( faceSubelement.attributeTable[ 'e1' ] ), int( faceSubelement.attributeTable[ 'e2' ] ), int( faceSubelement.attributeTable[ 'e3' ] ) ]
face = triangle_mesh.Face().getFromEdgeIndexes( edgeIndexes, self.triangleMesh.edges, faceSubelementIndex )
self.triangleMesh.faces.append( face )
self.bottom = getBottom( self.triangleMesh.vertices )
self.top = getTop( self.triangleMesh.vertices )
class CSGObjectObjectInfo( TriangleMeshObjectInfo ):
"An Art of Illusion CSG object info."
def __repr__( self ):
"Get the string representation of this object info."
		if self.name is None:
			return self.__class__.__name__
		stringRepresentation = '%s %s' % ( self.name, self.__class__.__name__ )
for subObjectInfo in self.subObjectInfos:
stringRepresentation += '\n%s' % subObjectInfo
return stringRepresentation
def getIntersectedLoops( self, importRadius, subObjectInfoLoopsList ):
"Get intersected loops sliced through shape."
firstLoops = subObjectInfoLoopsList[ 0 ]
lastLoops = getJoinedList( subObjectInfoLoopsList[ 1 : ] )
radiusSide = 0.01 * importRadius
corners = getPointsBoundarySideLoops( True, firstLoops, getJoinedList( lastLoops ), radiusSide )
corners += getPointsBoundarySideLoops( True, lastLoops, getJoinedList( firstLoops ), radiusSide )
corners += getLoopsListsIntersections( subObjectInfoLoopsList )
allPoints = corners[ : ]
allPoints += getInBetweenPointsFromLoopsBoundarySideOtherLoops( True, importRadius, lastLoops, firstLoops, radiusSide )
allPoints += getInBetweenPointsFromLoopsBoundarySideOtherLoops( True, importRadius, firstLoops, lastLoops, radiusSide )
return triangle_mesh.getInclusiveLoops( allPoints, corners, importRadius, False )
def getJoinedLoops( self, importRadius, subObjectInfoLoopsList ):
"Get joined loops sliced through shape."
loops = []
for subObjectInfoLoops in subObjectInfoLoopsList:
loops += subObjectInfoLoops
corners = []
for loop in loops:
corners += loop
corners += getLoopsListsIntersections( subObjectInfoLoopsList )
allPoints = corners[ : ]
allPoints += getInBetweenPointsFromLoops( importRadius, loops )
return triangle_mesh.getInclusiveLoops( allPoints, corners, importRadius, False )
def getLoops( self, importRadius, z ):
"Get loops sliced through shape."
if len( self.subObjectInfos ) < 1:
return []
operationString = self.object.attributeTable[ 'operation' ]
# operationString = '1'#
subObjectInfoLoopsList = getSubObjectInfoLoopsList( importRadius, self.subObjectInfos, z )
if operationString == '0':
return self.getJoinedLoops( importRadius, subObjectInfoLoopsList )
if operationString == '1':
return self.getIntersectedLoops( importRadius, subObjectInfoLoopsList )
if operationString == '2':
return self.getSubtractedLoops( importRadius, subObjectInfoLoopsList )
if operationString == '3':
subObjectInfoLoopsList.reverse()
return self.getSubtractedLoops( importRadius, subObjectInfoLoopsList )
return []
def getSubtractedLoops( self, importRadius, subObjectInfoLoopsList ):
"Get subtracted loops sliced through shape."
negativeLoops = getJoinedList( subObjectInfoLoopsList[ 1 : ] )
positiveLoops = subObjectInfoLoopsList[ 0 ]
radiusSide = 0.01 * importRadius
corners = getPointsBoundarySideLoops( True, positiveLoops, getJoinedList( negativeLoops ), radiusSide )
corners += getPointsBoundarySideLoops( False, negativeLoops, getJoinedList( positiveLoops ), radiusSide )
loopsListsIntersections = getLoopsListsIntersections( subObjectInfoLoopsList )
corners += loopsListsIntersections
allPoints = corners[ : ]
allPoints += getInBetweenPointsFromLoopsBoundarySideOtherLoops( True, importRadius, negativeLoops, positiveLoops, radiusSide )
allPoints += getInBetweenPointsFromLoopsBoundarySideOtherLoops( False, importRadius, positiveLoops, negativeLoops, radiusSide )
return triangle_mesh.getInclusiveLoops( allPoints, corners, importRadius, False )
def getVertices( self ):
"Get all vertices."
vertices = []
for subObjectInfo in self.subObjectInfos:
vertices += subObjectInfo.getVertices()
return vertices
def setShape( self, matrix4By4 ):
"Set the shape of this carvable object info."
self.subObjectInfos = []
addCarvableObjectInfoWithMatrix( self.subObjectInfos, matrix4By4, self.object.getFirstChildWithClassName( 'obj1' ) )
addCarvableObjectInfoWithMatrix( self.subObjectInfos, matrix4By4, self.object.getFirstChildWithClassName( 'obj2' ) )
self.bottom = 999999999.9
self.top = - 999999999.9
for subObjectInfo in self.subObjectInfos:
self.bottom = min( self.bottom, subObjectInfo.bottom )
self.top = max( self.top, subObjectInfo.top )
class CubeObjectInfo( TriangleMeshObjectInfo ):
"An Art of Illusion Cube object info."
def setBottomTopTriangleMesh( self, edgeTriples, matrix4By4, vertexPairs, vertices ):
"Set the bottom, top and triangle mesh of this carvable object info."
self.triangleMesh = triangle_mesh.TriangleMesh()
for vertex in vertices:
self.triangleMesh.vertices.append( getVector3TransformedByMatrix( matrix4By4.matrix, vertex ) )
for vertexPairsIndex in xrange( len( vertexPairs ) ):
vertexPair = vertexPairs[ vertexPairsIndex ]
edge = triangle_mesh.Edge().getFromVertexIndexes( vertexPairsIndex, vertexPair )
self.triangleMesh.edges.append( edge )
for edgeTriplesIndex in xrange( len( edgeTriples ) ):
edgeTriple = edgeTriples[ edgeTriplesIndex ]
face = triangle_mesh.Face().getFromEdgeIndexes( edgeTriple, self.triangleMesh.edges, edgeTriplesIndex )
self.triangleMesh.faces.append( face )
self.bottom = getBottom( self.triangleMesh.vertices )
self.top = getTop( self.triangleMesh.vertices )
def setShape( self, matrix4By4 ):
"Set the shape of this carvable object info."
halfX = float( self.object.attributeTable[ 'halfx' ] )
halfY = float( self.object.attributeTable[ 'halfy' ] )
halfZ = float( self.object.attributeTable[ 'halfz' ] )
vertices = [
Vector3( - 1.0, - 1.0, 1.0 ),
Vector3( 1.0, - 1.0, 1.0 ),
Vector3( 1.0, - 1.0, - 1.0 ),
Vector3( - 1.0, - 1.0, - 1.0 ),
Vector3( - 1.0, 1.0, 1.0 ),
Vector3( 1.0, 1.0, 1.0 ),
Vector3( 1.0, 1.0, - 1.0 ),
Vector3( - 1.0, 1.0, - 1.0 ) ]
for vertex in vertices:
vertex.x *= halfX
vertex.y *= halfY
vertex.z *= halfZ
vertexPairs = [
[ 6, 4 ],
[ 7, 6 ],
[ 6, 2 ],
[ 3, 2 ],
[ 2, 1 ],
[ 3, 1 ],
[ 1, 0 ],
[ 7, 2 ],
[ 6, 1 ],
[ 6, 5 ],
[ 5, 1 ],
[ 4, 3 ],
[ 3, 0 ],
[ 7, 3 ],
[ 5, 0 ],
[ 5, 4 ],
[ 4, 0 ],
[ 7, 4 ] ]
edgeTriples = [
[ 9, 0, 15 ],
[ 1, 2, 7 ],
[ 3, 4, 5 ],
[ 12, 5, 6 ],
[ 13, 7, 3 ],
[ 2, 8, 4 ],
[ 9, 10, 8 ],
[ 16, 11, 12 ],
[ 17, 13, 11 ],
[ 10, 14, 6 ],
[ 15, 16, 14 ],
[ 1, 17, 0 ] ]
self.setBottomTopTriangleMesh( edgeTriples, matrix4By4, vertexPairs, vertices )
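	# The tables above encode a unit cube, later scaled by halfx/halfy/halfz:
	# 8 corner vertices, 18 edges (the cube edges plus the face diagonals
	# needed for triangulation), and 12 triangular faces, two per cube side.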
class CylinderObjectInfo( CubeObjectInfo ):
"An Art of Illusion Cylinder object info."
def setShape( self, matrix4By4 ):
"Set the shape of this carvable object info."
numberOfSides = 31
height = float( self.object.attributeTable[ 'height' ] )
halfHeight = 0.5 * height
radiusX = float( self.object.attributeTable[ 'rx' ] )
ratioTopOverBottom = float( self.object.attributeTable[ 'ratio' ] )
radiusZ = float( self.object.attributeTable[ 'rz' ] )
vertices = []
sideAngle = 2.0 * math.pi / float( numberOfSides )
halfSideAngle = 0.5 * sideAngle
edgeTriples = []
vertexPairs = []
numberOfVertices = numberOfSides + numberOfSides
numberOfCircumferentialEdges = numberOfVertices + numberOfVertices
for side in xrange( numberOfSides ):
bottomAngle = float( side ) * sideAngle
bottomComplex = euclidean.getPolar( bottomAngle, 1.0 )
bottomPoint = Vector3( bottomComplex.real * radiusX, - halfHeight, bottomComplex.imag * radiusZ )
vertices.append( bottomPoint )
topPoint = Vector3( bottomPoint.x * ratioTopOverBottom, halfHeight, bottomPoint.z * ratioTopOverBottom )
vertices.append( topPoint )
vertexPairBottom = [ side + side, ( side + side + 2 ) % numberOfVertices ]
vertexPairBottomIndex = len( vertexPairs )
vertexPairs.append( vertexPairBottom )
vertexPairDiagonal = [ ( side + side + 2 ) % numberOfVertices, side + side + 1 ]
vertexPairDiagonalIndex = len( vertexPairs )
vertexPairs.append( vertexPairDiagonal )
vertexPairVertical = [ side + side + 1, side + side ]
vertexPairVerticalIndex = len( vertexPairs )
vertexPairs.append( vertexPairVertical )
vertexPairTop = [ side + side + 1, ( side + side + 3 ) % numberOfVertices ]
vertexPairTopIndex = len( vertexPairs )
vertexPairs.append( vertexPairTop )
edgeTripleBottomVertical = [ vertexPairBottomIndex, vertexPairDiagonalIndex, vertexPairVerticalIndex ]
edgeTriples.append( edgeTripleBottomVertical )
edgeTripleBottomVertical = [ vertexPairTopIndex, vertexPairDiagonalIndex, ( vertexPairVerticalIndex + 4 ) % numberOfCircumferentialEdges ]
edgeTriples.append( edgeTripleBottomVertical )
for side in xrange( 2, numberOfSides - 1 ):
vertexPairBottomHorizontal = [ 0, side + side ]
vertexPairs.append( vertexPairBottomHorizontal )
vertexPairTopHorizontal = [ 1, side + side + 1 ]
vertexPairs.append( vertexPairTopHorizontal )
for side in xrange( 1, numberOfSides - 1 ):
vertexPairBottomIndex = 4 * side
vertexPairBottomDiagonalIndex = vertexPairBottomIndex + 4
vertexPairBottomBeforeIndex = vertexPairBottomIndex - 4
vertexPairTopIndex = 4 * side + 3
vertexPairTopDiagonalIndex = vertexPairTopIndex + 4
vertexPairTopBeforeIndex = vertexPairTopIndex - 4
if side > 1:
vertexPairBottomBeforeIndex = numberOfCircumferentialEdges + 2 * side - 4
vertexPairTopBeforeIndex = vertexPairBottomBeforeIndex + 1
if side < numberOfSides - 2:
vertexPairBottomDiagonalIndex = numberOfCircumferentialEdges + 2 * side - 2
vertexPairTopDiagonalIndex = vertexPairBottomDiagonalIndex + 1
edgeTripleBottomHorizontal = [ vertexPairBottomIndex, vertexPairBottomDiagonalIndex, vertexPairBottomBeforeIndex ]
edgeTriples.append( edgeTripleBottomHorizontal )
edgeTripleTopHorizontal = [ vertexPairTopIndex, vertexPairTopDiagonalIndex, vertexPairTopBeforeIndex ]
edgeTriples.append( edgeTripleTopHorizontal )
self.setBottomTopTriangleMesh( edgeTriples, matrix4By4, vertexPairs, vertices )
class SphereObjectInfo( CubeObjectInfo ):
"An Art of Illusion Sphere object info."
def setShape( self, matrix4By4 ):
"Set the shape of this carvable object info."
self.numberOfInBetweens = 19
self.numberOfDivisions = self.numberOfInBetweens + 1
squareRadius = 0.5 * float( self.numberOfInBetweens )
vertexPairs = []
edgeTriples = []
vertices = []
edgeDiagonalTable = {}
edgeHorizontalTable = {}
edgeVerticalTable = {}
vertexTable = {}
for row in xrange( self.numberOfDivisions ):
for column in xrange( self.numberOfDivisions ):
columnMinusRadius = float( column - squareRadius )
rowMinusRadius = float( row - squareRadius )
height = min( squareRadius - abs( columnMinusRadius ), squareRadius - abs( rowMinusRadius ) )
squarePoint = Vector3( rowMinusRadius, columnMinusRadius, - height )
vertexTable[ row, column, 0 ] = len( vertices )
if row != 0 and row != self.numberOfInBetweens and column != 0 and column != self.numberOfInBetweens:
vertices.append( squarePoint )
squarePoint = Vector3( rowMinusRadius, columnMinusRadius, height )
vertexTable[ row, column, 1 ] = len( vertices )
vertices.append( squarePoint )
for row in xrange( self.numberOfInBetweens ):
for column in xrange( self.numberOfDivisions ):
horizontalEdgeBottom = [ vertexTable[ row, column, 0 ], vertexTable[ row + 1, column, 0 ] ]
edgeHorizontalTable[ row, column, 0 ] = len( vertexPairs )
vertexPairs.append( horizontalEdgeBottom )
horizontalEdgeTop = [ vertexTable[ row, column, 1 ], vertexTable[ row + 1, column, 1 ] ]
edgeHorizontalTable[ row, column, 1 ] = len( vertexPairs )
vertexPairs.append( horizontalEdgeTop )
for row in xrange( self.numberOfDivisions ):
for column in xrange( self.numberOfInBetweens ):
verticalEdgeBottom = [ vertexTable[ row, column, 0 ], vertexTable[ row, column + 1, 0 ] ]
edgeVerticalTable[ row, column, 0 ] = len( vertexPairs )
vertexPairs.append( verticalEdgeBottom )
verticalEdgeTop = [ vertexTable[ row, column, 1 ], vertexTable[ row, column + 1, 1 ] ]
edgeVerticalTable[ row, column, 1 ] = len( vertexPairs )
vertexPairs.append( verticalEdgeTop )
for row in xrange( self.numberOfInBetweens ):
for column in xrange( self.numberOfInBetweens ):
diagonalEdgeBottom = [ vertexTable[ row, column, 0 ], vertexTable[ row + 1, column + 1, 0 ] ]
edgeDiagonalTable[ row, column, 0 ] = len( vertexPairs )
vertexPairs.append( diagonalEdgeBottom )
diagonalEdgeTop = [ vertexTable[ row, column, 1 ], vertexTable[ row + 1, column + 1, 1 ] ]
edgeDiagonalTable[ row, column, 1 ] = len( vertexPairs )
vertexPairs.append( diagonalEdgeTop )
for row in xrange( self.numberOfInBetweens ):
for column in xrange( self.numberOfInBetweens ):
fourThirtyOClockFaceBottom = [ edgeHorizontalTable[ row, column, 0 ], edgeVerticalTable[ row + 1, column, 0 ], edgeDiagonalTable[ row, column, 0 ] ]
edgeTriples.append( fourThirtyOClockFaceBottom )
tenThirtyOClockFaceBottom = [ edgeHorizontalTable[ row, column + 1, 0 ], edgeVerticalTable[ row, column, 0 ], edgeDiagonalTable[ row, column, 0 ] ]
edgeTriples.append( tenThirtyOClockFaceBottom )
fourThirtyOClockFaceTop = [ edgeHorizontalTable[ row, column, 1 ], edgeVerticalTable[ row + 1, column, 1 ], edgeDiagonalTable[ row, column, 1 ] ]
edgeTriples.append( fourThirtyOClockFaceTop )
tenThirtyOClockFaceTop = [ edgeHorizontalTable[ row, column + 1, 1 ], edgeVerticalTable[ row, column, 1 ], edgeDiagonalTable[ row, column, 1 ] ]
edgeTriples.append( tenThirtyOClockFaceTop )
radiusX = float( self.object.attributeTable[ 'rx' ] )
radiusY = float( self.object.attributeTable[ 'ry' ] )
radiusZ = float( self.object.attributeTable[ 'rz' ] )
for vertex in vertices:
vertex.normalize()
vertex.x *= radiusX
vertex.y *= radiusY
vertex.z *= radiusZ
self.setBottomTopTriangleMesh( edgeTriples, matrix4By4, vertexPairs, vertices )
globalCarvableClassObjectInfoTable = getCarvableClassObjectInfoTable()
def main( hashtable = None ):
"Display the inset dialog."
if len( sys.argv ) > 1:
getCarving( ' '.join( sys.argv[ 1 : ] ) )
if __name__ == "__main__":
main()
| natetrue/ReplicatorG | skein_engines/skeinforge-0006/skeinforge_tools/import_plugins/xml.py | Python | gpl-2.0 | 37,860 |
# Time-stamp: <2016-03-14 Mon 13:19:27 Shaikh>
def reverse(text):
return text[::-1]
def is_palindrome(text):
return text == reverse(text)
def ex_alpha(text):
result = list()
for l in text:
if l.isalpha():
result.append(l)
return ''.join(result).lower()
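# For example: reverse('abc') returns 'cba', ex_alpha('A man, a plan!')
# returns 'amanaplan', and is_palindrome('racecar') returns True.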
something = input('Enter text:')
if is_palindrome(ex_alpha(something)):
print("Yes, it is palindrome.")
else:
print("Oop, no, it's not palindrome.")
| SyrakuShaikh/python | learning/a_byte_of_python/hw_check_palindrome.py | Python | gpl-3.0 | 455 |
#!/usr/bin/python3
# ============================================================================
# Name : des-encryption.py
# Author : Hamza Megahed
# Version : 1.0
# Copyright : Copyright 2014 Hamza Megahed
# Description : DES Encryption Algorithm
# ============================================================================
#
#
# ============================================================================
# This file is part of DES Calculator.
#
# DES Calculator is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# DES Calculator is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with DES Calculator. If not, see <http://www.gnu.org/licenses/>.
# ===========================================================================
import itertools
from keygen import *
# Convert the plaintext input from hex to binary
pt_hexinput = input("Enter The PlainText in Hex(16 digits):\n")
try:
    int(pt_hexinput, 16)
except ValueError:
    raise SystemExit("That is an invalid hex value")
if len(pt_hexinput) != 16:
    raise ValueError('The plaintext must be exactly 16 hex digits')
pt_bininput=bin(int(pt_hexinput, 16))[2:].zfill(64)
pt= []
pt.append(0)
for digit in str(pt_bininput):
pt.append(int(digit))
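# pt[0] is a dummy entry so the permutation tables below can use the 1-based
# bit numbering of the DES standard (bit 1 is pt[1], ..., bit 64 is pt[64]).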
# Initial permutation
IP= [pt[58], pt[50], pt[42], pt[34], pt[26], pt[18], pt[10], pt[2],
pt[60], pt[52], pt[44], pt[36], pt[28], pt[20], pt[12], pt[4],
pt[62], pt[54], pt[46], pt[38], pt[30], pt[22], pt[14], pt[6],
pt[64], pt[56], pt[48], pt[40], pt[32], pt[24], pt[16], pt[8],
pt[57], pt[49], pt[41], pt[33], pt[25], pt[17], pt[9], pt[1],
pt[59], pt[51], pt[43], pt[35], pt[27], pt[19], pt[11], pt[3],
pt[61], pt[53], pt[45], pt[37], pt[29], pt[21], pt[13], pt[5],
pt[63], pt[55], pt[47], pt[39], pt[31], pt[23], pt[15], pt[7]]
#Permutation Function
def permu(perm):
p= [perm[15], perm[6], perm[19], perm[20],
perm[28], perm[11], perm[27], perm[16],
perm[0], perm[14], perm[22], perm[25],
perm[4], perm[17], perm[30], perm[9],
perm[1], perm[7], perm[23], perm[13],
perm[31], perm[26], perm[2], perm[8],
perm[18], perm[12], perm[29], perm[5],
perm[21], perm[10], perm[3], perm[24]]
return (p)
#Left side
L_IP = [pt[58], pt[50], pt[42], pt[34], pt[26], pt[18], pt[10], pt[2],
pt[60], pt[52], pt[44], pt[36], pt[28], pt[20], pt[12], pt[4],
pt[62], pt[54], pt[46], pt[38], pt[30], pt[22], pt[14], pt[6],
pt[64], pt[56], pt[48], pt[40], pt[32], pt[24], pt[16], pt[8]]
#Right side
R_IP = [pt[57], pt[49], pt[41], pt[33], pt[25], pt[17], pt[9], pt[1],
pt[59], pt[51], pt[43], pt[35], pt[27], pt[19], pt[11], pt[3],
pt[61], pt[53], pt[45], pt[37], pt[29], pt[21], pt[13], pt[5],
pt[63], pt[55], pt[47], pt[39], pt[31], pt[23], pt[15], pt[7]]
#Expand right side from 32 bits to 48 bits
def extend(ex):
EX = [ex[31], ex[0], ex[1], ex[2], ex[3], ex[4],
ex[3], ex[4], ex[5], ex[6], ex[7], ex[8],
ex[7], ex[8], ex[9], ex[10], ex[11], ex[12],
ex[11], ex[12], ex[13], ex[14], ex[15], ex[16],
ex[15], ex[16], ex[17], ex[18], ex[19], ex[20],
ex[19], ex[20], ex[21], ex[22], ex[23], ex[24],
ex[23], ex[24], ex[25], ex[26], ex[27], ex[28],
ex[27], ex[28], ex[29], ex[30], ex[31], ex[0]]
return (EX)
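# The expansion above widens the 32-bit half block to 48 bits by repeating the
# edge bits of each 4-bit group, so every 6-bit chunk overlaps its neighbours
# by one bit on each side (note ex[31] and ex[0] each appear twice).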
#S-Boxes
def S_Boxes():
S1 = [[14, 4, 13, 1, 2, 15, 11, 8, 3, 10, 6, 12, 5, 9, 0, 7],
[0, 15, 7, 4, 14, 2, 13, 1, 10, 6, 12, 11, 9, 5, 3, 8],
[4, 1, 14, 8, 13, 6, 2, 11, 15, 12, 9, 7, 3, 10, 5, 0],
[15, 12, 8, 2, 4, 9, 1, 7, 5, 11, 3, 14, 10, 0, 6, 13]]
S2 = [[15, 1, 8, 14, 6, 11, 3, 4, 9, 7, 2, 13, 12, 0, 5, 10],
[3, 13, 4, 7, 15, 2, 8, 14, 12, 0, 1, 10, 6, 9, 11, 5],
[0, 14, 7, 11, 10, 4, 13, 1, 5, 8, 12, 6, 9, 3, 2, 15],
[13, 8, 10, 1, 3, 15, 4, 2, 11, 6, 7, 12, 0, 5, 14, 9]]
S3 = [[10, 0, 9, 14, 6, 3, 15, 5, 1, 13, 12, 7, 11, 4, 2, 8],
[13, 7, 0, 9, 3, 4, 6, 10, 2, 8, 5, 14, 12, 11, 15, 1],
[13, 6, 4, 9, 8, 15, 3, 0, 11, 1, 2, 12, 5, 10, 14, 7],
[1, 10, 13, 0, 6, 9, 8, 7, 4, 15, 14, 3, 11, 5, 2, 12]]
S4 = [[7, 13, 14, 3, 0, 6, 9, 10, 1, 2, 8, 5, 11, 12, 4, 15],
[13, 8, 11, 5, 6, 15, 0, 3, 4, 7, 2, 12, 1, 10, 14, 9],
[10, 6, 9, 0, 12, 11, 7, 13, 15, 1, 3, 14, 5, 2, 8, 4],
[3, 15, 0, 6, 10, 1, 13, 8, 9, 4, 5, 11, 12, 7, 2, 14]]
S5 = [[2, 12, 4, 1, 7, 10, 11, 6, 8, 5, 3, 15, 13, 0, 14, 9],
[14, 11, 2, 12, 4, 7, 13, 1, 5, 0, 15, 10, 3, 9, 8, 6],
[4, 2, 1, 11, 10, 13, 7, 8, 15, 9, 12, 5, 6, 3, 0, 14],
[11, 8, 12, 7, 1, 14, 2, 13, 6, 15, 0, 9, 10, 4, 5, 3]]
S6 = [[12, 1, 10, 15, 9, 2, 6, 8, 0, 13, 3, 4, 14, 7, 5, 11],
[10, 15, 4, 2, 7, 12, 9, 5, 6, 1, 13, 14, 0, 11, 3, 8],
[9, 14, 15, 5, 2, 8, 12, 3, 7, 0, 4, 10, 1, 13, 11, 6],
[4, 3, 2, 12, 9, 5, 15, 10, 11, 14, 1, 7, 6, 0, 8, 13]]
S7 = [[4, 11, 2, 14, 15, 0, 8, 13, 3, 12, 9, 7, 5, 10, 6, 1],
[13, 0, 11, 7, 4, 9, 1, 10, 14, 3, 5, 12, 2, 15, 8, 6],
[1, 4, 11, 13, 12, 3, 7, 14, 10, 15, 6, 8, 0, 5, 9, 2],
[6, 11, 13, 8, 1, 4, 10, 7, 9, 5, 0, 15, 14, 2, 3, 12]]
S8 = [[13, 2, 8, 4, 6, 15, 11, 1, 10, 9, 3, 14, 5, 0, 12, 7],
[1, 15, 13, 8, 10, 3, 7, 4, 12, 5, 6, 11, 0, 14, 9, 2],
[7, 11, 4, 1, 9, 12, 14, 2, 0, 6, 10, 13, 15, 3, 5, 8],
[2, 1, 14, 7, 4, 10, 8, 13, 15, 12, 9, 0, 3, 5, 6, 11]]
return [S1, S2, S3, S4, S5, S6, S7, S8]
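# Hedged illustration (this helper is ours, not part of the original script):
# a 6-bit S-box input selects its row from the two outer bits and its column
# from the four inner bits, matching the indexing used in the round loop below.
def sbox_lookup_example(box, six_bits):
    """Look up a 6-bit string such as '011011' in one S-box table."""
    row = int(six_bits[0] + six_bits[5], 2)  # outer bits -> row 0..3
    column = int(six_bits[1:5], 2)           # inner bits -> column 0..15
    return box[row][column]
# Example: sbox_lookup_example(S_Boxes()[0], '011011') returns 5.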
#////////////////////////////////////////////////////////////////////////////#
EX_R=extend(R_IP)
print("Initial Permutation =",format(IP))
print("Left ",format(L_IP))
print("Right ",format(R_IP))
r=1
for x in shift:
print("================================================================================")
print ("==========")
print ("Round ",format(r))
print ("==========\n")
r+=1
print("Expanded Right ",format(EX_R))
new=[]
key=gen(x)
print("Round Key ",format(key))
for i in range(48):
new.append(EX_R[i] ^ key[i])
print("XOR result",format(new))
new= list(map(str, new))
temp=0
temp1=[]
s_box = S_Boxes()
y=0
for x in range (0,48,6):
temp = s_box[y][int(''.join(new[x]+new[x+5]),2)][int(''.join(new[x+1:x+5]),2)]
if y < 8:
y+=1
temp=(bin(int(temp))[2:].zfill(4))
temp1.append([int(i) for i in str(temp)])
temp1 = list(itertools.chain(*temp1))
print("F Function output ",format(temp1))
temp1=permu(temp1)
print("Output of permutation function ",format(temp1))
temp2=[]
for i in range(32):
temp2.append(temp1[i] ^ L_IP[i])
L_IP=R_IP
R_IP=temp2
if r==17:
break
print("New Right ",format(R_IP))
print("New Left ",format(L_IP))
EX_R=extend(R_IP)
R_IP, L_IP = L_IP, R_IP
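# The round loop swaps the halves after every round, but standard DES omits
# the swap after round 16; the swap above restores the required L/R order
# before the final permutation.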
res=L_IP+R_IP
invIP = [res[39], res[7], res[47], res[15], res[55], res[23], res[63], res[31],
res[38], res[6], res[46], res[14], res[54], res[22], res[62], res[30],
res[37], res[5], res[45], res[13], res[53], res[21], res[61], res[29],
res[36], res[4], res[44], res[12], res[52], res[20], res[60], res[28],
res[35], res[3], res[43], res[11], res[51], res[19], res[59], res[27],
res[34], res[2], res[42], res[10], res[50], res[18], res[58], res[26],
res[33], res[1], res[41], res[9], res[49], res[17], res[57], res[25],
res[32], res[0], res[40], res[8], res[48], res[16], res[56], res[24]]
print("================================================================================\n")
print("CipherText in Binary = ",format(invIP))
invIP= list(map(str, invIP))
invIP=''.join(invIP)
invIP=hex(int(invIP, 2))[2:].zfill(16)
print("CipherText in Hex = ",format(invIP))
| Hamza-Megahed/des-calculator | des-encryption.py | Python | gpl-3.0 | 8,650 |
#!/usr/bin/env python
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import argparse
from os import path
from os import getcwd
PROJECT_FILECONTENT_TEMPLATE = '{"folders":[{"path": "%p"}]}\n'
PROJET_FILENAME_TEMPLATE = "{0}.sublime-project"
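# Example (path and name are illustrative): running "init" from /home/user/proj
# creates proj.sublime-project containing {"folders":[{"path": "/home/user/proj"}]}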
class BaseCommand(object):
def __init__(self, name):
self.args = None
self.name = name
def run(self, ctx):
args = ctx["args"]
progname = ctx["progname"]
parser = argparse.ArgumentParser(prog="{0} {1}".format(progname, self.name))
BaseCommand.add_common_args(parser)
parser.add_argument("-p", "--project", type=str, help="the sublime text project file name")
parser.add_argument("--force", action="store_true", help="override the project file if it exists")
self.args = parser.parse_args(args)
@classmethod
def add_common_args(self, parser):
parser.add_argument("-f", "--folder", type=str, help="the project folder")
class InitCommand(BaseCommand):
def __init__(self):
super(InitCommand, self).__init__("init")
def run(self, ctx):
super(InitCommand, self).run(ctx)
projectRootPath = getcwd()
if self.args.folder:
projectRootPath = path.abspath(self.args.folder)
projectFileName = None
if self.args.project:
projectName = self.args.project
projectFileName = PROJET_FILENAME_TEMPLATE.format(self.args.project)
else:
projectName = path.basename(projectRootPath)
projectFileName = PROJET_FILENAME_TEMPLATE.format(projectName)
projectFile = None
try:
if not self.args.force and path.exists(projectFileName):
print "{0} already exists, use --force to override the project file.".format(projectFileName)
return
projectFile = open(projectFileName, "w")
content = PROJECT_FILECONTENT_TEMPLATE.replace("%p", projectRootPath)
projectFile.write(content)
print "{0} project has been created.".format(projectName)
except Exception, e:
print "Failed to create the project due to {0}.".format(e)
finally:
if projectFile :
projectFile.close()
registry = {'init' : InitCommand()} | acollign/sublp | commands.py | Python | gpl-3.0 | 2,603 |
# Copyright 2016 ETH Zurich
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
:mod:`lib_crypto_hash_tree_test` --- lib.crypto.hash_tree unit tests
======================================================================
"""
# Stdlib
from unittest.mock import patch
# External packages
import nose
import nose.tools as ntools
# SCION
from lib.crypto.hash_tree import (
ConnectedHashTree,
HashTree,
)
from lib.defines import (
HASHTREE_EPOCH_TIME,
HASHTREE_EPOCH_TOLERANCE,
)
from lib.packet.scion_addr import ISD_AS
from lib.types import HashType
from test.testcommon import create_mock_full
class TestHashTreeCalcTreeDepth(object):
"""
Unit test for lib.crypto.hash_tree.HashTree.calc_tree_depth
"""
@patch("lib.crypto.hash_tree.HashTree._setup", autospec=True)
def test_for_non2power(self, _):
# Setup
inst = HashTree(ISD_AS("1-11"), "if_ids", "seed", 1, HashType.SHA256)
# Call
inst.calc_tree_depth(6)
# Tests
ntools.eq_(inst._depth, 3)
@patch("lib.crypto.hash_tree.HashTree._setup", autospec=True)
def test_for_2power(self, _):
# Setup
if_ids = [1, 2, 3, 4]
seed = b"abc"
inst = HashTree(ISD_AS("1-11"), if_ids, seed, 1, HashType.SHA256)
# Call
inst.calc_tree_depth(8)
# Tests
ntools.eq_(inst._depth, 3)
class TestHashTreeCreateTree(object):
"""
Unit test for lib.crypto.hash_tree.HashTree.create_tree
"""
@patch("lib.crypto.hash_tree.HASHTREE_N_EPOCHS", 1)
@patch("lib.crypto.hash_tree.HashTree._setup", autospec=True)
@patch("lib.crypto.hash_tree.hash_func_for_type", autospec=True)
def test(self, hash_func_for_type, _):
# Setup
isd_as = ISD_AS("1-11")
if_ids = [1, 2, 3]
hashes = [b"s10", b"10s10", b"s20", b"20s20", b"s30", b"30s30",
b"0", b"30s300", b"10s1020s20", b"10s1020s2030s300"]
hash_func = create_mock_full(side_effect=hashes)
hash_func_for_type.return_value = hash_func
inst = HashTree(isd_as, if_ids, b"s", 1, HashType.SHA256)
inst._depth = 2
# Call
inst.create_tree(if_ids)
# Tests
expected = [b"10s1020s2030s300", b"10s1020s20", b"30s300", b"10s10",
b"20s20", b"30s30", b"0"]
ntools.eq_(inst._nodes, expected)
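        # The expected list is the tree in array (heap) form: _nodes[0] is the
        # root and the children of _nodes[i] are _nodes[2*i + 1] and _nodes[2*i + 2].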
class TestHashTreeGetProof(object):
"""
Unit test for lib.crypto.hash_tree.HashTree.get_proof
"""
@patch("lib.crypto.hash_tree.HASHTREE_N_EPOCHS", 1)
@patch("lib.crypto.hash_tree.HashTree._setup", autospec=True)
@patch("lib.crypto.hash_tree.hash_func_for_type", autospec=True)
def test(self, hash_func_for_type, _):
# Setup
isd_as = ISD_AS("1-11")
if_ids = [1, 2, 3]
hashes = [b"s10", b"10s10", b"s20", b"20s20", b"s30", b"30s30",
b"0", b"30s300", b"10s1020s20", b"10s1020s2030s300", b"s20"]
hash_func = create_mock_full(side_effect=hashes)
hash_func_for_type.return_value = hash_func
inst = HashTree(isd_as, if_ids, b"s", 1, HashType.SHA256)
inst._depth = 2
inst.create_tree(if_ids)
# Call
proof = inst.get_proof(2, 0, "prev", "next")
# Tests
ntools.eq_(proof.p.isdas, int(isd_as))
ntools.eq_(proof.p.nonce, b"s20")
ntools.eq_(proof.p.siblings[0].isLeft, True)
ntools.eq_(proof.p.siblings[0].hash, b"10s10")
ntools.eq_(proof.p.siblings[1].isLeft, False)
ntools.eq_(proof.p.siblings[1].hash, b"30s300")
@patch("lib.crypto.hash_tree.HASHTREE_N_EPOCHS", 1)
class TestConnectedHashTreeUpdate(object):
"""
Unit test for lib.crypto.hash_tree.ConnectedHashTree.update
"""
def test(self):
# Setup
isd_as = ISD_AS("1-11")
if_ids = [23, 35, 120]
initial_seed = b"qwerty"
inst = ConnectedHashTree(isd_as, if_ids, initial_seed, HashType.SHA256)
root1_before_update = inst._ht1._nodes[0]
root2_before_update = inst._ht2._nodes[0]
# Call
new_tree = inst.get_next_tree(isd_as, if_ids, b"new!!seed", HashType.SHA256)
inst.update(new_tree)
# Tests
root0_after_update = inst._ht0_root
root1_after_update = inst._ht1._nodes[0]
ntools.eq_(root1_before_update, root0_after_update)
ntools.eq_(root2_before_update, root1_after_update)
class TestConnectedHashtreeGetPossibleHashes(object):
"""
Unit test for lib.crypto.hash_tree.ConnectedHashTree.get_possible_hashes
"""
@patch("lib.crypto.hash_tree.hash_func_for_type", autospec=True)
def test(self, hash_func_for_type):
# Setup
siblings = []
siblings.append(create_mock_full({"isLeft": True, "hash": "10s10"}))
siblings.append(create_mock_full({"isLeft": False, "hash": "30s300"}))
p = create_mock_full(
{"ifID": 2, "epoch": 0, "nonce": b"s20", "siblings": siblings,
"prevRoot": "p", "nextRoot": "n", "hashType": 0})
rev_info = create_mock_full({"p": p})
hashes = ["20s20", "10s1020s20", "10s1020s2030s300",
"p10s1020s2030s300", "10s1020s2030s300n"]
hash_func = create_mock_full(side_effect=hashes)
hash_func_for_type.return_value = hash_func
# Call
hash01, hash12 = ConnectedHashTree.get_possible_hashes(rev_info)
# Tests
ntools.eq_(hash01, "p10s1020s2030s300")
ntools.eq_(hash12, "10s1020s2030s300n")
class TestConnectedHashTreeUpdateAndVerify(object):
"""
Unit tests for lib.crypto.hash_tree.ConnectedHashTree.verify
used along with lib.crypto.hash_tree.ConnectedHashTree.update
"""
def test(self):
# Check that the revocation proof is verifiable in T.
isd_as = ISD_AS("1-11")
if_ids = [23, 35, 120]
initial_seed = b"qwerty"
inst = ConnectedHashTree(isd_as, if_ids, initial_seed, HashType.SHA256)
root = inst.get_root()
# Call
proof = inst.get_proof(120)
# Tests
ntools.eq_(ConnectedHashTree.verify(proof, root), True)
def test_one_timestep(self):
# Check that the revocation proof is verifiable across T and T+1.
# Setup
isd_as = ISD_AS("1-11")
if_ids = [23, 35, 120]
initial_seed = b"qwerty"
inst = ConnectedHashTree(isd_as, if_ids, initial_seed, HashType.SHA256)
root = inst.get_root()
# Call
next_tree = inst.get_next_tree(isd_as, if_ids, b"new!!seed", HashType.SHA256)
inst.update(next_tree)
# Tests
proof = inst.get_proof(35) # if_id = 35.
ntools.eq_(ConnectedHashTree.verify(proof, root), True)
def test_two_timesteps(self):
# Check that the revocation proof is "NOT" verifiable across T and T+2.
# Setup
isd_as = ISD_AS("1-11")
if_ids = [23, 35, 120]
initial_seed = b"qwerty"
inst = ConnectedHashTree(isd_as, if_ids, initial_seed, HashType.SHA256)
root = inst.get_root()
# Call
new_tree = inst.get_next_tree(isd_as, if_ids, b"newseed.@1", HashType.SHA256)
inst.update(new_tree)
new_tree = inst.get_next_tree(isd_as, if_ids, b"newseed.@2", HashType.SHA256)
inst.update(new_tree)
# Tests
proof = inst.get_proof(35) # if_id = 35.
ntools.eq_(ConnectedHashTree.verify(proof, root), False)
class TestConnectedHashTreeVerifyEpoch(object):
"""
Unit test for lib.crypto.hash_tree.ConnectedHashTree.verify_epoch
"""
@patch("time.time", autospec=True)
def test_same_epoch(self, time):
# Setup
time.return_value = HASHTREE_EPOCH_TIME + HASHTREE_EPOCH_TOLERANCE + 1
# Call and tests
ntools.eq_(ConnectedHashTree.verify_epoch(1), True)
ntools.eq_(ConnectedHashTree.verify_epoch(2), False)
@patch("time.time", autospec=True)
def test_different_epoch(self, time):
# Setup
time.return_value = HASHTREE_EPOCH_TIME + 1
# Call and test
ntools.eq_(ConnectedHashTree.verify_epoch(0), True)
ntools.eq_(ConnectedHashTree.verify_epoch(1), True)
if __name__ == "__main__":
nose.run(defaultTest=__name__)
| dmpiergiacomo/scion | python/test/lib/crypto/hash_tree_test.py | Python | apache-2.0 | 8,740 |
#!/usr/bin/env python
##
# Learning - 5
# I'm learning by myself, so I'm sorry if my English isn't good.
#
# More on Strings
#
# a list of strings
# the find function
# the split function
##
print("Learning-5") #print text "Learning -5"
print("String on Array")
strings = ['i','love','python','easy','learn','and','learn','again']
for a in strings:
print(a)
print("")
print("Function Find")
string = "I love Python" #this sentence
find = string.find('love') #finding word of sentence
print(find) # if value 2 its true, i love python is 3 word.
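# Note: find returns the character index of the first match, and -1 when the
# substring is not present at all.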
print("")
print("Function Split")
print("From = ",string)
print("To = ", string.split( )) #split of setence with single quote
| muhfaris/Learnings | Python/Python/Basic/step_5.py | Python | lgpl-3.0 | 680 |