code (string, 3-1.05M) | repo_name (string, 5-104) | path (string, 4-251) | language (string, 1 class) | license (string, 15 classes) | size (int64, 3-1.05M)
---|---|---|---|---|---
# Copyright (c) 2010 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Handles ConsoleProxy API requests."""
from oslo_config import cfg
from nova.compute import rpcapi as compute_rpcapi
from nova.console import rpcapi as console_rpcapi
from nova.db import base
from nova.openstack.common import uuidutils
CONF = cfg.CONF
CONF.import_opt('console_topic', 'nova.console.rpcapi')
class API(base.Base):
"""API for spinning up or down console proxy connections."""
def __init__(self, **kwargs):
super(API, self).__init__(**kwargs)
def get_consoles(self, context, instance_uuid):
return self.db.console_get_all_by_instance(context, instance_uuid,
columns_to_join=['pool'])
def get_console(self, context, instance_uuid, console_uuid):
return self.db.console_get(context, console_uuid, instance_uuid)
def delete_console(self, context, instance_uuid, console_uuid):
console = self.db.console_get(context, console_uuid, instance_uuid)
rpcapi = console_rpcapi.ConsoleAPI(topic=CONF.console_topic,
server=console['pool']['host'])
rpcapi.remove_console(context, console['id'])
def create_console(self, context, instance_uuid):
        # NOTE(mdragon): If we wanted to return the console info
        #                here, we would need to do a call.
# They can just do an index later to fetch
# console info. I am not sure which is better
# here.
instance = self._get_instance(context, instance_uuid)
topic = self._get_console_topic(context, instance['host'])
server = None
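        # The compute service may return the topic as "topic.host"; split it
        # so the RPC call can be routed to the right console host.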
if '.' in topic:
topic, server = topic.split('.', 1)
rpcapi = console_rpcapi.ConsoleAPI(topic=topic, server=server)
rpcapi.add_console(context, instance['id'])
def _get_console_topic(self, context, instance_host):
rpcapi = compute_rpcapi.ComputeAPI()
return rpcapi.get_console_topic(context, instance_host)
def _get_instance(self, context, instance_uuid):
if uuidutils.is_uuid_like(instance_uuid):
instance = self.db.instance_get_by_uuid(context, instance_uuid)
else:
instance = self.db.instance_get(context, instance_uuid)
return instance
| cloudbase/nova-virtualbox | nova/console/api.py | Python | apache-2.0 | 2,962 |
# Rekall Memory Forensics
# Copyright (c) 2010, 2011, 2012 Michael Ligh <[email protected]>
# Copyright 2013 Google Inc. All Rights Reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or (at
# your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
from rekall import utils
from rekall.plugins.windows import common
# pylint: disable=protected-access
# References:
# http://www.dcl.hpi.uni-potsdam.de/research/WRK/2007/08/a-performance-issue-in-windows-timer-management/
class Timers(common.WindowsCommandPlugin):
"""Print kernel timers and associated module DPCs.
Ref:
http://computer.forensikblog.de/en/2011/10/timers-and-times.html
"""
__name = "timers"
table_header = [
dict(name="Tbl", cname="Table", align="r", width=3),
dict(name="Offset", cname="offset", style="address"),
dict(name="Due", cname="due_high", width=22),
dict(name="DueTime", cname="due", width=24),
dict(name="ms", cname="period", width=11, align="r"),
dict(name="Sig", cname="signaled", align="r", width=4),
dict(name="Routine", cname="routine", style="address"),
dict(name="Symbol", cname="symbol")
]
def _timers(self):
if self.profile.get_constant("KiTimerTableListHead"):
# On XP x64, Windows 2003 SP1-SP2, and Vista SP0-SP2,
# KiTimerTableListHead is an array of 512 _KTIMER_TABLE_ENTRY
# structs.
if self.profile.has_type("_KTIMER_TABLE_ENTRY"):
lists = self.profile.get_constant_object(
"KiTimerTableListHead",
target="Array",
target_args=dict(
target='_KTIMER_TABLE_ENTRY',
count=512)
)
for i, l in enumerate(lists):
for t in l.Entry.list_of_type("_KTIMER", "TimerListEntry"):
yield i, t
else:
# On XP SP0-SP3 x86 and Windows 2003 SP0, KiTimerTableListHead
# is an array of 256 _LIST_ENTRY for _KTIMERs.
lists = self.profile.get_constant_object(
"KiTimerTableListHead",
target="Array",
target_args=dict(
target='_LIST_ENTRY',
count=256)
)
for i, l in enumerate(lists):
for t in l.list_of_type_fast("_KTIMER", "TimerListEntry"):
yield i, t
else:
# On Windows 7, there is no more KiTimerTableListHead. The list is
# at _KPCR.PrcbData.TimerTable.TimerEntries (credits to Matt Suiche
# for this one. See http://pastebin.com/FiRsGW3f).
kpcr = self.session.plugins.kpcr().kpcr()
for i, table in enumerate(kpcr.Prcb.TimerTable.TimerEntries):
self.session.report_progress("Table %r", table)
for t in table.Entry.list_of_type_fast(
"_KTIMER", "TimerListEntry"):
yield i, t
def timers(self):
"""A generator of timer objects."""
# Sort the timers by address to make them easier to inspect.
for i, timer in self._timers():
# This is the code from reactos which checks for this:
# #define ASSERT_TIMER(E) \
# NT_ASSERT(((E)->Header.Type == TimerNotificationObject) || \
# ((E)->Header.Type == TimerSynchronizationObject))
            if timer.Header.Type not in ["TimerNotificationObject",
                                         "TimerSynchronizationObject"]:
continue
self.session.report_progress("Looking at %#x", timer)
# Ignore timers without DPCs
if (not timer.Dpc.is_valid() or
not timer.Dpc.DeferredRoutine.is_valid()):
continue
yield i, timer
def collect(self):
# Print kuser_shared things.
kuser_shared = self.profile.get_constant_object(
"KI_USER_SHARED_DATA", "_KUSER_SHARED_DATA")
interrupt_time = ((kuser_shared.InterruptTime.High1Time << 32) +
kuser_shared.InterruptTime.LowPart)
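        # _KTIMER due times are expressed in InterruptTime units (100ns ticks
        # since boot), so approximate the boot timestamp by subtracting the
        # current InterruptTime from the wall-clock SystemTime; adding a
        # timer's DueTime to it below yields an absolute due time.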
now = kuser_shared.SystemTime.as_windows_timestamp() - interrupt_time
seen = set()
for i, timer in self.timers():
if timer in seen:
continue
seen.add(timer)
if timer.Header.SignalState.v():
signaled = "Yes"
else:
signaled = "-"
yield (i,
timer,
# Due time in InterruptTime (100ns).
"0x%0.20x" % timer.DueTime.QuadPart,
self.profile.WinFileTime(value=now+timer.DueTime.QuadPart,
is_utc=True),
timer.Period,
signaled,
timer.Dpc.DeferredRoutine,
utils.FormattedAddress(self.session.address_resolver,
timer.Dpc.DeferredRoutine))
| rainaashutosh/MyTestRekall | rekall-core/rekall/plugins/windows/malware/timers.py | Python | gpl-2.0 | 5,781 |
import hmac
import uuid
import urllib
import hashlib
import httplib as http
from github3.repos.branch import Branch
from framework.exceptions import HTTPError
from website.addons.base.exceptions import HookError
from website.addons.github.api import GitHubClient
MESSAGE_BASE = 'via the Open Science Framework'
MESSAGES = {
'add': 'Added {0}'.format(MESSAGE_BASE),
'move': 'Moved {0}'.format(MESSAGE_BASE),
'copy': 'Copied {0}'.format(MESSAGE_BASE),
'update': 'Updated {0}'.format(MESSAGE_BASE),
'delete': 'Deleted {0}'.format(MESSAGE_BASE),
}
def make_hook_secret():
return str(uuid.uuid4()).replace('-', '')
HOOK_SIGNATURE_KEY = 'X-Hub-Signature'
def verify_hook_signature(node_settings, data, headers):
"""Verify hook signature.
:param GithubNodeSettings node_settings:
:param dict data: JSON response body
:param dict headers: Request headers
:raises: HookError if signature is missing or invalid
"""
if node_settings.hook_secret is None:
raise HookError('No secret key')
digest = hmac.new(
str(node_settings.hook_secret),
data,
digestmod=hashlib.sha1
).hexdigest()
signature = headers.get(HOOK_SIGNATURE_KEY, '').replace('sha1=', '')
if digest != signature:
raise HookError('Invalid signature')
def get_path(kwargs, required=True):
path = kwargs.get('path')
if path:
return urllib.unquote_plus(path)
elif required:
raise HTTPError(http.BAD_REQUEST)
def get_refs(addon, branch=None, sha=None, connection=None):
"""Get the appropriate branch name and sha given the addon settings object,
and optionally the branch and sha from the request arguments.
:param str branch: Branch name. If None, return the default branch from the
repo settings.
:param str sha: The SHA.
:param GitHub connection: GitHub API object. If None, one will be created
from the addon's user settings.
"""
connection = connection or GitHubClient(external_account=addon.external_account)
if sha and not branch:
raise HTTPError(http.BAD_REQUEST)
# Get default branch if not provided
if not branch:
repo = connection.repo(addon.user, addon.repo)
if repo is None:
return None, None, None
branch = repo.default_branch
# Get registered branches if provided
registered_branches = (
[Branch.from_json(b) for b in addon.registration_data.get('branches', [])]
if addon.owner.is_registration
else []
)
registered_branch_names = [
each.name
for each in registered_branches
]
# Fail if registered and branch not in registration data
if registered_branches and branch not in registered_branch_names:
raise HTTPError(http.BAD_REQUEST)
# Get data from GitHub API if not registered
branches = registered_branches or connection.branches(addon.user, addon.repo)
# Use registered SHA if provided
for each in branches:
if branch == each.name:
sha = each.commit.sha
break
return branch, sha, branches
def check_permissions(node_settings, auth, connection, branch, sha=None, repo=None):
user_settings = node_settings.user_settings
has_access = False
has_auth = bool(user_settings and user_settings.has_auth)
if has_auth:
repo = repo or connection.repo(
node_settings.user, node_settings.repo
)
has_access = (
repo is not None and (
'permissions' not in repo.to_json() or
repo.to_json()['permissions']['push']
)
)
if sha:
branches = connection.branches(
node_settings.user, node_settings.repo, branch
)
# TODO Will I ever return false?
is_head = next((True for branch in branches if sha == branch.commit.sha), None)
else:
is_head = True
can_edit = (
node_settings.owner.can_edit(auth) and
not node_settings.owner.is_registration and
has_access and
is_head
)
return can_edit
| zachjanicki/osf.io | website/addons/github/utils.py | Python | apache-2.0 | 4,131 |
def main():
a = int(raw_input())
b = int(raw_input())
soma = a + b
print 'SOMA = {}'.format(soma)
if __name__ == '__main__':
main()
| OrianaCadavid/uri-oj | 1003-SimpleSum.py | Python | mit | 154 |
from twisted.internet import reactor
from twisted.internet.protocol import ClientCreator
from twisted.conch.telnet import Telnet
from twisted.internet.defer import Deferred
import datetime
import time
import calendar
import random
import logging
logger = logging.getLogger("main")
timeservers=["time.nist.gov", "time-a.nist.gov", "time-b.nist.gov", "time-nw.nist.gov", "nist1-ny.WiTime.net", "nist1-dc.WiTime.net", "nist1.aol-va.symmetricom.com", "nist1.columbiacountyga.gov", "nist.expertsmi.com", "nist.netservicesgroup.com", "time-a.timefreq.bldrdoc.gov", "time-c.timefreq.bldrdoc.gov", "utcnist.colorado.edu", "utcnist2.colorado.edu", "ntp-nist.ldsbc.edu", "nist1.aol-ca.symmetricom.com", "nist1.symmetricom.com", "nist1-sj.WiTime.net", "nist1-la.WiTime.net"]
class SimpleTelnet(Telnet):
def __init__( self, *args, **kwargs ):
self.deferred = Deferred()
self.data = []
Telnet.__init__(self, *args, **kwargs )
def dataReceived(self, data):
self.data.append( data )
def connectionLost( self, reason ):
self.deferred.callback( "".join(self.data) )
def getTimeOffset():
client = ClientCreator(reactor, SimpleTelnet)
server = timeservers.pop(0)
logger.debug( "Requesting time from %s." % server )
d = client.connectTCP(server, 13, timeout=5)
d.addCallback( _getTimeOffsetCallback, server )
d.addErrback( _getTimeOffsetErrback, 0 )
return d
def _getTimeOffsetErrback( error, count ):
if count < 5:
client = ClientCreator(reactor, SimpleTelnet)
server = timeservers.pop()
logger.debug( "Attempt %s failed, requesting time from %s." % (count + 1, server) )
d = client.connectTCP(server, 13, timeout=5)
d.addCallback( _getTimeOffsetCallback, server )
d.addErrback( _getTimeOffsetErrback, count + 1 )
return d
else:
logger.debug( "Could not fetch time after %s attempts." % count )
return error
def _getTimeOffsetCallback( simple_telnet, server ):
logger.debug( "Connected to time server %s." % server )
simple_telnet.deferred.addCallback( _getTimeOffsetCallback2, server )
return simple_telnet.deferred
def _getTimeOffsetCallback2( data, server ):
logger.debug( "Got time from %s." % server )
t = datetime.datetime(
2000 + int(data[7:9]),
int(data[10:12]),
int(data[13:15]),
int(data[16:18]),
int(data[19:21]),
int(data[22:24]) )
offset = calendar.timegm( t.timetuple() ) - time.time()
return offset
| hiidef/hiispider | legacy/timeoffset.py | Python | mit | 2,547 |
from collections import defaultdict, Counter
from itertools import groupby
class Solution(object):
def removeBoxes(self, boxes):
"""
:type boxes: List[int]
:rtype: int
"""
table = defaultdict(Counter)
table_max = Counter()
B = []
for k, l in groupby(boxes):
B.append((k, len(list(l))))
lB = len(B)
for i in xrange(lB):
table[i, i + 1][B[i][1]] = 0
table_max[i, i + 1] = B[i][1] ** 2
for l in xrange(2, lB + 1):
for i in xrange(lB - l + 1):
fr, to = i, i + l
table_fr_to = table[fr, to]
size = B[fr][1]
table_fr_to[size] = max(table_fr_to[size], table_max[fr + 1, to])
table_max[fr, to] = max(table_max[fr, to], table_fr_to[size] + size ** 2)
for sp in xrange(fr + 1, to):
if B[fr][0] == B[sp][0]:
size_score_l = table[fr, sp]
size_score_r = table[sp, to]
max_size, max_score = 0, 0
size_scores = []
for size_l, score_l in size_score_l.iteritems():
for size_r, score_r in size_score_r.iteritems():
size_scores.append((size_l + size_r, score_l + score_r))
size_scores.sort(key=lambda (size, score): (-size, -score))
out_size_scores = []
for size, score in size_scores:
if not out_size_scores:
out_size_scores.append((size, score))
elif score > out_size_scores[-1][1]:
out_size_scores.append((size, score))
for size, score in out_size_scores:
table_fr_to[size] = max(table_fr_to[size], score)
table_max[fr, to] = max(table_max[fr, to], table_fr_to[size] + size ** 2)
return table_max[0, lB]
| ckclark/leetcode | py/remove-boxes.py | Python | apache-2.0 | 2,106 |
# -*- coding: utf-8 -*-
#
"""
This file is used by this package to configure both the master
and the slave processes
"""
with open("master_node", 'r') as stream:
MASTER_NODE = stream.read().strip()
# Broker
BROKER_URL = "amqp://" + MASTER_NODE
# Backend
CELERY_RESULT_BACKEND = "redis://" + MASTER_NODE + "/1"
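# NOTE: a worker would typically be pointed at this module with something
# like `celery worker --config=celeryconfig`; the exact invocation is not
# shown in this repo.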
| salmanebah/distributed-make | src/celeryconfig.py | Python | gpl-2.0 | 318 |
# Authors: Eberhard Eich <[email protected]>
#
# License: Simplified BSD
from jumeg_mft_funcs import (apply_mft, calc_cdm_w_cut,
compare_est_exp, fit_cdm_w_cut,
scan_cdm_w_cut)
from jumeg_mft_plot import (plot_cdm_data, plot_cdv_distribution,
plot_cdvsum_data,
plot_global_cdv_dist, plot_max_amplitude_data,
plot_max_cdv_data, plot_quality_data,
plot_visualize_mft_sources)
| fboers/jumegX | mft/__init__.py | Python | bsd-3-clause | 549 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import mock
from oslo_config import fixture as fixture_config
from ceilometer.meter import notifications
from ceilometer import sample
from ceilometer.tests import base as test
def fake_uuid(x):
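    # Deterministic, UUID-like string built from one repeated character.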
return '%s-%s-%s-%s' % (x * 8, x * 4, x * 4, x * 12)
NOW = datetime.datetime.isoformat(datetime.datetime.utcnow())
TABLE_CREATE_PAYLOAD = {
u'table_uuid': fake_uuid('r'),
u'index_count': 2,
u'table_name': u'email_data'
}
TABLE_DELETE_PAYLOAD = {
u'table_uuid': fake_uuid('r'),
u'table_name': u'email_data'
}
NOTIFICATION_TABLE_CREATE = {
u'_context_request_id': u'req-d6e9b7ec-976f-443f-ba6e-e2b89b18aa75',
u'_context_tenant': fake_uuid('t'),
u'_context_user': fake_uuid('u'),
u'_context_auth_token': u'',
u'_context_show_deleted': False,
u'_context_is_admin': u'False',
u'_context_read_only': False,
'payload': TABLE_CREATE_PAYLOAD,
'publisher_id': u'magnetodb.winterfell.com',
'message_id': u'3d71fb8a-f1d7-4a4e-b29f-7a711a761ba1',
'event_type': u'magnetodb.table.create.end',
'timestamp': NOW,
'priority': 'info'
}
NOTIFICATION_TABLE_DELETE = {
u'_context_request_id': u'req-d6e9b7ec-976f-443f-ba6e-e2b89b18aa75',
u'_context_tenant': fake_uuid('t'),
u'_context_user': fake_uuid('u'),
u'_context_auth_token': u'',
u'_context_show_deleted': False,
u'_context_is_admin': u'False',
u'_context_read_only': False,
'payload': TABLE_DELETE_PAYLOAD,
'publisher_id': u'magnetodb.winterfell.com',
'message_id': u'4c8f5940-3c90-41af-ac16-f0e3055a305d',
'event_type': u'magnetodb.table.delete.end',
'timestamp': NOW,
'priority': 'info'
}
class TestNotification(test.BaseTestCase):
def setUp(self):
super(TestNotification, self).setUp()
self.CONF = self.useFixture(fixture_config.Config()).conf
self.CONF.set_override(
'meter_definitions_cfg_file',
self.path_get('etc/ceilometer/meters.yaml'), group='meter')
self.handler = notifications.ProcessMeterNotifications(mock.Mock())
def _verify_common_counter(self, c, name, volume):
self.assertIsNotNone(c)
self.assertEqual(name, c.name)
self.assertEqual(fake_uuid('r'), c.resource_id)
self.assertEqual(NOW, c.timestamp)
self.assertEqual(volume, c.volume)
metadata = c.resource_metadata
self.assertEqual(u'magnetodb.winterfell.com', metadata.get('host'))
def test_create_table(self):
counters = list(self.handler.process_notification(
NOTIFICATION_TABLE_CREATE))
self.assertEqual(2, len(counters))
table = [item for item in counters
if item.name == "magnetodb.table.create"][0]
self._verify_common_counter(table, 'magnetodb.table.create', 1)
self.assertEqual(fake_uuid('u'), table.user_id)
self.assertEqual(fake_uuid('t'), table.project_id)
self.assertEqual(sample.TYPE_GAUGE, table.type)
def test_delete_table(self):
counters = list(self.handler.process_notification(
NOTIFICATION_TABLE_DELETE))
self.assertEqual(1, len(counters))
table = counters[0]
self._verify_common_counter(table, 'magnetodb.table.delete', 1)
self.assertEqual(fake_uuid('u'), table.user_id)
self.assertEqual(fake_uuid('t'), table.project_id)
self.assertEqual(sample.TYPE_GAUGE, table.type)
def test_index_count(self):
counters = list(self.handler.process_notification(
NOTIFICATION_TABLE_CREATE))
self.assertEqual(2, len(counters))
table = [item for item in counters
if item.name == "magnetodb.table.index.count"][0]
self._verify_common_counter(table, 'magnetodb.table.index.count', 2)
self.assertEqual(fake_uuid('u'), table.user_id)
self.assertEqual(fake_uuid('t'), table.project_id)
self.assertEqual(sample.TYPE_GAUGE, table.type)
| r-mibu/ceilometer | ceilometer/tests/key_value_storage/test_notifications.py | Python | apache-2.0 | 4,566 |
# -*- coding:utf-8 -*-
def proxy_method(member, method):
"""
call method on self.member or member() instead of self
member:
a callable which has a member named method
or a string indicate the name of a member on self
method:
proxied method
usage:
class A(object):
def __init__(self, list_value):
self.value = list_value
__getitem__ = proxy_method('value', '__getitem__')
a = A([1,2,3])
print a[0] # 1
"""
members = [member]
def proxy(self, *args, **kwargs):
member = members[0]
if callable(member):
member = member()
else:
member = getattr(self, member)
return getattr(member, method)(*args, **kwargs)
return proxy
| semonalbertyeah/pyutils | pyutils/func.py | Python | mit | 856 |
'''
Touch: Base for all touch objects
Every touch in PyMT derives from the abstract Touch class.
A touch can have more or less attributes, depending on the provider.
For example, the TUIO provider can give you a lot of information about the touch,
like position, acceleration, width/height of the shape and so on.
Another provider might just give you x/y coordinates and pressure.
We call these attributes "capabilities". Every touch indicates its
capabilities in its "profile" property.
A profile is just a simple list with strings, containing for example:
* pos (property x, y)
* pos3d (property x, y, z)
* mov (tuio/property X, Y)
* mov3d (tuio/property X, Y, Z)
* dim (tuio/property w, h)
* dim3d (tuio/property w, h, d)
* markerid (tuio/property i (fid property))
* sessionid (tuio/property s (id property))
* angle (tuio/property a)
* angle3D (tuio/property a, b, c)
* rotacc (tuio/property A)
* rotacc3d (tuio/property A, B, C)
* motacc (tuio/property m)
* shape (property shape)
* kinetic
* ... and others could be added by new classes
If you're only interested in a certain kind of touch, check the profile::
def on_touch_down(self, touch):
if 'markerid' not in touch.profile:
# not a fiducial, not interesting
return
'''
__all__ = ('Touch', )
import weakref
from inspect import isroutine
from copy import copy
from pymt.utils import SafeList
from pymt.clock import getClock
from pymt.vector import Vector
class TouchMetaclass(type):
def __new__(mcs, name, bases, attrs):
__attrs__ = []
for base in bases:
if hasattr(base, '__attrs__'):
__attrs__.extend(base.__attrs__)
if '__attrs__' in attrs:
__attrs__.extend(attrs['__attrs__'])
attrs['__attrs__'] = tuple(__attrs__)
return super(TouchMetaclass, mcs).__new__(mcs, name, bases, attrs)
class Touch(object):
'''Abstract class to represent a touch, and support TUIO 1.0 definition.
:Parameters:
`id` : str
uniq ID of the touch
`args` : list
list of parameters, passed to depack() function
'''
__metaclass__ = TouchMetaclass
__uniq_id = 0
__attrs__ = \
('device', 'attr',
'id', 'sx', 'sy', 'sz', 'profile',
'x', 'y', 'z', 'shape',
'dxpos', 'dypos', 'dzpos',
'oxpos', 'oypos', 'ozpos',
'dsxpos', 'dsypos', 'dszpos',
'osxpos', 'osypos', 'oszpos',
'time_start', 'is_double_tap',
'double_tap_time', 'userdata')
def __init__(self, device, id, args):
if self.__class__ == Touch:
raise NotImplementedError, 'class Touch is abstract'
# Uniq ID
Touch.__uniq_id += 1
self.uid = Touch.__uniq_id
self.device = device
# For push/pop
self.attr = []
self.default_attrs = (
'x', 'y', 'z',
'dxpos', 'dypos', 'dzpos',
'oxpos', 'oypos', 'ozpos')
# For grab
self.grab_list = SafeList()
self.grab_exclusive_class = None
self.grab_state = False
self.grab_current = None
# TUIO definition
self.id = id
self.sx = 0.0
self.sy = 0.0
self.sz = 0.0
self.profile = ('pos', )
# new parameters
self.x = 0.0
self.y = 0.0
self.z = 0.0
self.shape = None
self.dxpos = None
self.dypos = None
self.dzpos = None
self.oxpos = None
self.oypos = None
self.ozpos = None
self.dsxpos = None
self.dsypos = None
self.dszpos = None
self.osxpos = None
self.osypos = None
self.oszpos = None
self.time_start = getClock().get_time()
self.is_double_tap = False
self.double_tap_time = 0
self.userdata = {}
self.depack(args)
def depack(self, args):
'''Depack `args` into attributes in class'''
if self.dsxpos is None:
self.dsxpos = self.osxpos = self.sx
self.dsypos = self.osypos = self.sy
self.dszpos = self.oszpos = self.sz
def grab(self, class_instance, exclusive=False):
        '''Grab a touch. You can grab a touch if you absolutely want to receive
on_touch_move() and on_touch_up(), even if the touch is not dispatched
by your parent ::
def on_touch_down(self, touch):
touch.grab(self)
def on_touch_move(self, touch):
if touch.grab_current == self:
# i receive my grabbed touch
else:
# it's a normal touch
def on_touch_up(self, touch):
if touch.grab_current == self:
# i receive my grabbed touch, i must ungrab it !
touch.ungrab(self)
else:
# it's a normal touch
'''
if self.grab_exclusive_class is not None:
raise Exception('Cannot grab the touch, touch are exclusive')
class_instance = weakref.ref(class_instance)
if exclusive:
self.grab_exclusive_class = class_instance
self.grab_list.append(class_instance)
def ungrab(self, class_instance):
        '''Ungrab a previously grabbed touch'''
class_instance = weakref.ref(class_instance)
if self.grab_exclusive_class == class_instance:
self.grab_exclusive_class = None
if class_instance in self.grab_list:
self.grab_list.remove(class_instance)
def move(self, args):
'''Move the touch to another position.'''
self.dxpos = self.x
self.dypos = self.y
self.dzpos = self.z
self.dsxpos = self.sx
self.dsypos = self.sy
self.dszpos = self.sz
self.depack(args)
def scale_for_screen(self, w, h, p=None, rotation=0):
'''Scale position for the screen'''
sx, sy = self.sx, self.sy
if rotation == 0:
self.x = sx * float(w)
self.y = sy * float(h)
elif rotation == 90:
sx, sy = sy, 1-sx
self.x = sx * float(h)
self.y = sy * float(w)
elif rotation == 180:
sx, sy = 1-sx, 1-sy
self.x = sx * float(w)
self.y = sy * float(h)
elif rotation == 270:
sx, sy = 1-sy, sx
self.x = sx * float(h)
self.y = sy * float(w)
if p:
self.z = self.sz * float(p)
if self.oxpos is None:
self.dxpos = self.oxpos = self.x
self.dypos = self.oypos = self.y
self.dzpos = self.ozpos = self.z
def push(self, attrs=None):
'''Push attributes values in `attrs` in the stack'''
if attrs is None:
attrs = self.default_attrs
values = [getattr(self, x) for x in attrs]
self.attr.append((attrs, values))
def pop(self):
'''Pop attributes values from the stack'''
attrs, values = self.attr.pop()
for i in xrange(len(attrs)):
setattr(self, attrs[i], values[i])
def apply_transform_2d(self, transform):
'''Apply a transformation on x, y, dxpos, dypos, oxpos, oypos'''
self.x, self.y = transform(self.x, self.y)
self.dxpos, self.dypos = transform(self.dxpos, self.dypos)
self.oxpos, self.oypos = transform(self.oxpos, self.oypos)
def copy_to(self, to):
'''Copy some attribute to another touch object.'''
for attr in self.__attrs__:
to.__setattr__(attr, copy(self.__getattribute__(attr)))
def __str__(self):
classname = str(self.__class__).split('.')[-1].replace('>', '').replace('\'', '')
return '<%s spos=%s pos=%s>' % (classname, str(self.spos), str(self.pos))
def distance(self, other_touch):
return Vector(self.pos).distance(other_touch.pos)
def __repr__(self):
out = []
for x in dir(self):
v = getattr(self, x)
if x[0] == '_':
continue
if isroutine(v):
continue
out.append('%s="%s"' % (x, v))
return '<%s %s>' % (
self.__class__.__name__,
' '.join(out)
)
# facility
@property
def pos(self):
'''Return position of the touch in the screen coordinate
system (self.x, self.y)'''
return self.x, self.y
@property
def dpos(self):
'''Return previous position of the touch in the
screen coordinate system (self.dxpos, self.dypos)'''
return self.dxpos, self.dypos
@property
def opos(self):
'''Return the initial position of the touch in the screen
coordinate system (self.oxpos, self.oypos)'''
return self.oxpos, self.oypos
@property
def spos(self):
'''Return the position in the 0-1 coordinate system
(self.sx, self.sy)'''
return self.sx, self.sy
# compatibility bridge
xpos = property(lambda self: self.x)
ypos = property(lambda self: self.y)
blobID = property(lambda self: self.id)
| nuigroup/pymt-widgets | pymt/input/touch.py | Python | lgpl-3.0 | 9,232 |
#!/usr/bin/python
"""
Script that allows turning the My_Homessistant app on/off by stopping Flask
and setting the state in myh.json used by the manager
"""
import getopt
import json
import logging
import os
import subprocess
import sys
from logging.handlers import RotatingFileHandler
import psutil
def helper():
msg = """
Manage MyHomessistant services
--help|-h|-?
Display help
--list|-l
List of services to send signal to
--signal|-s
Send the corresponding signal to MyHomessistant services
--debug|-d
Print logs also into console
:Examples:
myh.py -h
myh.py -s status -l flask,app
myh.py -s stop -l flask
myh.py -s start -l all
"""
print msg
def mylogger(debug):
# Logger
log_formatter = logging.Formatter('%(asctime)s %(levelname)s %(funcName)s(%(lineno)d) %(message)s')
logfile = os.path.join(os.environ["MYH_HOME"], 'logs', 'manager.log')
file_handler = RotatingFileHandler(logfile, mode='a', maxBytes=5 * 1024 * 1024,
backupCount=2, encoding=None, delay=0)
file_handler.setFormatter(log_formatter)
file_handler.setLevel(logging.DEBUG)
mylogger = logging.getLogger('myh')
mylogger.setLevel(logging.DEBUG)
if debug:
console_handler = logging.StreamHandler()
console_handler.setFormatter(log_formatter)
mylogger.addHandler(console_handler)
mylogger.addHandler(file_handler)
return mylogger
def flask_is_up():
cmd = "bash $MYH_HOME/etc/init.d/myh_check_flask.sh"
logger.debug("Verify flask running.")
logger.debug("Command launched : " + cmd)
process = subprocess.Popen(cmd, shell=True)
process.wait()
return process.returncode
if __name__ == "__main__":
help = False
debug = False
sig = "status"
service_list = "all"
# Get arguments
try:
        opts, args = getopt.getopt(sys.argv[1:], "hl:s:d",
                                   ["help", "list=", "signal=", "debug"])
except getopt.GetoptError:
helper()
exit(2)
for opt, arg in opts:
if opt in ("-h", "--help"):
help = True
elif opt in ("-l", "--list"):
service_list = arg
elif opt in ("-s", "--signal"):
sig = arg
elif opt in ("-d", "--debug"):
debug = True
logger = mylogger(debug)
if not sig in ["status", "start", "restart", "stop"]:
logger.error("Wrong signal %s" % sig)
sys.exit(1)
if help:
helper()
sys.exit(0)
if sig == "restart":
cmd = "python %s -s stop -l %s" % (os.path.abspath(__file__), service_list)
process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
process.wait()
cmd_exit = process.returncode
logger.debug("python stop exit code %s " % str(cmd_exit))
cmd = "python %s -s start -l %s" % (os.path.abspath(__file__), service_list)
process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
process.wait()
cmd_exit = process.returncode
logger.debug("python stop exit code %s " % str(cmd_exit))
exit(cmd_exit)
# Do the work
myh_file = os.path.join(os.environ["MYH_HOME"], "data", "myh.json")
if service_list == "all":
service_list = "flask,app"
for service in service_list.split(','):
with open(myh_file, 'r') as myh_file_data:
myh_dict = json.load(myh_file_data)
if service == "flask":
if sig == "status":
if myh_dict["flask_state"].lower() == "on":
logger.info("Flask should be running")
if flask_is_up() != 0:
logger.info("Flask is actually stopped")
else:
logger.info("Flask is not running")
continue
if myh_dict["flask_state"].lower() == "on":
if sig == "start":
# Verification
if flask_is_up() != 0:
myh_dict["flask_state"] = "OFF"
with open(myh_file, 'w') as f:
                            json.dump(myh_dict, f)
logger.info("Relaunch flask")
# Relaunch flask
cmd = "python %s -s start -l flask" % (os.path.abspath(__file__))
process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
process.wait()
cmd_exit = process.returncode
logger.debug("Python start Flask exit code %s " % str(cmd_exit))
else:
logger.info("Flask is already running")
elif sig == "stop":
flask_pid = myh_dict["flask_pid"]
if str(flask_pid) == "-1":
# App not really running
myh_dict["flask_state"] = "OFF"
with open(myh_file, 'w') as f:
                            json.dump(myh_dict, f)
logger.debug("flask_pid at -1 means flask is not really running" + str(process.pid))
continue
try:
os.kill(flask_pid, 0)
except OSError:
logger.warning("Flask is not running at " + str(flask_pid))
continue
try:
parent = psutil.Process(flask_pid)
for child in parent.children(recursive=True):
child.kill()
parent.kill()
logger.info("Flask has been stopped")
except OSError:
logger.warning("Error while killing flask at " + str(flask_pid))
else:
# Flask state is off
if sig == "stop":
logger.info("Flask is already stopped")
elif sig == "start":
rest_api = os.path.join(os.environ["MYH_HOME"], "api", "rest.py")
cmd = "python %s" % rest_api
logger.debug("Command launched : " + cmd)
process = subprocess.Popen(cmd, shell=True)
myh_dict["flask_pid"] = int(process.pid)
myh_dict["flask_state"] = "ON"
with open(myh_file, 'w') as f:
                        json.dump(myh_dict, f)
logger.info("Flask has been launched at pid " + str(process.pid))
elif service == "app":
if sig == "status":
if myh_dict["app_state"].lower() == "on":
logger.info("App is running")
else:
logger.info("App is not running")
continue
if myh_dict["app_state"].lower() == "on":
if sig == "start":
logger.info("App is already running")
elif sig == "stop":
myh_dict["app_state"] = "OFF"
with open(myh_file, 'w') as f:
json.dump(myh_dict, f)
logger.info("App turned off")
else:
# Flask state is off
if sig == "stop":
logger.info("App is already stopped")
elif sig == "start":
myh_dict["app_state"] = "ON"
with open(myh_file, 'w') as f:
json.dump(myh_dict, f)
logger.info("App turned on")
else:
logger.error("wrong service %s" % service)
sys.exit(2)
| vchatela/My-Homessistant | etc/init.d/myh.py | Python | mit | 7,826 |
from django.core.urlresolvers import reverse
from sentry.app import tsdb
from sentry.testutils import APITestCase
class TeamStatsTest(APITestCase):
def test_simple(self):
self.login_as(user=self.user)
team = self.create_team(name='foo')
project_1 = self.create_project(team=team, name='a')
project_2 = self.create_project(team=team, name='b')
team_2 = self.create_team(name='bar')
project_3 = self.create_project(team=team_2, name='c')
tsdb.incr(tsdb.models.project, project_1.id, count=3)
tsdb.incr(tsdb.models.project, project_2.id, count=5)
tsdb.incr(tsdb.models.project, project_3.id, count=10)
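        # Only project_1 and project_2 belong to `team`, so the team total
        # should be 3 + 5 = 8; project_3 sits on another team.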
url = reverse('sentry-api-0-team-stats', kwargs={
'organization_slug': team.organization.slug,
'team_slug': team.slug,
})
response = self.client.get(url, format='json')
assert response.status_code == 200, response.content
assert response.data[-1][1] == 8, response.data
for point in response.data[:-1]:
assert point[1] == 0
assert len(response.data) == 24
| Kryz/sentry | tests/sentry/api/endpoints/test_team_stats.py | Python | bsd-3-clause | 1,130 |
from unittest import TestCase, main
from socketio.namespace import BaseNamespace
from socketio.virtsocket import Socket
from mock import MagicMock
class MockSocketIOServer(object):
"""Mock a SocketIO server"""
def __init__(self, *args, **kwargs):
self.sockets = {}
def get_socket(self, socket_id=''):
return self.sockets.get(socket_id)
class MockSocket(Socket):
pass
class ChatNamespace(BaseNamespace):
def __init__(self, *args, **kwargs):
self.use_set = args[0]
super(ChatNamespace, self).__init__(*args[1:], **kwargs)
def get_initial_acl(self):
acls = ['on_foo']
if self.use_set == True:
return set(acls)
else:
return acls
def on_foo(self):
return 'a'
def on_bar(self):
return 'b'
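    # Deliberately missing `self`: exercised by the invalid-method test below.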
def on_baz(foo, bar, baz):
return 'c'
class GlobalNamespace(BaseNamespace):
def on_woot(self):
return ''
def on_tobi(self):
return ''
class TestBaseNamespace(TestCase):
def setUp(self):
server = MockSocketIOServer()
self.environ = {}
socket = MockSocket(server, {})
socket.error = MagicMock()
self.environ['socketio'] = socket
self.ns = GlobalNamespace(self.environ, '/woot')
def test_process_packet_disconnect(self):
pkt = {'type': 'disconnect',
'endpoint': '/woot'
}
self.ns.process_packet(pkt)
assert not self.environ['socketio'].error.called
def test_process_packet_connect(self):
"""processing a connection packet """
pkt = {'type': 'connect',
'endpoint': '/tobi',
'qs': ''
}
self.ns.process_packet(pkt)
assert not self.environ['socketio'].error.called
# processing a connection packet with query string
pkt = {'type': 'connect',
'endpoint': '/test',
'qs': '?test=1'
}
self.ns.process_packet(pkt)
assert not self.environ['socketio'].error.called
def test_process_packet_heartbeat(self):
"""processing a heartbeat packet """
pkt = {'type': 'heartbeat',
'endpoint': ''
}
self.ns.process_packet(pkt)
assert not self.environ['socketio'].error.called
def test_process_packet_message(self):
"""processing a message packet """
pkt = {'type': 'message',
'endpoint': '',
'data': 'woot'}
data = self.ns.process_packet(pkt)
self.assertEqual(data, pkt['data'])
assert not self.environ['socketio'].error.called
# processing a message packet with id and endpoint
pkt = {'type': 'message',
'id': 5,
'ack': True,
'endpoint': '/tobi',
'data': ''}
data = self.ns.process_packet(pkt)
self.assertEqual(data, pkt['data'])
assert not self.environ['socketio'].error.called
def test_process_packet_json(self):
"""processing json packet """
pkt = {'type': 'json',
'endpoint': '',
'data': '2'}
data = self.ns.process_packet(pkt)
self.assertEqual(data, pkt['data'])
assert not self.environ['socketio'].error.called
# processing json packet with message id and ack data
pkt = {'type': 'json',
'id': 1,
'endpoint': '',
'ack': 'data',
'data': {u'a': u'b'}}
data = self.ns.process_packet(pkt)
self.assertEqual(data, pkt['data'])
assert not self.environ['socketio'].error.called
def test_process_packet_event(self):
"""processing an event packet """
pkt = {'type': 'event',
'name': 'woot',
'endpoint': '',
'args': []}
self.ns.process_packet(pkt)
assert not self.environ['socketio'].error.called
# processing an event packet with message id and ack
pkt = {'type': 'event',
'id': 1,
'ack': 'data',
'name': 'tobi',
'endpoint': '',
'args': []}
self.ns.process_packet(pkt)
assert not self.environ['socketio'].error.called
def test_process_packet_ack(self):
"""processing a ack packet """
pkt = {'type': 'ack',
'ackId': 140,
'endpoint': '',
'args': []}
self.ns.process_packet(pkt)
assert not self.environ['socketio'].error.called
def test_process_packet_error(self):
"""processing error packet """
pkt = {'type': 'error',
'reason': '',
'advice': '',
'endpoint': ''}
self.ns.process_packet(pkt)
pkt = {'type': 'error',
'reason': 'transport not supported',
'advice': '',
'endpoint': ''}
self.ns.process_packet(pkt)
# processing error packet with reason and advice
pkt = {'type': 'error',
'reason': 'unauthorized',
'advice': 'reconnect',
'endpoint': ''}
self.ns.process_packet(pkt)
# processing error packet with endpoint
pkt = {'type': 'error',
'reason': '',
'advice': '',
'endpoint': '/woot'}
self.ns.process_packet(pkt)
def test_process_packet_message_with_new_line(self):
"""processing a newline in a message"""
pkt = {'type': 'message',
'data': '\n',
'endpoint': ''}
self.ns.process_packet(pkt)
assert not self.environ['socketio'].error.called
def test_del_acl_method(self):
pkt = {'type': 'event',
'name': 'foo',
'endpoint': '/chat',
'args': []}
message = ("Trying to delete an ACL method, but none were"
+ " defined yet! Or: No ACL restrictions yet, why would you"
+ " delete one?")
try:
self.ns.del_acl_method('on_foo')
self.ns.process_packet(pkt)
except ValueError as e:
self.assertEqual(
message,
e.message,
)
else:
raise Exception("""We should not be able to delete an acl that
doesn't exist""")
def test_allowed_event_name_regex(self):
pkt = {'type': 'event',
'name': '$foo',
'endpoint': '/chat',
'args': []}
self.ns.process_packet(pkt)
args = ['unallowed_event_name',
'name must only contains alpha numerical characters',
]
kwargs = dict(msg_id=None, endpoint='/woot', quiet=False)
self.environ['socketio'].error.assert_called_with(*args, **kwargs)
def test_method_not_found(self):
""" test calling a method that doesn't exist """
pkt = {'type': 'event',
'name': 'foo',
'endpoint': '/chat',
'args': []
}
self.ns.process_packet(pkt)
kwargs = dict(
msg_id=None,
endpoint='/woot',
quiet=False
)
self.environ['socketio'].error.assert_called_with(
'no_such_method',
'The method "%s" was not found' % 'on_foo',
**kwargs
)
class TestChatNamespace(TestCase):
def setUp(self):
server = MockSocketIOServer()
self.environ = {}
socket = MockSocket(server, {})
socket.error = MagicMock()
self.environ['socketio'] = socket
self.ns = ChatNamespace(
False,
self.environ,
'/chat'
)
def test_allowed_event(self):
pkt = {'type': 'event',
'name': 'foo',
'endpoint': '/chat',
'args': []}
self.ns.process_packet(pkt)
assert not self.environ['socketio'].error.called
def test_blocked_event(self):
pkt = {'type': 'event',
'name': 'bar',
'endpoint': '/chat',
'args': []}
self.ns.process_packet(pkt)
args = [
'method_access_denied',
'You do not have access to method "on_bar"',
]
kwargs = dict(
msg_id=None,
endpoint='/chat',
quiet=False
)
self.environ['socketio'].error.assert_called_with(*args, **kwargs)
def test_add_acl_method(self):
pkt = {'type': 'event',
'name': 'bar',
'endpoint': '/chat',
'args': []}
self.ns.add_acl_method('on_bar')
self.ns.process_packet(pkt)
assert not self.environ['socketio'].error.called
def test_del_acl_method(self):
pkt = {'type': 'event',
'name': 'foo',
'endpoint': '/chat',
'args': []}
self.ns.del_acl_method('on_foo')
self.ns.process_packet(pkt)
args = [
'method_access_denied',
'You do not have access to method "on_foo"',
]
kwargs = dict(
msg_id=None,
endpoint='/chat',
quiet=False
)
self.environ['socketio'].error.assert_called_with(*args, **kwargs)
def test_lift_acl_restrictions(self):
pkt1 = {'type': 'event',
'name': 'foo',
'endpoint': '/chat',
'args': []}
self.ns.lift_acl_restrictions()
self.ns.process_packet(pkt1)
assert not self.environ['socketio'].error.called
pkt2 = {'type': 'event',
'name': 'bar',
'endpoint': '/chat',
'args': []}
self.ns.process_packet(pkt2)
assert not self.environ['socketio'].error.called
def test_use_set_on_acl(self):
self.ns = ChatNamespace(
True,
self.environ,
'/chat'
)
pkt = {'type': 'event',
'name': 'bar',
'endpoint': '/chat',
'args': []}
self.ns.add_acl_method('on_bar')
self.ns.process_packet(pkt)
assert not self.environ['socketio'].error.called
def test_call_method_invalid_definition(self):
pkt = {'type': 'event',
'name': 'baz',
'endpoint': '/chat',
'args': []}
self.ns.add_acl_method('on_baz')
self.ns.process_packet(pkt)
kwargs = dict(msg_id=None, endpoint='/chat', quiet=False)
self.environ['socketio'].error.assert_called_with(
"invalid_method_args",
"The server-side method is invalid, as it doesn't "
"have 'self' as its first argument"
, **kwargs)
if __name__ == '__main__':
main()
| grokcore/dev.lexycross | wordsmithed/src/gevent-socketio/tests/test_namespace.py | Python | mit | 11,026 |
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
from base64 import decodestring
import os
import re
from warnings import warn
from qtconsole.qt import QtCore, QtGui
from ipython_genutils.path import ensure_dir_exists
from traitlets import Bool
from qtconsole.svg import save_svg, svg_to_clipboard, svg_to_image
from .jupyter_widget import JupyterWidget
try:
from IPython.lib.latextools import latex_to_png
except ImportError:
latex_to_png = None
class LatexError(Exception):
"""Exception for Latex errors"""
class RichIPythonWidget(JupyterWidget):
"""Dummy class for config inheritance. Destroyed below."""
class RichJupyterWidget(RichIPythonWidget):
""" An JupyterWidget that supports rich text, including lists, images, and
tables. Note that raw performance will be reduced compared to the plain
text version.
"""
# RichJupyterWidget protected class variables.
_payload_source_plot = 'ipykernel.pylab.backend_payload.add_plot_payload'
_jpg_supported = Bool(False)
# Used to determine whether a given html export attempt has already
# displayed a warning about being unable to convert a png to svg.
_svg_warning_displayed = False
#---------------------------------------------------------------------------
# 'object' interface
#---------------------------------------------------------------------------
def __init__(self, *args, **kw):
""" Create a RichJupyterWidget.
"""
kw['kind'] = 'rich'
super(RichJupyterWidget, self).__init__(*args, **kw)
# Configure the ConsoleWidget HTML exporter for our formats.
self._html_exporter.image_tag = self._get_image_tag
# Dictionary for resolving document resource names to SVG data.
self._name_to_svg_map = {}
# Do we support jpg ?
# it seems that sometime jpg support is a plugin of QT, so try to assume
# it is not always supported.
self._jpg_supported = 'jpeg' in QtGui.QImageReader.supportedImageFormats()
#---------------------------------------------------------------------------
# 'ConsoleWidget' public interface overides
#---------------------------------------------------------------------------
def export_html(self):
""" Shows a dialog to export HTML/XML in various formats.
Overridden in order to reset the _svg_warning_displayed flag prior
to the export running.
"""
self._svg_warning_displayed = False
super(RichJupyterWidget, self).export_html()
#---------------------------------------------------------------------------
# 'ConsoleWidget' protected interface
#---------------------------------------------------------------------------
def _context_menu_make(self, pos):
""" Reimplemented to return a custom context menu for images.
"""
format = self._control.cursorForPosition(pos).charFormat()
name = format.stringProperty(QtGui.QTextFormat.ImageName)
if name:
menu = QtGui.QMenu()
menu.addAction('Copy Image', lambda: self._copy_image(name))
menu.addAction('Save Image As...', lambda: self._save_image(name))
menu.addSeparator()
svg = self._name_to_svg_map.get(name, None)
if svg is not None:
menu.addSeparator()
menu.addAction('Copy SVG', lambda: svg_to_clipboard(svg))
menu.addAction('Save SVG As...',
lambda: save_svg(svg, self._control))
else:
menu = super(RichJupyterWidget, self)._context_menu_make(pos)
return menu
#---------------------------------------------------------------------------
# 'BaseFrontendMixin' abstract interface
#---------------------------------------------------------------------------
def _pre_image_append(self, msg, prompt_number):
"""Append the Out[] prompt and make the output nicer
Shared code for some the following if statement
"""
self._append_plain_text(self.output_sep, True)
self._append_html(self._make_out_prompt(prompt_number), True)
self._append_plain_text('\n', True)
def _handle_execute_result(self, msg):
"""Overridden to handle rich data types, like SVG."""
self.log.debug("execute_result: %s", msg.get('content', ''))
if self.include_output(msg):
self.flush_clearoutput()
content = msg['content']
prompt_number = content.get('execution_count', 0)
data = content['data']
metadata = msg['content']['metadata']
if 'image/svg+xml' in data:
self._pre_image_append(msg, prompt_number)
self._append_svg(data['image/svg+xml'], True)
self._append_html(self.output_sep2, True)
elif 'image/png' in data:
self._pre_image_append(msg, prompt_number)
png = decodestring(data['image/png'].encode('ascii'))
self._append_png(png, True, metadata=metadata.get('image/png', None))
self._append_html(self.output_sep2, True)
elif 'image/jpeg' in data and self._jpg_supported:
self._pre_image_append(msg, prompt_number)
jpg = decodestring(data['image/jpeg'].encode('ascii'))
self._append_jpg(jpg, True, metadata=metadata.get('image/jpeg', None))
self._append_html(self.output_sep2, True)
elif 'text/latex' in data:
self._pre_image_append(msg, prompt_number)
try:
self._append_latex(data['text/latex'], True)
except LatexError:
return super(RichJupyterWidget, self)._handle_display_data(msg)
self._append_html(self.output_sep2, True)
else:
# Default back to the plain text representation.
return super(RichJupyterWidget, self)._handle_execute_result(msg)
def _handle_display_data(self, msg):
"""Overridden to handle rich data types, like SVG."""
self.log.debug("display_data: %s", msg.get('content', ''))
if self.include_output(msg):
self.flush_clearoutput()
data = msg['content']['data']
metadata = msg['content']['metadata']
# Try to use the svg or html representations.
# FIXME: Is this the right ordering of things to try?
self.log.debug("display: %s", msg.get('content', ''))
if 'image/svg+xml' in data:
svg = data['image/svg+xml']
self._append_svg(svg, True)
elif 'image/png' in data:
# PNG data is base64 encoded as it passes over the network
# in a JSON structure so we decode it.
png = decodestring(data['image/png'].encode('ascii'))
self._append_png(png, True, metadata=metadata.get('image/png', None))
elif 'image/jpeg' in data and self._jpg_supported:
jpg = decodestring(data['image/jpeg'].encode('ascii'))
self._append_jpg(jpg, True, metadata=metadata.get('image/jpeg', None))
elif 'text/latex' in data and latex_to_png:
try:
self._append_latex(data['text/latex'], True)
except LatexError:
return super(RichJupyterWidget, self)._handle_display_data(msg)
else:
# Default back to the plain text representation.
return super(RichJupyterWidget, self)._handle_display_data(msg)
#---------------------------------------------------------------------------
# 'RichJupyterWidget' protected interface
#---------------------------------------------------------------------------
def _is_latex_math(self, latex):
"""
Determine if a Latex string is in math mode
This is the only mode supported by qtconsole
"""
basic_envs = ['math', 'displaymath']
        starable_envs = ['equation', 'eqnarray', 'multline', 'gather', 'align',
                         'flalign', 'alignat']
star_envs = [env + '*' for env in starable_envs]
envs = basic_envs + starable_envs + star_envs
env_syntax = [r'\begin{{{0}}} \end{{{0}}}'.format(env).split() for env in envs]
math_syntax = [
(r'\[', r'\]'), (r'\(', r'\)'),
('$$', '$$'), ('$', '$'),
]
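        # Accept only strings wrapped in exactly one delimiter pair; if the
        # delimiter reappears inside (e.g. "$a$ + $b$"), treat it as multiple
        # math segments rather than a single math block.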
for start, end in math_syntax + env_syntax:
inner = latex[len(start):-len(end)]
if start in inner or end in inner:
return False
if latex.startswith(start) and latex.endswith(end):
return True
return False
def _append_latex(self, latex, before_prompt=False, metadata=None):
""" Append latex data to the widget."""
png = None
if self._is_latex_math(latex):
png = latex_to_png(latex, wrap=False, backend='dvipng')
if png is None and latex.startswith('$') and latex.endswith('$'):
# matplotlib only supports strings enclosed in dollar signs
png = latex_to_png(latex, wrap=False, backend='matplotlib')
if png:
self._append_png(png, before_prompt, metadata)
else:
raise LatexError
def _append_jpg(self, jpg, before_prompt=False, metadata=None):
""" Append raw JPG data to the widget."""
self._append_custom(self._insert_jpg, jpg, before_prompt, metadata=metadata)
def _append_png(self, png, before_prompt=False, metadata=None):
""" Append raw PNG data to the widget.
"""
self._append_custom(self._insert_png, png, before_prompt, metadata=metadata)
def _append_svg(self, svg, before_prompt=False):
""" Append raw SVG data to the widget.
"""
self._append_custom(self._insert_svg, svg, before_prompt)
def _add_image(self, image):
""" Adds the specified QImage to the document and returns a
QTextImageFormat that references it.
"""
document = self._control.document()
name = str(image.cacheKey())
document.addResource(QtGui.QTextDocument.ImageResource,
QtCore.QUrl(name), image)
format = QtGui.QTextImageFormat()
format.setName(name)
return format
def _copy_image(self, name):
""" Copies the ImageResource with 'name' to the clipboard.
"""
image = self._get_image(name)
QtGui.QApplication.clipboard().setImage(image)
def _get_image(self, name):
""" Returns the QImage stored as the ImageResource with 'name'.
"""
document = self._control.document()
image = document.resource(QtGui.QTextDocument.ImageResource,
QtCore.QUrl(name))
return image
def _get_image_tag(self, match, path = None, format = "png"):
""" Return (X)HTML mark-up for the image-tag given by match.
Parameters
----------
match : re.SRE_Match
A match to an HTML image tag as exported by Qt, with
match.group("Name") containing the matched image ID.
path : string|None, optional [default None]
If not None, specifies a path to which supporting files may be
written (e.g., for linked images). If None, all images are to be
included inline.
format : "png"|"svg"|"jpg", optional [default "png"]
Format for returned or referenced images.
"""
if format in ("png","jpg"):
try:
image = self._get_image(match.group("name"))
except KeyError:
return "<b>Couldn't find image %s</b>" % match.group("name")
if path is not None:
ensure_dir_exists(path)
relpath = os.path.basename(path)
if image.save("%s/qt_img%s.%s" % (path, match.group("name"), format),
"PNG"):
return '<img src="%s/qt_img%s.%s">' % (relpath,
match.group("name"),format)
else:
return "<b>Couldn't save image!</b>"
else:
ba = QtCore.QByteArray()
buffer_ = QtCore.QBuffer(ba)
buffer_.open(QtCore.QIODevice.WriteOnly)
image.save(buffer_, format.upper())
buffer_.close()
return '<img src="data:image/%s;base64,\n%s\n" />' % (
format,re.sub(r'(.{60})',r'\1\n', str(ba.toBase64().data().decode())))
elif format == "svg":
try:
svg = str(self._name_to_svg_map[match.group("name")])
except KeyError:
if not self._svg_warning_displayed:
QtGui.QMessageBox.warning(self, 'Error converting PNG to SVG.',
'Cannot convert PNG images to SVG, export with PNG figures instead. '
'If you want to export matplotlib figures as SVG, add '
'to your ipython config:\n\n'
'\tc.InlineBackend.figure_format = \'svg\'\n\n'
'And regenerate the figures.',
QtGui.QMessageBox.Ok)
self._svg_warning_displayed = True
return ("<b>Cannot convert PNG images to SVG.</b> "
"You must export this session with PNG images. "
"If you want to export matplotlib figures as SVG, add to your config "
"<span>c.InlineBackend.figure_format = 'svg'</span> "
"and regenerate the figures.")
# Not currently checking path, because it's tricky to find a
# cross-browser way to embed external SVG images (e.g., via
# object or embed tags).
# Chop stand-alone header from matplotlib SVG
offset = svg.find("<svg")
assert(offset > -1)
return svg[offset:]
else:
return '<b>Unrecognized image format</b>'
def _insert_jpg(self, cursor, jpg, metadata=None):
""" Insert raw PNG data into the widget."""
self._insert_img(cursor, jpg, 'jpg', metadata=metadata)
def _insert_png(self, cursor, png, metadata=None):
""" Insert raw PNG data into the widget.
"""
self._insert_img(cursor, png, 'png', metadata=metadata)
def _insert_img(self, cursor, img, fmt, metadata=None):
""" insert a raw image, jpg or png """
if metadata:
width = metadata.get('width', None)
height = metadata.get('height', None)
else:
width = height = None
try:
image = QtGui.QImage()
image.loadFromData(img, fmt.upper())
if width and height:
image = image.scaled(width, height, transformMode=QtCore.Qt.SmoothTransformation)
elif width and not height:
image = image.scaledToWidth(width, transformMode=QtCore.Qt.SmoothTransformation)
elif height and not width:
image = image.scaledToHeight(height, transformMode=QtCore.Qt.SmoothTransformation)
except ValueError:
self._insert_plain_text(cursor, 'Received invalid %s data.'%fmt)
else:
format = self._add_image(image)
cursor.insertBlock()
cursor.insertImage(format)
cursor.insertBlock()
def _insert_svg(self, cursor, svg):
""" Insert raw SVG data into the widet.
"""
try:
image = svg_to_image(svg)
except ValueError:
self._insert_plain_text(cursor, 'Received invalid SVG data.')
else:
format = self._add_image(image)
self._name_to_svg_map[format.name()] = svg
cursor.insertBlock()
cursor.insertImage(format)
cursor.insertBlock()
def _save_image(self, name, format='PNG'):
""" Shows a save dialog for the ImageResource with 'name'.
"""
dialog = QtGui.QFileDialog(self._control, 'Save Image')
dialog.setAcceptMode(QtGui.QFileDialog.AcceptSave)
dialog.setDefaultSuffix(format.lower())
dialog.setNameFilter('%s file (*.%s)' % (format, format.lower()))
if dialog.exec_():
filename = dialog.selectedFiles()[0]
image = self._get_image(name)
image.save(filename, format)
# clobber RichIPythonWidget above:
class RichIPythonWidget(RichJupyterWidget):
"""Deprecated class. Use RichJupyterWidget"""
def __init__(self, *a, **kw):
warn("RichIPythonWidget is deprecated, use RichJupyterWidget")
super(RichIPythonWidget, self).__init__(*a, **kw)
| nitin-cherian/LifeLongLearning | Python/PythonProgrammingLanguage/Encapsulation/encap_env/lib/python3.5/site-packages/qtconsole/rich_jupyter_widget.py | Python | mit | 17,100 |
###############################################################################
##
## Copyright 2013 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
from __future__ import absolute_import
__all__ = [
"PerMessageCompressOffer",
"PerMessageCompressOfferAccept",
"PerMessageCompressResponse",
"PerMessageCompressResponseAccept",
"PerMessageCompress",
"PerMessageDeflateOffer",
"PerMessageDeflateOfferAccept",
"PerMessageDeflateResponse",
"PerMessageDeflateResponseAccept",
"PerMessageDeflate",
"PERMESSAGE_COMPRESSION_EXTENSION"
]
from autobahn.websocket.compress_base import *
from autobahn.websocket.compress_deflate import *
## class for "permessage-deflate" is always available
##
PERMESSAGE_COMPRESSION_EXTENSION = {
PerMessageDeflateMixin.EXTENSION_NAME: {
'Offer': PerMessageDeflateOffer,
'OfferAccept': PerMessageDeflateOfferAccept,
'Response': PerMessageDeflateResponse,
'ResponseAccept': PerMessageDeflateResponseAccept,
'PMCE': PerMessageDeflate
}
}
## include "permessage-bzip2" classes if bzip2 is available
##
try:
import bz2
from autobahn.websocket.compress_bzip2 import *
PMCE = {
'Offer': PerMessageBzip2Offer,
'OfferAccept': PerMessageBzip2OfferAccept,
'Response': PerMessageBzip2Response,
'ResponseAccept': PerMessageBzip2ResponseAccept,
'PMCE': PerMessageBzip2
}
PERMESSAGE_COMPRESSION_EXTENSION[PerMessageBzip2Mixin.EXTENSION_NAME] = PMCE
__all__.extend(["PerMessageBzip2Offer",
"PerMessageBzip2OfferAccept",
"PerMessageBzip2Response",
"PerMessageBzip2ResponseAccept",
"PerMessageBzip2"])
except ImportError:
pass
## include "permessage-snappy" classes if Snappy is available
##
try:
import snappy
from autobahn.websocket.compress_snappy import *
PMCE = {
'Offer': PerMessageSnappyOffer,
'OfferAccept': PerMessageSnappyOfferAccept,
'Response': PerMessageSnappyResponse,
'ResponseAccept': PerMessageSnappyResponseAccept,
'PMCE': PerMessageSnappy
}
PERMESSAGE_COMPRESSION_EXTENSION[PerMessageSnappyMixin.EXTENSION_NAME] = PMCE
__all__.extend(["PerMessageSnappyOffer",
"PerMessageSnappyOfferAccept",
"PerMessageSnappyResponse",
"PerMessageSnappyResponseAccept",
"PerMessageSnappy"])
except ImportError:
pass
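# Example (minimal sketch, not part of autobahn): the registry above maps each
# negotiated extension name to its offer/response/PMCE classes, so negotiation
# code can stay table-driven. 'make_offer' below is a hypothetical helper.
#
#   def make_offer(extension_name):
#       entry = PERMESSAGE_COMPRESSION_EXTENSION.get(extension_name)
#       return entry['Offer']() if entry else None
#
#   offer = make_offer(PerMessageDeflateMixin.EXTENSION_NAME)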
| msmolens/VTK | ThirdParty/AutobahnPython/autobahn/websocket/compress.py | Python | bsd-3-clause | 3,077 |
"""
Project: django-use_x_forwarded_for
Author: Kellen Green
Date: 04/14/14
Version: 1.0.2
""" | kellengreen/django-use_x_forwarded_for | use_x_forwarded_for/__init__.py | Python | mit | 110 |
class _fake_sys:
def __init__(self):
self.stdin = process.stdin
self.stdout = process.stdout
self.stderr = process.stderr
self.argv = process.argv
def exit(self):
process.exit()
sys = _fake_sys()
| pombredanne/PythonJS | pythonjs/fakelibs/sys.py | Python | bsd-3-clause | 211 |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of Cluster Resolvers for Cloud TPUs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.cluster_resolver.python.training.cluster_resolver import ClusterResolver
from tensorflow.python.training.server_lib import ClusterSpec
_GOOGLE_API_CLIENT_INSTALLED = True
try:
from googleapiclient import discovery # pylint: disable=g-import-not-at-top
from oauth2client.client import GoogleCredentials # pylint: disable=g-import-not-at-top
except ImportError:
_GOOGLE_API_CLIENT_INSTALLED = False
class TPUClusterResolver(ClusterResolver):
"""Cluster Resolver for Google Cloud TPUs.
This is an implementation of cluster resolvers for the Google Cloud TPU
  service. As Cloud TPUs are in alpha, you will need to specify an API definition
file for this to consume, in addition to a list of Cloud TPUs in your Google
Cloud Platform project.
"""
def __init__(self,
project,
zone,
tpu_names,
job_name='tpu_worker',
credentials='default',
service=None):
"""Creates a new TPUClusterResolver object.
The ClusterResolver will then use the parameters to query the Cloud TPU APIs
for the IP addresses and ports of each Cloud TPU listed.
Args:
project: Name of the GCP project containing Cloud TPUs
zone: Zone where the TPUs are located
tpu_names: A list of names of the target Cloud TPUs.
job_name: Name of the TensorFlow job the TPUs belong to.
      credentials: GCE Credentials. If 'default' (the default), application
        default credentials are loaded via oauth2client.
service: The GCE API object returned by the googleapiclient.discovery
function. If you specify a custom service object, then the credentials
parameter will be ignored.
Raises:
ImportError: If the googleapiclient is not installed.
"""
self._project = project
self._zone = zone
self._tpu_names = tpu_names
self._job_name = job_name
self._credentials = credentials
if credentials == 'default':
if _GOOGLE_API_CLIENT_INSTALLED:
self._credentials = GoogleCredentials.get_application_default()
if service is None:
if not _GOOGLE_API_CLIENT_INSTALLED:
raise ImportError('googleapiclient must be installed before using the '
'TPU cluster resolver')
self._service = discovery.build(
'tpu', 'v1alpha1',
credentials=self._credentials)
else:
self._service = service
def get_master(self):
"""Get the ClusterSpec grpc master path.
    This returns the grpc path (grpc://1.2.3.4:8470) of the first instance in the
ClusterSpec returned by the cluster_spec function. This is suitable for use
for the `master` argument in tf.Session() when you are using one TPU.
Returns:
string, the grpc path of the first instance in the ClusterSpec.
Raises:
ValueError: If none of the TPUs specified exists.
"""
job_tasks = self.cluster_spec().job_tasks(self._job_name)
if not job_tasks:
      raise ValueError('No TPUs with the specified names exist.')
return 'grpc://' + job_tasks[0]
def cluster_spec(self):
"""Returns a ClusterSpec object based on the latest TPU information.
We retrieve the information from the GCE APIs every time this method is
called.
Returns:
A ClusterSpec containing host information returned from Cloud TPUs.
"""
worker_list = []
for tpu_name in self._tpu_names:
full_name = 'projects/%s/locations/%s/nodes/%s' % (
self._project, self._zone, tpu_name)
request = self._service.projects().locations().nodes().get(name=full_name)
response = request.execute()
instance_url = '%s:%s' % (response['ipAddress'], response['port'])
worker_list.append(instance_url)
return ClusterSpec({self._job_name: worker_list})
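# Example (minimal sketch, not part of TensorFlow; project/zone/TPU names are
# placeholders):
#
#   resolver = TPUClusterResolver(project='my-project', zone='us-central1-b',
#                                 tpu_names=['my-tpu'])
#   master = resolver.get_master()                 # e.g. 'grpc://10.240.1.2:8470'
#   cluster_def = resolver.cluster_spec().as_cluster_def()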
| jwlawson/tensorflow | tensorflow/contrib/cluster_resolver/python/training/tpu_cluster_resolver.py | Python | apache-2.0 | 4,680 |
from uchan.lib.cache import cache, cache_key, LocalCache
from uchan.lib.database import session
from uchan.lib.model import SiteConfigModel
from uchan.lib.ormmodel import ConfigOrmModel
MESSAGE_SITE_CONFIG_EXISTS = 'Site config already exists'
# def create_site(site_config: SiteConfigModel):
# with session() as s:
# exiting = s.query(ConfigOrmModel).filter_by(type='site').one_or_none()
# if exiting:
# raise ArgumentError(MESSAGE_SITE_CONFIG_EXISTS)
#
# m = site_config.to_orm_model()
# s.add(m)
# r = SiteConfigModel.from_orm_model(m)
# s.commit()
# return r
local_site_config_cache = LocalCache()
def update_site(site_config: SiteConfigModel):
with session() as s:
s.merge(site_config.to_orm_model())
s.commit()
cache.set(cache_key('config_site'), site_config.to_cache())
def get_site() -> SiteConfigModel:
local_cached = local_site_config_cache.get('site_config')
if local_cached:
return local_cached.copy()
cached = cache.get(cache_key('config_site'))
if cached:
res = SiteConfigModel.from_cache(cached)
else:
with session() as s:
m = s.query(ConfigOrmModel).filter_by(type='site').one_or_none()
if m:
res = SiteConfigModel.from_orm_model(m)
else:
res = SiteConfigModel.from_defaults()
s.commit()
cache.set(cache_key('config_site'), res.to_cache())
local_site_config_cache.set('site_config', res)
return res
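# Example (sketch, assuming an initialised cache and database): get_site() reads
# through three layers - the per-process LocalCache, the shared cache, then the
# ORM - while update_site() writes the database first and refreshes the shared
# cache afterwards, so readers never see a shared-cache entry newer than the
# database row.
#
#   site_config = get_site()
#   site_config.motd = 'maintenance tonight'   # 'motd' is a hypothetical field
#   update_site(site_config)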
| Floens/uchan | uchan/lib/repository/configs.py | Python | mit | 1,570 |
#!/usr/bin/python
# Terminator by Chris Jones <[email protected]>
# GPL v2 only
"""titlebar.py - classes necessary to provide a terminal title bar"""
from gi.repository import Gtk, Gdk
from gi.repository import GObject
from gi.repository import Pango
import random
import itertools
from version import APP_NAME
from util import dbg
from terminator import Terminator
from editablelabel import EditableLabel
from translation import _
# pylint: disable-msg=R0904
# pylint: disable-msg=W0613
class Titlebar(Gtk.EventBox):
"""Class implementing the Titlebar widget"""
terminator = None
terminal = None
config = None
oldtitle = None
termtext = None
sizetext = None
label = None
ebox = None
groupicon = None
grouplabel = None
groupentry = None
bellicon = None
__gsignals__ = {
'clicked': (GObject.SignalFlags.RUN_LAST, None, ()),
'edit-done': (GObject.SignalFlags.RUN_LAST, None, ()),
'create-group': (GObject.SignalFlags.RUN_LAST, None,
(GObject.TYPE_STRING,)),
}
def __init__(self, terminal):
"""Class initialiser"""
GObject.GObject.__init__(self)
self.terminator = Terminator()
self.terminal = terminal
self.config = self.terminal.config
self.label = EditableLabel()
self.label.connect('edit-done', self.on_edit_done)
self.ebox = Gtk.EventBox()
grouphbox = Gtk.HBox()
self.grouplabel = Gtk.Label(ellipsize='end')
self.groupicon = Gtk.Image()
self.bellicon = Gtk.Image()
self.bellicon.set_no_show_all(True)
self.groupentry = Gtk.Entry()
self.groupentry.set_no_show_all(True)
self.groupentry.connect('focus-out-event', self.groupentry_cancel)
self.groupentry.connect('activate', self.groupentry_activate)
self.groupentry.connect('key-press-event', self.groupentry_keypress)
groupsend_type = self.terminator.groupsend_type
if self.terminator.groupsend == groupsend_type['all']:
icon_name = 'all'
elif self.terminator.groupsend == groupsend_type['group']:
icon_name = 'group'
elif self.terminator.groupsend == groupsend_type['off']:
icon_name = 'off'
self.set_from_icon_name('_active_broadcast_%s' % icon_name,
Gtk.IconSize.MENU)
grouphbox.pack_start(self.groupicon, False, True, 2)
grouphbox.pack_start(self.grouplabel, False, True, 2)
grouphbox.pack_start(self.groupentry, False, True, 2)
self.ebox.add(grouphbox)
self.ebox.show_all()
self.bellicon.set_from_icon_name('terminal-bell', Gtk.IconSize.MENU)
viewport = Gtk.Viewport(hscroll_policy='natural')
viewport.add(self.label)
hbox = Gtk.HBox()
hbox.pack_start(self.ebox, False, True, 0)
hbox.pack_start(Gtk.VSeparator(), False, True, 0)
hbox.pack_start(viewport, True, True, 0)
hbox.pack_end(self.bellicon, False, False, 2)
self.add(hbox)
hbox.show_all()
self.set_no_show_all(True)
self.show()
self.connect('button-press-event', self.on_clicked)
def connect_icon(self, func):
"""Connect the supplied function to clicking on the group icon"""
self.ebox.connect('button-press-event', func)
def update(self, other=None):
"""Update our contents"""
default_bg = False
if self.config['title_hide_sizetext']:
self.label.set_text("%s" % self.termtext)
else:
self.label.set_text("%s %s" % (self.termtext, self.sizetext))
if (not self.config['title_use_system_font']) and self.config['title_font']:
title_font = Pango.FontDescription(self.config['title_font'])
else:
title_font = Pango.FontDescription(self.config.get_system_prop_font())
self.label.modify_font(title_font)
self.grouplabel.modify_font(title_font)
if other:
term = self.terminal
terminator = self.terminator
if other == 'window-focus-out':
title_fg = self.config['title_inactive_fg_color']
title_bg = self.config['title_inactive_bg_color']
icon = '_receive_off'
default_bg = True
group_fg = self.config['title_inactive_fg_color']
group_bg = self.config['title_inactive_bg_color']
elif term != other and term.group and term.group == other.group:
if terminator.groupsend == terminator.groupsend_type['off']:
title_fg = self.config['title_inactive_fg_color']
title_bg = self.config['title_inactive_bg_color']
icon = '_receive_off'
default_bg = True
else:
title_fg = self.config['title_receive_fg_color']
title_bg = self.config['title_receive_bg_color']
icon = '_receive_on'
group_fg = self.config['title_receive_fg_color']
group_bg = self.config['title_receive_bg_color']
elif term != other and not term.group or term.group != other.group:
if terminator.groupsend == terminator.groupsend_type['all']:
title_fg = self.config['title_receive_fg_color']
title_bg = self.config['title_receive_bg_color']
icon = '_receive_on'
else:
title_fg = self.config['title_inactive_fg_color']
title_bg = self.config['title_inactive_bg_color']
icon = '_receive_off'
default_bg = True
group_fg = self.config['title_inactive_fg_color']
group_bg = self.config['title_inactive_bg_color']
else:
# We're the active terminal
title_fg = self.config['title_transmit_fg_color']
title_bg = self.config['title_transmit_bg_color']
if terminator.groupsend == terminator.groupsend_type['all']:
icon = '_active_broadcast_all'
elif terminator.groupsend == terminator.groupsend_type['group']:
icon = '_active_broadcast_group'
else:
icon = '_active_broadcast_off'
group_fg = self.config['title_transmit_fg_color']
group_bg = self.config['title_transmit_bg_color']
self.label.modify_fg(Gtk.StateType.NORMAL,
Gdk.color_parse(title_fg))
self.grouplabel.modify_fg(Gtk.StateType.NORMAL,
Gdk.color_parse(group_fg))
self.modify_bg(Gtk.StateType.NORMAL,
Gdk.color_parse(title_bg))
if not self.get_desired_visibility():
if default_bg == True:
color = term.get_style_context().get_background_color(Gtk.StateType.NORMAL) # VERIFY FOR GTK3
else:
color = Gdk.color_parse(title_bg)
self.update_visibility()
self.ebox.modify_bg(Gtk.StateType.NORMAL,
Gdk.color_parse(group_bg))
self.set_from_icon_name(icon, Gtk.IconSize.MENU)
def update_visibility(self):
"""Make the titlebar be visible or not"""
if not self.get_desired_visibility():
dbg('hiding titlebar')
self.hide()
self.label.hide()
else:
dbg('showing titlebar')
self.show()
self.label.show()
def get_desired_visibility(self):
"""Returns True if the titlebar is supposed to be visible. False if
not"""
        if self.editing() or self.terminal.group:
dbg('implicit desired visibility')
return(True)
else:
dbg('configured visibility: %s' % self.config['show_titlebar'])
return(self.config['show_titlebar'])
def set_from_icon_name(self, name, size = Gtk.IconSize.MENU):
"""Set an icon for the group label"""
if not name:
self.groupicon.hide()
return
self.groupicon.set_from_icon_name(APP_NAME + name, size)
self.groupicon.show()
def update_terminal_size(self, width, height):
"""Update the displayed terminal size"""
self.sizetext = "%sx%s" % (width, height)
self.update()
def set_terminal_title(self, widget, title):
"""Update the terminal title"""
self.termtext = title
self.update()
# Return False so we don't interrupt any chains of signal handling
return False
def set_group_label(self, name):
"""Set the name of the group"""
if name:
self.grouplabel.set_text(name)
self.grouplabel.show()
else:
self.grouplabel.set_text('')
self.grouplabel.hide()
self.update_visibility()
def on_clicked(self, widget, event):
"""Handle a click on the label"""
self.show()
self.label.show()
self.emit('clicked')
def on_edit_done(self, widget):
"""Re-emit an edit-done signal from an EditableLabel"""
self.emit('edit-done')
def editing(self):
"""Determine if we're currently editing a group name or title"""
return(self.groupentry.get_property('visible') or self.label.editing())
def create_group(self):
"""Create a new group"""
if self.terminal.group:
self.groupentry.set_text(self.terminal.group)
else:
defaultmembers=[_('Alpha'),_('Beta'),_('Gamma'),_('Delta'),_('Epsilon'),_('Zeta'),_('Eta'),
_('Theta'),_('Iota'),_('Kappa'),_('Lambda'),_('Mu'),_('Nu'),_('Xi'),
_('Omicron'),_('Pi'),_('Rho'),_('Sigma'),_('Tau'),_('Upsilon'),_('Phi'),
_('Chi'),_('Psi'),_('Omega')]
currentgroups=set(self.terminator.groups)
for i in range(1,4):
defaultgroups=set(map(''.join, list(itertools.product(defaultmembers,repeat=i))))
freegroups = list(defaultgroups-currentgroups)
if freegroups:
self.groupentry.set_text(random.choice(freegroups))
break
else:
self.groupentry.set_text('')
self.groupentry.show()
self.grouplabel.hide()
self.groupentry.grab_focus()
self.update_visibility()
def groupentry_cancel(self, widget, event):
"""Hide the group name entry"""
self.groupentry.set_text('')
self.groupentry.hide()
self.grouplabel.show()
self.get_parent().grab_focus()
def groupentry_activate(self, widget):
"""Actually cause a group to be created"""
groupname = self.groupentry.get_text() or None
dbg('Titlebar::groupentry_activate: creating group: %s' % groupname)
self.groupentry_cancel(None, None)
last_focused_term=self.terminator.last_focused_term
if self.terminal.targets_for_new_group:
[term.titlebar.emit('create-group', groupname) for term in self.terminal.targets_for_new_group]
self.terminal.targets_for_new_group = None
else:
self.emit('create-group', groupname)
last_focused_term.grab_focus()
self.terminator.focus_changed(last_focused_term)
def groupentry_keypress(self, widget, event):
"""Handle keypresses on the entry widget"""
key = Gdk.keyval_name(event.keyval)
if key == 'Escape':
self.groupentry_cancel(None, None)
def icon_bell(self):
"""A bell signal requires we display our bell icon"""
self.bellicon.show()
GObject.timeout_add(1000, self.icon_bell_hide)
def icon_bell_hide(self):
"""Handle a timeout which means we now hide the bell icon"""
self.bellicon.hide()
return(False)
def get_custom_string(self):
"""If we have a custom string set, return it, otherwise None"""
if self.label.is_custom():
return(self.label.get_text())
else:
return(None)
def set_custom_string(self, string):
"""Set a custom string"""
self.label.set_text(string)
self.label.set_custom()
GObject.type_register(Titlebar)
| guoxiao/terminator-gtk3 | terminatorlib/titlebar.py | Python | gpl-2.0 | 12,493 |
#!/usr/bin/python
from k5test import *
for realm in multipass_realms(create_user=False):
# Test kinit with a keytab.
realm.kinit(realm.host_princ, flags=['-k'])
realm = K5Realm(get_creds=False)
# Test kinit with a partial keytab.
pkeytab = realm.keytab + '.partial'
realm.run([ktutil], input=('rkt %s\ndelent 1\nwkt %s\n' %
(realm.keytab, pkeytab)))
realm.kinit(realm.host_princ, flags=['-k', '-t', pkeytab])
# Test kinit with no keys for client in keytab.
output = realm.kinit(realm.user_princ, flags=['-k'], expected_code=1)
if 'no suitable keys' not in output:
fail('Expected error not seen in kinit output')
# Test kinit and klist with client keytab defaults.
realm.extract_keytab(realm.user_princ, realm.client_keytab);
realm.kinit(realm.user_princ, flags=['-k', '-i'])
realm.klist(realm.user_princ)
out = realm.run([klist, '-k', '-i'])
if realm.client_keytab not in out or realm.user_princ not in out:
fail('Expected output not seen from klist -k -i')
# Test implicit request for keytab (-i or -t without -k)
realm.run([kdestroy])
output = realm.kinit(realm.host_princ, flags=['-t', realm.keytab])
if 'keytab specified, forcing -k' not in output:
fail('Expected output not seen from kinit -t keytab')
realm.klist(realm.host_princ)
realm.run([kdestroy])
output = realm.kinit(realm.user_princ, flags=['-i'])
if 'keytab specified, forcing -k' not in output:
fail('Expected output not seen from kinit -i')
realm.klist(realm.user_princ)
# Test handling of kvno values beyond 255.
princ = 'foo/bar@%s' % realm.realm
realm.addprinc(princ)
os.remove(realm.keytab)
realm.run_kadminl('modprinc -kvno 252 %s' % princ)
for kvno in range(253, 259):
realm.run_kadminl('ktadd -k %s %s' % (realm.keytab, princ))
realm.kinit(princ, flags=['-k'])
realm.klist_keytab(princ)
os.remove(realm.keytab)
output = realm.run_kadminl('getprinc %s' % princ)
if 'Key: vno 258,' not in output:
fail('Expected vno not seen in kadmin.local output')
# Test parameter expansion in profile variables
realm.stop()
conf = {'libdefaults': {
'default_keytab_name': 'testdir/%{null}abc%{uid}',
'default_client_keytab_name': 'testdir/%{null}xyz%{uid}'}}
realm = K5Realm(krb5_conf=conf, create_kdb=False)
del realm.env['KRB5_KTNAME']
del realm.env['KRB5_CLIENT_KTNAME']
uidstr = str(os.getuid())
out = realm.run([klist, '-k'], expected_code=1)
if 'FILE:testdir/abc%s' % uidstr not in out:
fail('Wrong keytab in klist -k output')
out = realm.run([klist, '-ki'], expected_code=1)
if 'FILE:testdir/xyz%s' % uidstr not in out:
fail('Wrong keytab in klist -ki output')
success('Keytab-related tests')
| drankye/kerb-token | krb5/src/tests/t_keytab.py | Python | apache-2.0 | 2,663 |
"""SCons.Tool.sunc++
Tool-specific initialization for C++ on SunOS / Solaris.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001 - 2016 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/sunc++.py rel_2.5.1:3735:9dc6cee5c168 2016/11/03 14:02:02 bdbaddog"
import SCons
import os
import re
import subprocess
cplusplus = __import__('c++', globals(), locals(), [])
package_info = {}
def get_package_info(package_name, pkginfo, pkgchk):
try:
return package_info[package_name]
except KeyError:
version = None
pathname = None
try:
sadm_contents = open('/var/sadm/install/contents', 'r').read()
except EnvironmentError:
pass
else:
            sadm_re = re.compile(r'^(\S*/bin/CC)(=\S*)? %s$' % package_name, re.M)
sadm_match = sadm_re.search(sadm_contents)
if sadm_match:
pathname = os.path.dirname(sadm_match.group(1))
try:
p = subprocess.Popen([pkginfo, '-l', package_name],
stdout=subprocess.PIPE,
stderr=open('/dev/null', 'w'))
except EnvironmentError:
pass
else:
pkginfo_contents = p.communicate()[0]
            version_re = re.compile(r'^ *VERSION:\s*(.*)$', re.M)
version_match = version_re.search(pkginfo_contents)
if version_match:
version = version_match.group(1)
if pathname is None:
try:
p = subprocess.Popen([pkgchk, '-l', package_name],
stdout=subprocess.PIPE,
stderr=open('/dev/null', 'w'))
except EnvironmentError:
pass
else:
pkgchk_contents = p.communicate()[0]
pathname_re = re.compile(r'^Pathname:\s*(.*/bin/CC)$', re.M)
pathname_match = pathname_re.search(pkgchk_contents)
if pathname_match:
pathname = os.path.dirname(pathname_match.group(1))
package_info[package_name] = (pathname, version)
return package_info[package_name]
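# Illustrative result (hypothetical paths/versions): on a Solaris host with Sun
# Studio installed, get_package_info('SPROcpl', 'pkginfo', 'pkgchk') might return
# ('/opt/SUNWspro/bin', '5.9'); on other systems it returns (None, None). Either
# way the result is memoised in package_info.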
# use the Solaris package tools (pkginfo/pkgchk) to figure out where CC is
# installed and what version of it is installed
def get_cppc(env):
cxx = env.subst('$CXX')
if cxx:
cppcPath = os.path.dirname(cxx)
else:
cppcPath = None
cppcVersion = None
pkginfo = env.subst('$PKGINFO')
pkgchk = env.subst('$PKGCHK')
for package in ['SPROcpl']:
path, version = get_package_info(package, pkginfo, pkgchk)
if path and version:
cppcPath, cppcVersion = path, version
break
return (cppcPath, 'CC', 'CC', cppcVersion)
def generate(env):
"""Add Builders and construction variables for SunPRO C++."""
path, cxx, shcxx, version = get_cppc(env)
if path:
cxx = os.path.join(path, cxx)
shcxx = os.path.join(path, shcxx)
cplusplus.generate(env)
env['CXX'] = cxx
env['SHCXX'] = shcxx
env['CXXVERSION'] = version
env['SHCXXFLAGS'] = SCons.Util.CLVar('$CXXFLAGS -KPIC')
env['SHOBJPREFIX'] = 'so_'
env['SHOBJSUFFIX'] = '.o'
def exists(env):
path, cxx, shcxx, version = get_cppc(env)
if path and cxx:
cppc = os.path.join(path, cxx)
if os.path.exists(cppc):
return cppc
return None
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| EmanueleCannizzaro/scons | src/engine/SCons/Tool/sunc++.py | Python | mit | 4,729 |
#!/usr/bin/python
# Software License Agreement (BSD License)
#
# Copyright (c) 2012, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
'''This file generates shell code for the setup.SHELL scripts to set environment variables'''
from __future__ import print_function
import argparse
import copy
import errno
import os
import platform
import sys
CATKIN_MARKER_FILE = '.catkin'
system = platform.system()
IS_DARWIN = (system == 'Darwin')
IS_WINDOWS = (system == 'Windows')
# subfolder of workspace prepended to CMAKE_PREFIX_PATH
ENV_VAR_SUBFOLDERS = {
'CMAKE_PREFIX_PATH': '',
'CPATH': 'include',
'LD_LIBRARY_PATH' if not IS_DARWIN else 'DYLD_LIBRARY_PATH': ['lib', os.path.join('lib', 'x86_64-linux-gnu')],
'PATH': 'bin',
'PKG_CONFIG_PATH': [os.path.join('lib', 'pkgconfig'), os.path.join('lib', 'x86_64-linux-gnu', 'pkgconfig')],
'PYTHONPATH': 'lib/python2.7/dist-packages',
}
def rollback_env_variables(environ, env_var_subfolders):
'''
Generate shell code to reset environment variables
by unrolling modifications based on all workspaces in CMAKE_PREFIX_PATH.
This does not cover modifications performed by environment hooks.
'''
lines = []
unmodified_environ = copy.copy(environ)
for key in sorted(env_var_subfolders.keys()):
subfolders = env_var_subfolders[key]
if not isinstance(subfolders, list):
subfolders = [subfolders]
for subfolder in subfolders:
value = _rollback_env_variable(unmodified_environ, key, subfolder)
if value is not None:
environ[key] = value
lines.append(assignment(key, value))
if lines:
lines.insert(0, comment('reset environment variables by unrolling modifications based on all workspaces in CMAKE_PREFIX_PATH'))
return lines
def _rollback_env_variable(environ, name, subfolder):
'''
For each catkin workspace in CMAKE_PREFIX_PATH remove the first entry from env[NAME] matching workspace + subfolder.
:param subfolder: str '' or subfoldername that may start with '/'
    :returns: the updated value of the environment variable, or None if no paths were removed.
'''
value = environ[name] if name in environ else ''
env_paths = [path for path in value.split(os.pathsep) if path]
value_modified = False
if subfolder:
if subfolder.startswith(os.path.sep) or (os.path.altsep and subfolder.startswith(os.path.altsep)):
subfolder = subfolder[1:]
if subfolder.endswith(os.path.sep) or (os.path.altsep and subfolder.endswith(os.path.altsep)):
subfolder = subfolder[:-1]
for ws_path in _get_workspaces(environ, include_fuerte=True, include_non_existing=True):
path_to_find = os.path.join(ws_path, subfolder) if subfolder else ws_path
path_to_remove = None
for env_path in env_paths:
env_path_clean = env_path[:-1] if env_path and env_path[-1] in [os.path.sep, os.path.altsep] else env_path
if env_path_clean == path_to_find:
path_to_remove = env_path
break
if path_to_remove:
env_paths.remove(path_to_remove)
value_modified = True
new_value = os.pathsep.join(env_paths)
return new_value if value_modified else None
def _get_workspaces(environ, include_fuerte=False, include_non_existing=False):
'''
Based on CMAKE_PREFIX_PATH return all catkin workspaces.
:param include_fuerte: The flag if paths starting with '/opt/ros/fuerte' should be considered workspaces, ``bool``
'''
# get all cmake prefix paths
env_name = 'CMAKE_PREFIX_PATH'
value = environ[env_name] if env_name in environ else ''
paths = [path for path in value.split(os.pathsep) if path]
# remove non-workspace paths
workspaces = [path for path in paths if os.path.isfile(os.path.join(path, CATKIN_MARKER_FILE)) or (include_fuerte and path.startswith('/opt/ros/fuerte')) or (include_non_existing and not os.path.exists(path))]
return workspaces
def prepend_env_variables(environ, env_var_subfolders, workspaces):
'''
Generate shell code to prepend environment variables
for the all workspaces.
'''
lines = []
lines.append(comment('prepend folders of workspaces to environment variables'))
paths = [path for path in workspaces.split(os.pathsep) if path]
prefix = _prefix_env_variable(environ, 'CMAKE_PREFIX_PATH', paths, '')
lines.append(prepend(environ, 'CMAKE_PREFIX_PATH', prefix))
for key in sorted([key for key in env_var_subfolders.keys() if key != 'CMAKE_PREFIX_PATH']):
subfolder = env_var_subfolders[key]
prefix = _prefix_env_variable(environ, key, paths, subfolder)
lines.append(prepend(environ, key, prefix))
return lines
def _prefix_env_variable(environ, name, paths, subfolders):
'''
    Return the prefix to prepend to the environment variable NAME, adding each path in PATHS (combined with SUBFOLDERS) without creating duplicate or empty items.
'''
value = environ[name] if name in environ else ''
environ_paths = [path for path in value.split(os.pathsep) if path]
checked_paths = []
for path in paths:
if not isinstance(subfolders, list):
subfolders = [subfolders]
for subfolder in subfolders:
path_tmp = path
if subfolder:
path_tmp = os.path.join(path_tmp, subfolder)
# exclude any path already in env and any path we already added
if path_tmp not in environ_paths and path_tmp not in checked_paths:
checked_paths.append(path_tmp)
prefix_str = os.pathsep.join(checked_paths)
if prefix_str != '' and environ_paths:
prefix_str += os.pathsep
return prefix_str
def assignment(key, value):
if not IS_WINDOWS:
return 'export %s="%s"' % (key, value)
else:
return 'set %s=%s' % (key, value)
def comment(msg):
if not IS_WINDOWS:
return '# %s' % msg
else:
return 'REM %s' % msg
def prepend(environ, key, prefix):
if key not in environ or not environ[key]:
return assignment(key, prefix)
if not IS_WINDOWS:
return 'export %s="%s$%s"' % (key, prefix, key)
else:
return 'set %s=%s%%%s%%' % (key, prefix, key)
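# Illustrative outputs of the helpers above (values are examples only):
#   assignment('PATH', '/opt/ws/bin')           -> 'export PATH="/opt/ws/bin"'   (POSIX)
#                                                  'set PATH=/opt/ws/bin'        (Windows)
#   prepend(environ, 'PATH', '/opt/ws/bin:')    -> 'export PATH="/opt/ws/bin:$PATH"'
#   comment('reset variables')                  -> '# reset variables'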
def find_env_hooks(environ, cmake_prefix_path):
'''
Generate shell code with found environment hooks
for the all workspaces.
'''
lines = []
lines.append(comment('found environment hooks in workspaces'))
generic_env_hooks = []
generic_env_hooks_workspace = []
specific_env_hooks = []
specific_env_hooks_workspace = []
generic_env_hooks_by_filename = {}
specific_env_hooks_by_filename = {}
generic_env_hook_ext = 'bat' if IS_WINDOWS else 'sh'
specific_env_hook_ext = environ['CATKIN_SHELL'] if not IS_WINDOWS and 'CATKIN_SHELL' in environ and environ['CATKIN_SHELL'] else None
# remove non-workspace paths
workspaces = [path for path in cmake_prefix_path.split(os.pathsep) if path and os.path.isfile(os.path.join(path, CATKIN_MARKER_FILE))]
for workspace in reversed(workspaces):
env_hook_dir = os.path.join(workspace, 'etc', 'catkin', 'profile.d')
if os.path.isdir(env_hook_dir):
for filename in sorted(os.listdir(env_hook_dir)):
if filename.endswith('.%s' % generic_env_hook_ext):
# remove previous env hook with same name if present
if filename in generic_env_hooks_by_filename:
i = generic_env_hooks.index(generic_env_hooks_by_filename[filename])
generic_env_hooks.pop(i)
generic_env_hooks_workspace.pop(i)
# append env hook
generic_env_hooks.append(os.path.join(env_hook_dir, filename))
generic_env_hooks_workspace.append(workspace)
generic_env_hooks_by_filename[filename] = generic_env_hooks[-1]
elif specific_env_hook_ext is not None and filename.endswith('.%s' % specific_env_hook_ext):
# remove previous env hook with same name if present
if filename in specific_env_hooks_by_filename:
i = specific_env_hooks.index(specific_env_hooks_by_filename[filename])
specific_env_hooks.pop(i)
specific_env_hooks_workspace.pop(i)
# append env hook
specific_env_hooks.append(os.path.join(env_hook_dir, filename))
specific_env_hooks_workspace.append(workspace)
specific_env_hooks_by_filename[filename] = specific_env_hooks[-1]
env_hooks = generic_env_hooks + specific_env_hooks
env_hooks_workspace = generic_env_hooks_workspace + specific_env_hooks_workspace
count = len(env_hooks)
lines.append(assignment('_CATKIN_ENVIRONMENT_HOOKS_COUNT', count))
for i in range(count):
lines.append(assignment('_CATKIN_ENVIRONMENT_HOOKS_%d' % i, env_hooks[i]))
lines.append(assignment('_CATKIN_ENVIRONMENT_HOOKS_%d_WORKSPACE' % i, env_hooks_workspace[i]))
return lines
def _parse_arguments(args=None):
parser = argparse.ArgumentParser(description='Generates code blocks for the setup.SHELL script.')
parser.add_argument('--extend', action='store_true', help='Skip unsetting previous environment variables to extend context')
return parser.parse_known_args(args=args)[0]
if __name__ == '__main__':
try:
try:
args = _parse_arguments()
except Exception as e:
print(e, file=sys.stderr)
sys.exit(1)
# environment at generation time
CMAKE_PREFIX_PATH = '/home/vpcom/chugv_ws/devel;/home/vpcom/catkin_ws/devel;/opt/ros/indigo'.split(';')
# prepend current workspace if not already part of CPP
base_path = os.path.dirname(__file__)
if base_path not in CMAKE_PREFIX_PATH:
CMAKE_PREFIX_PATH.insert(0, base_path)
CMAKE_PREFIX_PATH = os.pathsep.join(CMAKE_PREFIX_PATH)
environ = dict(os.environ)
lines = []
if not args.extend:
lines += rollback_env_variables(environ, ENV_VAR_SUBFOLDERS)
lines += prepend_env_variables(environ, ENV_VAR_SUBFOLDERS, CMAKE_PREFIX_PATH)
lines += find_env_hooks(environ, CMAKE_PREFIX_PATH)
print('\n'.join(lines))
# need to explicitly flush the output
sys.stdout.flush()
except IOError as e:
        # and catch potential "broken pipe" if stdout is not writable
# which can happen when piping the output to a file but the disk is full
if e.errno == errno.EPIPE:
print(e, file=sys.stderr)
sys.exit(2)
raise
sys.exit(0)
| mvpcom/CyberHandsFaraz | ROS/chugv_ws/devel/_setup_util.py | Python | gpl-3.0 | 12,302 |
import unittest
from django_toolkit.pagination import RangeBasedPaginator
class RangeBasedPaginatorTestCase(unittest.TestCase):
def test_num_pages(self):
self.assertEqual(RangeBasedPaginator(10, 5).num_pages, 2)
self.assertEqual(RangeBasedPaginator(10, 1).num_pages, 10)
self.assertEqual(RangeBasedPaginator(10, 10).num_pages, 1)
self.assertEqual(RangeBasedPaginator(10, 20).num_pages, 1)
def test_page_range(self):
self.assertEqual(RangeBasedPaginator(10, 5).page_range, [1, 2])
self.assertEqual(RangeBasedPaginator(10, 1).page_range, range(1, 11))
self.assertEqual(RangeBasedPaginator(10, 10).page_range, [1])
self.assertEqual(RangeBasedPaginator(10, 20).page_range, [1])
def test_paging(self):
paginator = RangeBasedPaginator(10, 5)
page = paginator.page(1)
self.assertEqual(page.start_index(), 1)
self.assertEqual(page.end_index(), 5)
page = paginator.page(2)
self.assertEqual(page.start_index(), 6)
self.assertEqual(page.end_index(), 10)
| alexhayes/django-toolkit | django_toolkit/tests/pagination.py | Python | mit | 1,100 |
from Generic_BPMDevice import *
#import sys, os
#sys.path.insert(0, os.path.abspath('..'))
from pkg_resources import require
require("numpy")
import numpy as np
class Simulated_BPMDevice(Generic_BPMDevice):
"""Simulated BPM device used for testing without the hardware.
All of the abstract methods in the parent class must be overridden. This class has
access to the RF device used in the testing so that it can read in the signals that
    are supposedly provided to it via its RF inputs.
Attributes:
attenuation (float): Attenuation produced by the virtual splitter and cables
RFSim (RF Simulator Obj) : Reference to an RF simulator
GateSim (Gate Source Simulator Obj) : Reference to a gate source simulator
"""
def __init__(self, RFSim, GateSim=None):
"""Initializes the Libera BPM device object and assigns it an ID.
Args:
            RFSim (RFSignalGenerator Obj): The interface object that has access to an RF device.
                This is needed in the simulator so it can access the input values that would
                normally come through the signals supplied to the device's inputs.
GateSim (Gate_Source Object): The interface object that has access to a Gate Source
                device. This will typically be a simulated GateSource; it is an input to this
                class so it knows what signals are being sent to it.
Returns:
"""
print("Simulated BPM device accessed on virtual channel")
self.attenuation = 12 # Typical attenuation when using a 4 way splitter and cables
self.RFSim = RFSim # Instance of the RF source used, allows the simulator to know what signals are output
self.GateSim = GateSim # Instance of the Gate device, allows the simulator to know what signals are output
def get_X_position (self):
"""Override method, gets the calculated X position of the beam.
Args:
Returns:
float: X position in mm
"""
return 0.0 # With an equal splitter there should be no X shift
def get_Y_position(self):
"""Override method, gets the calculated X position of the beam.
Args:
Returns:
float: Y position in mm
"""
return 0.0 # With an equal splitter there should be no Y shift
def get_beam_current(self):
"""Override method, gets the beam current read by the BPMs.
By measuring the output power from the RF device, the input power can be assumed, then an equation extracted
        from the Rigol 3030 and Libera BPM device can be used to give an estimate of the current.
Args:
Returns:
float: Current in mA
"""
current = self.get_input_power() # Gets the current input power
current = 1000 * (1.1193) ** current # Extracted equation from Rigol3030 vs Libera BPM measurements
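        # Worked example (illustrative): at -40 dBm of input power the fit above
        # gives 1000 * 1.1193 ** -40, roughly 11 mA of simulated beam current.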
return current
def get_input_power(self):
"""Override method, gets the input power of the signals input to the device
This function assumes that a standard 4 way splitter is used, that combined with the cable losses give an
estimated loss of 12 dBm. This is then taken off of the output power set by the RF device giving the result.
Args:
Returns:
float: Input power in dBm
"""
        if self.GateSim is None:  # Checks if the simulation is using a gate source
return self.RFSim.get_output_power()[0] - self.attenuation
        elif not self.GateSim.get_modulation_state():  # Checks if the simulated gate source is enabled
return self.RFSim.get_output_power()[0] - self.attenuation
else: # gate source must be present and enabled
dutycycle = self.GateSim.get_pulse_dutycycle() # Get the current duty cycle
log_cycle = 20 * np.log10(dutycycle) # Convert the duty cycle into dB
# factor the duty cycle into the power read by the simulated BPM
return (self.RFSim.get_output_power()[0] - np.absolute(log_cycle)) - self.attenuation
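    # Note on the duty-cycle term above (illustrative numbers): a 50% duty cycle
    # contributes 20 * log10(0.5), roughly -6.02 dB, on top of the fixed
    # splitter/cable attenuation.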
def get_raw_BPM_buttons(self):
"""Override method, gets the raw signal from each BPM.
Args:
Returns:
int: Raw signal from BPM A
int: Raw signal from BPM B
int: Raw signal from BPM C
int: Raw signal from BPM D
"""
ADC = 1000 * self.get_beam_current() # Gets a linear value for the BPM
return ADC, ADC, ADC, ADC
def get_normalised_BPM_buttons(self):
"""Override method, gets the normalised signal from each BPM.
Args:
Returns:
float: Normalised signal from BPM A
float: Normalised signal from BPM B
float: Normalised signal from BPM C
float: Normalised signal from BPM D
"""
return 1, 1, 1, 1 # Assumes all BPM pickups are equal
def get_device_ID(self):
"""Override method, gets the type of BPM device that the device is
Args:
Returns:
str: Device type
"""
return "Simulated BPM Device"
def get_ADC_sum(self):
"""Override method, gets the maximum input power the device can take
The devices will break if the input power is too high, as such, each device has their
own tolerances, this function will return this tolerance. It should be used to ensure
that the power put into the device is not too high to break the device.
Args:
Returns:
float: max input power in dBm
"""
a, b, c, d = self.get_raw_BPM_buttons()
sum = a + b + c + d # Sums the BPM values used in the simulator
return sum
def get_input_tolerance(self):
"""Override method, gets the maximum input power the device can take
        The devices will break if the input power is too high; as such, each device has its
        own tolerance, which this function returns. It should be used to ensure that the
        power put into the device is not high enough to break the device.
Args:
Returns:
float: max input power in dBm
"""
return -40 # Max tolerance of the simulated device, as low as the most susceptible real device
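# Example wiring (minimal sketch; the simulator class names below are placeholders
# for whatever RF and gate source simulators the framework provides):
#
#   rf_sim = Simulated_RFSignalGenerator()
#   gate_sim = Simulated_GateSource()
#   bpm = Simulated_BPMDevice(rf_sim, gate_sim)
#   print(bpm.get_input_power(), bpm.get_beam_current())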
| dharryman/BPM_Test_Framework | BPMDevice/Simulated_BPMDevice.py | Python | apache-2.0 | 6,549 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import unittest
import warnings
from PIL import Image
from padpt import padptdb, pt, sheet
class TestSheet(unittest.TestCase):
def setUp(self):
self.monsters = padptdb.read_monsters(
'tests/data/db/monsters.csv',
'tests/data/db/icons')
def test_generate_sheet_00(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
sheet.generate_sheet(
pt=pt.PT(
title='ミル降臨',
party_a=(
pt.Member(
monster=self.monsters[2903],
assist=self.monsters[2012]),
pt.Member(
monster=self.monsters[2948],
assist=None),
pt.Member(
monster=self.monsters[1730],
assist=self.monsters[3162]),
pt.Member(
monster=self.monsters[2948],
assist=None),
pt.Member(
monster=self.monsters[2948],
assist=None)),
party_b=(
pt.Member(
monster=self.monsters[2903],
assist=self.monsters[923]),
pt.Member(
monster=self.monsters[2752],
assist=None),
pt.Member(
monster=self.monsters[2948],
assist=None),
pt.Member(
monster=self.monsters[2948],
assist=None),
pt.Member(
monster=self.monsters[2948],
assist=None)),
note=('1F: Aディオス\n'
'2F: Bアヴァロン\n'
'3F: Aハーデス,ディオス\n'
'4F: Bディオス\n'
'5F: Aラー\n'
'6F: Aディオス\n'
'7F: Bディオス\n'
'8F: A0コンボ,Bタナトス,A0コンボ,B0コンボ,A0コンボ,Bディオス\n')),
timestamp='2016-09-22',
font_path='/System/Library/Fonts/ヒラギノ角ゴ ProN W6.otf',
png_path='tests/data/png/',
out_path='tests/out/testsheet_00.png')
Image.open('tests/out/testsheet_00.png').show()
self.assertEqual(
'y',
input('OK? [y/n]'))
def test_generate_sheet_01(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
sheet.generate_sheet(
pt=pt.PT(
title='金曜ダンジョン(超地獄級)',
party_a=(
pt.Member(
monster=self.monsters[2657],
assist=None),
pt.Member(
monster=self.monsters[2368],
assist=None),
pt.Member(
monster=self.monsters[2179],
assist=None),
pt.Member(
monster=self.monsters[2179],
assist=None),
pt.Member(
monster=self.monsters[2006],
assist=None)),
party_b=(
pt.Member(
monster=self.monsters[2657],
assist=None),),
note=('1F: ハンジ,赤オーディン\n'
'2F: 赤オーディン\n'
'3F: 五右衛門\n'
'4F: 崩す\n')),
timestamp='2016-09-22',
font_path='/System/Library/Fonts/ヒラギノ角ゴ ProN W6.otf',
png_path='tests/data/png/',
out_path='tests/out/testsheet_01.png')
Image.open('tests/out/testsheet_01.png').show()
self.assertEqual(
'y',
input('OK? [y/n]'))
if __name__ == '__main__':
unittest.main()
| wotsushi/padpt | tests/testsheet.py | Python | mit | 4,591 |
"""Translation helper functions."""
import locale
import os
import re
import sys
import warnings
import gettext as gettext_module
from cStringIO import StringIO
from google.appengine._internal.django.utils.importlib import import_module
from google.appengine._internal.django.utils.safestring import mark_safe, SafeData
from google.appengine._internal.django.utils.thread_support import currentThread
# Translations are cached in a dictionary for every language+app tuple.
# The active translations are stored by threadid to make them thread local.
_translations = {}
_active = {}
# The default translation is based on the settings file.
_default = None
# This is a cache for normalized accept-header languages to prevent multiple
# file lookups when checking the same locale on repeated requests.
_accepted = {}
# Format of Accept-Language header values. From RFC 2616, section 14.4 and 3.9.
accept_language_re = re.compile(r'''
([A-Za-z]{1,8}(?:-[A-Za-z]{1,8})*|\*) # "en", "en-au", "x-y-z", "*"
(?:;q=(0(?:\.\d{,3})?|1(?:.0{,3})?))? # Optional "q=1.00", "q=0.8"
(?:\s*,\s*|$) # Multiple accepts per header.
''', re.VERBOSE)
def to_locale(language, to_lower=False):
"""
Turns a language name (en-us) into a locale name (en_US). If 'to_lower' is
True, the last component is lower-cased (en_us).
"""
p = language.find('-')
if p >= 0:
if to_lower:
return language[:p].lower()+'_'+language[p+1:].lower()
else:
# Get correct locale for sr-latn
if len(language[p+1:]) > 2:
return language[:p].lower()+'_'+language[p+1].upper()+language[p+2:].lower()
return language[:p].lower()+'_'+language[p+1:].upper()
else:
return language.lower()
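# Illustrative examples:
#   to_locale('en-us') -> 'en_US'
#   to_locale('sr-latn') -> 'sr_Latn'
#   to_locale('en-us', to_lower=True) -> 'en_us'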
def to_language(locale):
"""Turns a locale name (en_US) into a language name (en-us)."""
p = locale.find('_')
if p >= 0:
return locale[:p].lower()+'-'+locale[p+1:].lower()
else:
return locale.lower()
class DjangoTranslation(gettext_module.GNUTranslations):
"""
This class sets up the GNUTranslations context with regard to output
charset. Django uses a defined DEFAULT_CHARSET as the output charset on
Python 2.4.
"""
def __init__(self, *args, **kw):
from google.appengine._internal.django.conf import settings
gettext_module.GNUTranslations.__init__(self, *args, **kw)
# Starting with Python 2.4, there's a function to define
# the output charset. Before 2.4, the output charset is
# identical with the translation file charset.
try:
self.set_output_charset('utf-8')
except AttributeError:
pass
self.django_output_charset = 'utf-8'
self.__language = '??'
def merge(self, other):
self._catalog.update(other._catalog)
def set_language(self, language):
self.__language = language
def language(self):
return self.__language
def __repr__(self):
return "<DjangoTranslation lang:%s>" % self.__language
def translation(language):
"""
Returns a translation object.
This translation object will be constructed out of multiple GNUTranslations
objects by merging their catalogs. It will construct a object for the
requested language and add a fallback to the default language, if it's
different from the requested language.
"""
global _translations
t = _translations.get(language, None)
if t is not None:
return t
from google.appengine._internal.django.conf import settings
globalpath = os.path.join(os.path.dirname(sys.modules[settings.__module__].__file__), 'locale')
if settings.SETTINGS_MODULE is not None:
parts = settings.SETTINGS_MODULE.split('.')
project = import_module(parts[0])
projectpath = os.path.join(os.path.dirname(project.__file__), 'locale')
else:
projectpath = None
def _fetch(lang, fallback=None):
global _translations
loc = to_locale(lang)
res = _translations.get(lang, None)
if res is not None:
return res
def _translation(path):
try:
t = gettext_module.translation('django', path, [loc], DjangoTranslation)
t.set_language(lang)
return t
except IOError, e:
return None
res = _translation(globalpath)
# We want to ensure that, for example, "en-gb" and "en-us" don't share
# the same translation object (thus, merging en-us with a local update
# doesn't affect en-gb), even though they will both use the core "en"
# translation. So we have to subvert Python's internal gettext caching.
base_lang = lambda x: x.split('-', 1)[0]
if base_lang(lang) in [base_lang(trans) for trans in _translations]:
res._info = res._info.copy()
res._catalog = res._catalog.copy()
def _merge(path):
t = _translation(path)
if t is not None:
if res is None:
return t
else:
res.merge(t)
return res
for localepath in settings.LOCALE_PATHS:
if os.path.isdir(localepath):
res = _merge(localepath)
for appname in settings.INSTALLED_APPS:
app = import_module(appname)
apppath = os.path.join(os.path.dirname(app.__file__), 'locale')
if os.path.isdir(apppath):
res = _merge(apppath)
if projectpath and os.path.isdir(projectpath):
res = _merge(projectpath)
if res is None:
if fallback is not None:
res = fallback
else:
return gettext_module.NullTranslations()
_translations[lang] = res
return res
default_translation = _fetch(settings.LANGUAGE_CODE)
current_translation = _fetch(language, fallback=default_translation)
return current_translation
def activate(language):
"""
Fetches the translation object for a given tuple of application name and
language and installs it as the current translation object for the current
thread.
"""
if isinstance(language, basestring) and language == 'no':
warnings.warn(
"The use of the language code 'no' is deprecated. "
"Please use the 'nb' translation instead.",
PendingDeprecationWarning
)
_active[currentThread()] = translation(language)
def deactivate():
"""
Deinstalls the currently active translation object so that further _ calls
will resolve against the default translation object, again.
"""
global _active
if currentThread() in _active:
del _active[currentThread()]
def deactivate_all():
"""
Makes the active translation object a NullTranslations() instance. This is
useful when we want delayed translations to appear as the original string
for some reason.
"""
_active[currentThread()] = gettext_module.NullTranslations()
def get_language():
"""Returns the currently selected language."""
t = _active.get(currentThread(), None)
if t is not None:
try:
return to_language(t.language())
except AttributeError:
pass
# If we don't have a real translation object, assume it's the default language.
from google.appengine._internal.django.conf import settings
return settings.LANGUAGE_CODE
def get_language_bidi():
"""
Returns selected language's BiDi layout.
* False = left-to-right layout
* True = right-to-left layout
"""
from google.appengine._internal.django.conf import settings
base_lang = get_language().split('-')[0]
return base_lang in settings.LANGUAGES_BIDI
def catalog():
"""
Returns the current active catalog for further processing.
This can be used if you need to modify the catalog or want to access the
whole message catalog instead of just translating one string.
"""
global _default, _active
t = _active.get(currentThread(), None)
if t is not None:
return t
if _default is None:
from google.appengine._internal.django.conf import settings
_default = translation(settings.LANGUAGE_CODE)
return _default
def do_translate(message, translation_function):
"""
Translates 'message' using the given 'translation_function' name -- which
will be either gettext or ugettext. It uses the current thread to find the
translation object to use. If no current translation is activated, the
message will be run through the default translation object.
"""
eol_message = message.replace('\r\n', '\n').replace('\r', '\n')
global _default, _active
t = _active.get(currentThread(), None)
if t is not None:
result = getattr(t, translation_function)(eol_message)
else:
if _default is None:
from google.appengine._internal.django.conf import settings
_default = translation(settings.LANGUAGE_CODE)
result = getattr(_default, translation_function)(eol_message)
if isinstance(message, SafeData):
return mark_safe(result)
return result
def gettext(message):
return do_translate(message, 'gettext')
def ugettext(message):
return do_translate(message, 'ugettext')
def gettext_noop(message):
"""
Marks strings for translation but doesn't translate them now. This can be
used to store strings in global variables that should stay in the base
language (because they might be used externally) and will be translated
later.
"""
return message
def do_ntranslate(singular, plural, number, translation_function):
global _default, _active
t = _active.get(currentThread(), None)
if t is not None:
return getattr(t, translation_function)(singular, plural, number)
if _default is None:
from google.appengine._internal.django.conf import settings
_default = translation(settings.LANGUAGE_CODE)
return getattr(_default, translation_function)(singular, plural, number)
def ngettext(singular, plural, number):
"""
Returns a UTF-8 bytestring of the translation of either the singular or
plural, based on the number.
"""
return do_ntranslate(singular, plural, number, 'ngettext')
def ungettext(singular, plural, number):
"""
Returns a unicode strings of the translation of either the singular or
plural, based on the number.
"""
return do_ntranslate(singular, plural, number, 'ungettext')
def check_for_language(lang_code):
"""
Checks whether there is a global language file for the given language
code. This is used to decide whether a user-provided language is
available. This is only used for language codes from either the cookies or
session.
"""
from google.appengine._internal.django.conf import settings
globalpath = os.path.join(os.path.dirname(sys.modules[settings.__module__].__file__), 'locale')
if gettext_module.find('django', globalpath, [to_locale(lang_code)]) is not None:
return True
else:
return False
def get_language_from_request(request):
"""
Analyzes the request to find what language the user wants the system to
show. Only languages listed in settings.LANGUAGES are taken into account.
If the user requests a sublanguage where we have a main language, we send
out the main language.
"""
global _accepted
from google.appengine._internal.django.conf import settings
globalpath = os.path.join(os.path.dirname(sys.modules[settings.__module__].__file__), 'locale')
supported = dict(settings.LANGUAGES)
if hasattr(request, 'session'):
lang_code = request.session.get('django_language', None)
if lang_code in supported and lang_code is not None and check_for_language(lang_code):
return lang_code
lang_code = request.COOKIES.get(settings.LANGUAGE_COOKIE_NAME)
if lang_code and lang_code not in supported:
lang_code = lang_code.split('-')[0] # e.g. if fr-ca is not supported fallback to fr
if lang_code and lang_code in supported and check_for_language(lang_code):
return lang_code
accept = request.META.get('HTTP_ACCEPT_LANGUAGE', '')
for accept_lang, unused in parse_accept_lang_header(accept):
if accept_lang == '*':
break
# We have a very restricted form for our language files (no encoding
# specifier, since they all must be UTF-8 and only one possible
# language each time. So we avoid the overhead of gettext.find() and
# work out the MO file manually.
# 'normalized' is the root name of the locale in POSIX format (which is
# the format used for the directories holding the MO files).
normalized = locale.locale_alias.get(to_locale(accept_lang, True))
if not normalized:
continue
# Remove the default encoding from locale_alias.
normalized = normalized.split('.')[0]
if normalized in _accepted:
# We've seen this locale before and have an MO file for it, so no
# need to check again.
return _accepted[normalized]
for lang, dirname in ((accept_lang, normalized),
(accept_lang.split('-')[0], normalized.split('_')[0])):
if lang.lower() not in supported:
continue
langfile = os.path.join(globalpath, dirname, 'LC_MESSAGES',
'django.mo')
if os.path.exists(langfile):
_accepted[normalized] = lang
return lang
return settings.LANGUAGE_CODE
dot_re = re.compile(r'\S')
def blankout(src, char):
"""
Changes every non-whitespace character to the given char.
Used in the templatize function.
"""
return dot_re.sub(char, src)
inline_re = re.compile(r"""^\s*trans\s+((?:".*?")|(?:'.*?'))\s*""")
block_re = re.compile(r"""^\s*blocktrans(?:\s+|$)""")
endblock_re = re.compile(r"""^\s*endblocktrans$""")
plural_re = re.compile(r"""^\s*plural$""")
constant_re = re.compile(r"""_\(((?:".*?")|(?:'.*?'))\)""")
def templatize(src):
"""
Turns a Django template into something that is understood by xgettext. It
does so by translating the Django translation tags into standard gettext
function invocations.
"""
from google.appengine._internal.django.template import Lexer, TOKEN_TEXT, TOKEN_VAR, TOKEN_BLOCK
out = StringIO()
intrans = False
inplural = False
singular = []
plural = []
for t in Lexer(src, None).tokenize():
if intrans:
if t.token_type == TOKEN_BLOCK:
endbmatch = endblock_re.match(t.contents)
pluralmatch = plural_re.match(t.contents)
if endbmatch:
if inplural:
out.write(' ngettext(%r,%r,count) ' % (''.join(singular), ''.join(plural)))
for part in singular:
out.write(blankout(part, 'S'))
for part in plural:
out.write(blankout(part, 'P'))
else:
out.write(' gettext(%r) ' % ''.join(singular))
for part in singular:
out.write(blankout(part, 'S'))
intrans = False
inplural = False
singular = []
plural = []
elif pluralmatch:
inplural = True
else:
raise SyntaxError("Translation blocks must not include other block tags: %s" % t.contents)
elif t.token_type == TOKEN_VAR:
if inplural:
plural.append('%%(%s)s' % t.contents)
else:
singular.append('%%(%s)s' % t.contents)
elif t.token_type == TOKEN_TEXT:
contents = t.contents.replace('%', '%%')
if inplural:
plural.append(contents)
else:
singular.append(contents)
else:
if t.token_type == TOKEN_BLOCK:
imatch = inline_re.match(t.contents)
bmatch = block_re.match(t.contents)
cmatches = constant_re.findall(t.contents)
if imatch:
g = imatch.group(1)
if g[0] == '"': g = g.strip('"')
elif g[0] == "'": g = g.strip("'")
out.write(' gettext(%r) ' % g)
elif bmatch:
for fmatch in constant_re.findall(t.contents):
out.write(' _(%s) ' % fmatch)
intrans = True
inplural = False
singular = []
plural = []
elif cmatches:
for cmatch in cmatches:
out.write(' _(%s) ' % cmatch)
else:
out.write(blankout(t.contents, 'B'))
elif t.token_type == TOKEN_VAR:
parts = t.contents.split('|')
cmatch = constant_re.match(parts[0])
if cmatch:
out.write(' _(%s) ' % cmatch.group(1))
for p in parts[1:]:
if p.find(':_(') >= 0:
out.write(' %s ' % p.split(':',1)[1])
else:
out.write(blankout(p, 'F'))
else:
out.write(blankout(t.contents, 'X'))
return out.getvalue()
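# Illustrative sketch of the transformation above (assuming the default tag
# syntax; not part of the original module):
#   {% trans "Hello" %}                               ->  ' gettext('Hello') '
#   {% blocktrans %}Hi {{ name }}{% endblocktrans %}  ->  ' gettext('Hi %(name)s) ' is not quite right --
#   the actual output is ' gettext('Hi %(name)s') ' plus blanked-out filler,
# so that xgettext can extract the messages from the generated pseudo-source.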
def parse_accept_lang_header(lang_string):
"""
Parses the lang_string, which is the body of an HTTP Accept-Language
header, and returns a list of (lang, q-value), ordered by 'q' values.
    Any format errors in lang_string result in an empty list being returned.
"""
result = []
pieces = accept_language_re.split(lang_string)
if pieces[-1]:
return []
for i in range(0, len(pieces) - 1, 3):
first, lang, priority = pieces[i : i + 3]
if first:
return []
priority = priority and float(priority) or 1.0
result.append((lang, priority))
result.sort(lambda x, y: -cmp(x[1], y[1]))
return result
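# Illustrative example (assuming the accept_language_re defined earlier in
# this module):
# >>> parse_accept_lang_header('da, en-gb;q=0.8, en;q=0.7')
# [('da', 1.0), ('en-gb', 0.8), ('en', 0.7)]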
# get_date_formats and get_partial_date_formats aren't used anymore by Django
# and are kept for backward compatibility.
# Note, it's also important to keep format names marked for translation.
# For compatibility we still want to have formats on translation catalogs.
# That makes template code like {{ my_date|date:_('DATE_FORMAT') }} still work
def get_date_formats():
"""
Checks whether translation files provide a translation for some technical
    message ID to store date and time formats. If they don't contain one, the
    formats provided in the settings will be used.
"""
warnings.warn(
"'django.utils.translation.get_date_formats' is deprecated. "
"Please update your code to use the new i18n aware formatting.",
PendingDeprecationWarning
)
from google.appengine._internal.django.conf import settings
date_format = ugettext('DATE_FORMAT')
datetime_format = ugettext('DATETIME_FORMAT')
time_format = ugettext('TIME_FORMAT')
if date_format == 'DATE_FORMAT':
date_format = settings.DATE_FORMAT
if datetime_format == 'DATETIME_FORMAT':
datetime_format = settings.DATETIME_FORMAT
if time_format == 'TIME_FORMAT':
time_format = settings.TIME_FORMAT
return date_format, datetime_format, time_format
def get_partial_date_formats():
"""
Checks whether translation files provide a translation for some technical
    message ID to store partial date formats. If they don't contain one, the
    formats provided in the settings will be used.
"""
warnings.warn(
"'django.utils.translation.get_partial_date_formats' is deprecated. "
"Please update your code to use the new i18n aware formatting.",
PendingDeprecationWarning
)
from google.appengine._internal.django.conf import settings
year_month_format = ugettext('YEAR_MONTH_FORMAT')
month_day_format = ugettext('MONTH_DAY_FORMAT')
if year_month_format == 'YEAR_MONTH_FORMAT':
year_month_format = settings.YEAR_MONTH_FORMAT
if month_day_format == 'MONTH_DAY_FORMAT':
month_day_format = settings.MONTH_DAY_FORMAT
return year_month_format, month_day_format
| ychen820/microblog | y/google-cloud-sdk/platform/google_appengine/google/appengine/_internal/django/utils/translation/trans_real.py | Python | bsd-3-clause | 20,698 |
# This file is part of GooCalendar. The COPYRIGHT file at the top level of
# this repository contains the full copyright notices and license terms.
from ._calendar import Calendar
from ._event import Event, EventStore
__all__ = ['Calendar', 'EventStore', 'Event']
__version__ = '0.3'
| ajeebkp23/goocalendar | goocalendar/__init__.py | Python | gpl-2.0 | 286 |
"""
==========================================
Statistical functions (:mod:`scipy.stats`)
==========================================
.. module:: scipy.stats
This module contains a large number of probability distributions as
well as a growing library of statistical functions.
Each univariate distribution is an instance of a subclass of `rv_continuous`
(`rv_discrete` for discrete distributions):
.. autosummary::
:toctree: generated/
rv_continuous
rv_discrete
Continuous distributions
========================
.. autosummary::
:toctree: generated/
alpha -- Alpha
anglit -- Anglit
arcsine -- Arcsine
beta -- Beta
betaprime -- Beta Prime
bradford -- Bradford
burr -- Burr
cauchy -- Cauchy
chi -- Chi
chi2 -- Chi-squared
cosine -- Cosine
dgamma -- Double Gamma
dweibull -- Double Weibull
erlang -- Erlang
expon -- Exponential
exponweib -- Exponentiated Weibull
exponpow -- Exponential Power
   f                 -- F (Snedecor F)
fatiguelife -- Fatigue Life (Birnbaum-Saunders)
fisk -- Fisk
foldcauchy -- Folded Cauchy
foldnorm -- Folded Normal
frechet_r -- Frechet Right Sided, Extreme Value Type II (Extreme LB) or weibull_min
frechet_l -- Frechet Left Sided, Weibull_max
genlogistic -- Generalized Logistic
genpareto -- Generalized Pareto
genexpon -- Generalized Exponential
genextreme -- Generalized Extreme Value
gausshyper -- Gauss Hypergeometric
gamma -- Gamma
gengamma -- Generalized gamma
genhalflogistic -- Generalized Half Logistic
gilbrat -- Gilbrat
gompertz -- Gompertz (Truncated Gumbel)
gumbel_r -- Right Sided Gumbel, Log-Weibull, Fisher-Tippett, Extreme Value Type I
gumbel_l -- Left Sided Gumbel, etc.
halfcauchy -- Half Cauchy
halflogistic -- Half Logistic
halfnorm -- Half Normal
hypsecant -- Hyperbolic Secant
invgamma -- Inverse Gamma
invgauss -- Inverse Gaussian
invweibull -- Inverse Weibull
johnsonsb -- Johnson SB
johnsonsu -- Johnson SU
ksone -- Kolmogorov-Smirnov one-sided (no stats)
kstwobign -- Kolmogorov-Smirnov two-sided test for Large N (no stats)
laplace -- Laplace
logistic -- Logistic
loggamma -- Log-Gamma
loglaplace -- Log-Laplace (Log Double Exponential)
lognorm -- Log-Normal
lomax -- Lomax (Pareto of the second kind)
maxwell -- Maxwell
mielke -- Mielke's Beta-Kappa
nakagami -- Nakagami
ncx2 -- Non-central chi-squared
ncf -- Non-central F
nct -- Non-central Student's T
norm -- Normal (Gaussian)
pareto -- Pareto
pearson3 -- Pearson type III
powerlaw -- Power-function
powerlognorm -- Power log normal
powernorm -- Power normal
rdist -- R-distribution
reciprocal -- Reciprocal
rayleigh -- Rayleigh
rice -- Rice
recipinvgauss -- Reciprocal Inverse Gaussian
semicircular -- Semicircular
t -- Student's T
triang -- Triangular
truncexpon -- Truncated Exponential
truncnorm -- Truncated Normal
tukeylambda -- Tukey-Lambda
uniform -- Uniform
vonmises -- Von-Mises (Circular)
wald -- Wald
weibull_min -- Minimum Weibull (see Frechet)
weibull_max -- Maximum Weibull (see Frechet)
wrapcauchy -- Wrapped Cauchy
Multivariate distributions
==========================
.. autosummary::
:toctree: generated/
multivariate_normal -- Multivariate normal distribution
dirichlet -- Dirichlet
wishart -- Wishart
invwishart -- Inverse Wishart
Discrete distributions
======================
.. autosummary::
:toctree: generated/
bernoulli -- Bernoulli
binom -- Binomial
boltzmann -- Boltzmann (Truncated Discrete Exponential)
dlaplace -- Discrete Laplacian
geom -- Geometric
hypergeom -- Hypergeometric
logser -- Logarithmic (Log-Series, Series)
nbinom -- Negative Binomial
planck -- Planck (Discrete Exponential)
poisson -- Poisson
randint -- Discrete Uniform
skellam -- Skellam
zipf -- Zipf
Statistical functions
=====================
Several of these functions have a similar version in scipy.stats.mstats
which work for masked arrays.
.. autosummary::
:toctree: generated/
describe -- Descriptive statistics
gmean -- Geometric mean
hmean -- Harmonic mean
kurtosis -- Fisher or Pearson kurtosis
kurtosistest --
mode -- Modal value
moment -- Central moment
normaltest --
skew -- Skewness
skewtest --
tmean -- Truncated arithmetic mean
tvar -- Truncated variance
tmin --
tmax --
tstd --
tsem --
nanmean -- Mean, ignoring NaN values
nanstd -- Standard deviation, ignoring NaN values
nanmedian -- Median, ignoring NaN values
variation -- Coefficient of variation
.. autosummary::
:toctree: generated/
cumfreq _
histogram2 _
histogram _
itemfreq _
percentileofscore _
scoreatpercentile _
relfreq _
.. autosummary::
:toctree: generated/
binned_statistic -- Compute a binned statistic for a set of data.
binned_statistic_2d -- Compute a 2-D binned statistic for a set of data.
binned_statistic_dd -- Compute a d-D binned statistic for a set of data.
.. autosummary::
:toctree: generated/
obrientransform
signaltonoise
bayes_mvs
sem
zmap
zscore
.. autosummary::
:toctree: generated/
sigmaclip
threshold
trimboth
trim1
.. autosummary::
:toctree: generated/
f_oneway
pearsonr
spearmanr
pointbiserialr
kendalltau
linregress
theilslopes
.. autosummary::
:toctree: generated/
ttest_1samp
ttest_ind
ttest_ind_from_stats
ttest_rel
kstest
chisquare
power_divergence
ks_2samp
mannwhitneyu
tiecorrect
rankdata
ranksums
wilcoxon
kruskal
friedmanchisquare
combine_pvalues
.. autosummary::
:toctree: generated/
ansari
bartlett
levene
shapiro
anderson
anderson_ksamp
binom_test
fligner
median_test
mood
.. autosummary::
:toctree: generated/
boxcox
boxcox_normmax
boxcox_llf
entropy
Contingency table functions
===========================
.. autosummary::
:toctree: generated/
chi2_contingency
contingency.expected_freq
contingency.margins
fisher_exact
Plot-tests
==========
.. autosummary::
:toctree: generated/
ppcc_max
ppcc_plot
probplot
boxcox_normplot
Masked statistics functions
===========================
.. toctree::
stats.mstats
Univariate and multivariate kernel density estimation (:mod:`scipy.stats.kde`)
==============================================================================
.. autosummary::
:toctree: generated/
gaussian_kde
For many more statistical functions, install the software R and the
interface package rpy.
"""
from __future__ import division, print_function, absolute_import
from .stats import *
from .distributions import *
from .rv import *
from .morestats import *
from ._binned_statistic import *
from .kde import gaussian_kde
from . import mstats
from .contingency import chi2_contingency
from ._multivariate import *
#remove vonmises_cython from __all__, I don't know why it is included
__all__ = [s for s in dir() if not (s.startswith('_') or s.endswith('cython'))]
from numpy.testing import Tester
test = Tester().test
| vhaasteren/scipy | scipy/stats/__init__.py | Python | bsd-3-clause | 8,414 |
# Copyright (C) 2013 Instituto Nokia de Tecnologia - INdT
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
from construct import *
from six import BytesIO
# FIXME: Without this hack, context inside If/Switch is incorrectly placed
# inside "_" (just for sizeof())
class FixedSwitch(Switch):
def _sizeof(self, ctx):
while ctx.get("_"):
ctx = ctx._
return Switch._sizeof(self, ctx)
def FixedIf(predicate, subcon):
return FixedSwitch(subcon.name, lambda ctx: bool(predicate(ctx)),
{
True: subcon,
False: Pass,
}
)
class SwapAdapter(Adapter):
def _encode(self, obj, context):
return "".join(reversed(obj))
def _decode(self, obj, context):
return "".join(reversed(obj))
class _DataAdapter(Adapter):
def __init__(self, subcon, data_field = "data", len_field = "dlen"):
Adapter.__init__(self, subcon)
self.data_field = data_field
self.len_field = len_field
def _encode(self, obj, ctx):
if isinstance(obj[self.data_field], str):
obj[self.len_field] = len(obj[self.data_field])
return obj
obj[self.len_field] = 0
self.subcon._build(obj, BytesIO(), ctx)
s = BytesIO()
        # list() keeps this working on Python 3, where filter() is lazy
        data = list(filter(lambda x: x.name == self.data_field, self.subcon.subcons))[0]
data._build(obj[self.data_field], s, obj)
obj[self.len_field] = len(s.getvalue())
return obj
def _decode(self, obj, ctx):
del obj[self.len_field]
return obj
def DataStruct(name, *subcons, **kwds):
return _DataAdapter(Struct(name, *subcons), **kwds)
class AssertEof(Subconstruct):
def _parse(self, stream, context):
obj = self.subcon._parse(stream, context)
pos = stream.tell()
stream.seek(0, 2)
eof = stream.tell()
stream.seek(pos)
if pos != eof:
self.subcon.subcon._parse(stream, context)
return obj
class BdAddrAdapter(Adapter):
def _encode(self, obj, context):
return "".join(chr(int(c, 16)) for c in reversed(obj.split(":")))
def _decode(self, obj, context):
return ":".join("%02X" % ord(c) for c in reversed(obj))
def BdAddr(name):
return BdAddrAdapter(Bytes(name, 6))
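# Illustrative example (not part of the original module): six raw bytes parse
# into a reversed (little-endian), colon-separated address string, per the
# adapter above.
# >>> BdAddr("bdaddr").parse("\x01\x02\x03\x04\x05\x06")
# '06:05:04:03:02:01'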
class BtUuidAdapter(Adapter):
def _encode(self, obj, context):
if isinstance(obj, int):
return [ord(c) for c in ULInt16("uuid16").build(obj)]
else:
import uuid
return [ord(c) for c in reversed(uuid.UUID(obj).bytes)]
def _decode(self, obj, context):
if len(obj) == 2:
return "0x%04x" % ULInt16("uuid16").parse("".join(chr(c) for c in obj))
else:
import uuid
return str(uuid.UUID(bytes="".join(chr(c) for c in reversed(obj))))
def BT_UUID(name):
return BtUuidAdapter(GreedyRange(ULInt8(name)))
def pprint_container(obj, indent=0):
s = ""
if isinstance(obj, Container):
s += "Container(\n"
for k in sorted(obj.keys()):
s += " " * (indent + 1) + "%s = %s,\n" % (k, pprint_container(obj[k], indent + 1))
s += " " * indent + ")"
elif isinstance(obj, str):
s += repr(obj)
elif isinstance(obj, bool):
s += "True" if obj else "False"
elif isinstance(obj, int):
s += "%d" % obj
elif isinstance(obj, list):
s += "[\n"
for i in obj:
s += " " * (indent + 1) + "%s,\n" % pprint_container(i, indent + 1)
s += " " * indent + "]"
else:
        raise NotImplementedError("Not supported: %s" % obj)
return s
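# Illustrative example (Python 2, matching this module):
# >>> print pprint_container(Container(foo=1, bar=True))
# Container(
#  bar = True,
#  foo = 1,
# )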
| lizardo/blueish | bt_lib/construct_helpers.py | Python | gpl-3.0 | 4,289 |
#!/usr/bin/env python
"""
Extract genome annotation from a GFF (a tab delimited format for storing sequence features and annotations) file.
Requirements:
Numpy :- http://numpy.org/
Scipy :- http://scipy.org/
Copyright (C)
2009-2012 Friedrich Miescher Laboratory of the Max Planck Society, Tubingen, Germany.
2012-2013 Memorial Sloan-Kettering Cancer Center, New York City, USA.
"""
import re
import os
import sys
import urllib
import numpy as np
import scipy.io as sio
from collections import defaultdict
import helper as utils
def _attribute_tags(col9):
"""
    Split the key-value tags from the attribute column; takes column number 9 of a GTF/GFF file
"""
info = defaultdict(list)
is_gff = False
if not col9:
return is_gff, info
    # trim the trailing semicolon; UCSC files may include stray whitespace
col9 = col9.rstrip(';| ')
# attributes from 9th column
atbs = col9.split(" ; ")
if len(atbs) == 1:
atbs = col9.split("; ")
if len(atbs) == 1:
atbs = col9.split(";")
    # check for the GFF3 pattern, which has key=value pairs like: ID=gene00001;Name=EDEN
gff3_pat = re.compile("\w+=")
    # sometimes GTF entries look like: gene_id uc002zkg.1;
gtf_pat = re.compile("\s?\w+\s")
key_vals = []
if gff3_pat.match(atbs[0]): # gff3 pattern
is_gff = True
key_vals = [at.split('=') for at in atbs]
elif gtf_pat.match(atbs[0]): # gtf pattern
for at in atbs:
key_vals.append(at.strip().split(" ",1))
else:
# to handle attribute column has only single value
key_vals.append(['ID', atbs[0]])
# get key, val items
for item in key_vals:
key, val = item
        # remove the double quotes from the feature identifier
val = re.sub('"', '', val)
        # convert URL-encoded placeholders back to plain text
info[key].extend([urllib.unquote(v) for v in val.split(',') if v])
return is_gff, info
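# Illustrative examples of the two formats handled above (not part of the
# original module):
#   GFF3: 'ID=gene00001;Name=EDEN'
#         -> (True,  {'ID': ['gene00001'], 'Name': ['EDEN']})
#   GTF:  'gene_id "uc002zkg.1"; transcript_id "uc002zkg.1"'
#         -> (False, {'gene_id': ['uc002zkg.1'], 'transcript_id': ['uc002zkg.1']})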
def _spec_features_keywd(gff_parts):
"""
Specify the feature key word according to the GFF specifications
"""
for t_id in ["transcript_id", "transcriptId", "proteinId"]:
try:
gff_parts["info"]["Parent"] = gff_parts["info"][t_id]
break
except KeyError:
pass
for g_id in ["gene_id", "geneid", "geneId", "name", "gene_name", "genename"]:
try:
gff_parts["info"]["GParent"] = gff_parts["info"][g_id]
break
except KeyError:
pass
## TODO key words
for flat_name in ["Transcript", "CDS"]:
if gff_parts["info"].has_key(flat_name):
# parents
if gff_parts['type'] in [flat_name] or re.search(r'transcript', gff_parts['type'], re.IGNORECASE):
if not gff_parts['id']:
gff_parts['id'] = gff_parts['info'][flat_name][0]
#gff_parts["info"]["ID"] = [gff_parts["id"]]
# children
elif gff_parts["type"] in ["intron", "exon", "pseudogenic_exon", "three_prime_UTR",
"coding_exon", "five_prime_UTR", "CDS", "stop_codon",
"start_codon"]:
gff_parts["info"]["Parent"] = gff_parts["info"][flat_name]
break
return gff_parts
def Parse(ga_file):
"""
    Parse a GFF/GTF file based on feature relationships; takes the input file path.
"""
child_map = defaultdict(list)
parent_map = dict()
ga_handle = utils._open_file(ga_file)
for rec in ga_handle:
rec = rec.strip('\n\r')
# skip empty line fasta identifier and commented line
if not rec or rec[0] in ['#', '>']:
continue
# skip the genome sequence
if not re.search('\t', rec):
continue
parts = rec.split('\t')
assert len(parts) >= 8, rec
# process the attribute column (9th column)
ftype, tags = _attribute_tags(parts[-1])
if not tags: # skip the line if no attribute column.
continue
# extract fields
if parts[1]:
tags["source"] = parts[1]
if parts[7]:
tags["phase"] = parts[7]
gff_info = dict()
gff_info['info'] = dict(tags)
#gff_info["is_gff3"] = ftype
gff_info['chr'] = parts[0]
if parts[3] and parts[4]:
gff_info['location'] = [int(parts[3]) ,
int(parts[4])]
gff_info['type'] = parts[2]
gff_info['id'] = tags.get('ID', [''])[0]
if parts[6] in ['?', '.']:
parts[6] = None
gff_info['strand'] = parts[6]
# key word according to the GFF spec.
if not ftype:
gff_info = _spec_features_keywd(gff_info)
# link the feature relationships
if gff_info['info'].has_key('Parent'):
for p in gff_info['info']['Parent']:
if p == gff_info['id']:
gff_info['id'] = ''
break
rec_category = 'child'
elif gff_info['id']:
rec_category = 'parent'
else:
rec_category = 'record'
# depends on the record category organize the features
if rec_category == 'child':
for p in gff_info['info']['Parent']:
# create the data structure based on source and feature id
child_map[(gff_info['chr'], gff_info['info']['source'], p)].append(
dict( type = gff_info['type'],
location = gff_info['location'],
strand = gff_info['strand'],
ID = gff_info['id'],
gene_id = gff_info['info'].get('GParent', '')
))
elif rec_category == 'parent':
parent_map[(gff_info['chr'], gff_info['info']['source'], gff_info['id'])] = dict(
type = gff_info['type'],
location = gff_info['location'],
strand = gff_info['strand'],
name = tags.get('Name', [''])[0])
elif rec_category == 'record':
#TODO how to handle plain records?
            pass
ga_handle.close()
# depends on file type create parent feature
if not ftype:
parent_map, child_map = _create_missing_feature_type(parent_map, child_map)
# connecting parent child relations
# // essentially the parent child features are here from any type of GTF/GFF2/GFF3 file
gene_mat = _format_gene_models(parent_map, child_map)
return gene_mat
def _format_gene_models(parent_nf_map, child_nf_map):
"""
    Generate GeneObject based on the parsed file contents
parent_map: parent features with source and chromosome information
    child_map: transcript and exon information are encoded
"""
g_cnt = 0
gene = np.zeros((len(parent_nf_map),), dtype = utils.init_gene_DE())
for pkey, pdet in parent_nf_map.items():
# considering only gene features
if not re.search(r'gene', pdet.get('type', '')):
continue
        # infer the gene start and stop if not present in the annotation
if not pdet.get('location', []):
GNS, GNE = [], []
# multiple number of transcripts
for L1 in child_nf_map[pkey]:
GNS.append(L1.get('location', [])[0])
GNE.append(L1.get('location', [])[1])
GNS.sort()
GNE.sort()
pdet['location'] = [GNS[0], GNE[-1]]
orient = pdet.get('strand', '')
gene[g_cnt]['id'] = g_cnt +1
gene[g_cnt]['chr'] = pkey[0]
gene[g_cnt]['source'] = pkey[1]
gene[g_cnt]['name'] = pkey[-1]
gene[g_cnt]['start'] = pdet.get('location', [])[0]
gene[g_cnt]['stop'] = pdet.get('location', [])[1]
gene[g_cnt]['strand'] = orient
# default value
gene[g_cnt]['is_alt_spliced'] = 0
if len(child_nf_map[pkey]) > 1:
gene[g_cnt]['is_alt_spliced'] = 1
# complete sub-feature for all transcripts
dim = len(child_nf_map[pkey])
TRS = np.zeros((dim,), dtype=np.object)
EXON = np.zeros((dim,), dtype=np.object)
# fetching corresponding transcripts
for xq, Lv1 in enumerate(child_nf_map[pkey]):
TID = Lv1.get('ID', '')
TRS[xq]= np.array([TID])
orient = Lv1.get('strand', '')
# fetching different sub-features
child_feat = defaultdict(list)
for Lv2 in child_nf_map[(pkey[0], pkey[1], TID)]:
E_TYP = Lv2.get('type', '')
child_feat[E_TYP].append(Lv2.get('location'))
# make exon coordinate from cds and utr regions
if not child_feat.get('exon'):
if child_feat.get('CDS'):
exon_cod = utils.make_Exon_cod( orient,
NonetoemptyList(child_feat.get('five_prime_UTR')),
NonetoemptyList(child_feat.get('CDS')),
NonetoemptyList(child_feat.get('three_prime_UTR')))
child_feat['exon'] = exon_cod
else:
# searching through keys to find a pattern describing exon feature
ex_key_pattern = [k for k in child_feat if k.endswith("exon")]
child_feat['exon'] = child_feat[ex_key_pattern[0]]
# TODO only UTR's
# make general ascending order of coordinates
if orient == '-':
for etype, excod in child_feat.items():
if len(excod) > 1:
if excod[0][0] > excod[-1][0]:
excod.reverse()
child_feat[etype] = excod
# add sub-feature # make array for export to different out
EXON[xq] = np.array(child_feat.get('exon'), np.float64)
# add sub-features to the parent gene feature
gene[g_cnt]['transcripts'] = TRS
gene[g_cnt]['exons'] = EXON
gene[g_cnt]['gene_info'] = dict( ID = pkey[-1],
Name = pdet.get('name'),
Source = pkey[1])
g_cnt += 1
## deleting empty gene records from the main array
XPFLG = 0
for XP, ens in enumerate(gene):
if ens[0]==0:
XPFLG=1
break
if XPFLG==1:
XQC = range(XP, len(gene)+1)
gene = np.delete(gene, XQC)
return gene
def NonetoemptyList(XS):
"""
Convert a None type to empty list
"""
return [] if XS is None else XS
def _create_missing_feature_type(p_feat, c_feat):
"""
GFF/GTF file defines only child features. This function tries to create
the parent feature from the information provided in the attribute column.
example:
chr21 hg19_knownGene exon 9690071 9690100 0.000000 + . gene_id "uc002zkg.1"; transcript_id "uc002zkg.1";
chr21 hg19_knownGene exon 9692178 9692207 0.000000 + . gene_id "uc021wgt.1"; transcript_id "uc021wgt.1";
chr21 hg19_knownGene exon 9711935 9712038 0.000000 + . gene_id "uc011abu.2"; transcript_id "uc011abu.2";
This function gets the parsed feature annotations.
"""
child_n_map = defaultdict(list)
for fid, det in c_feat.items():
# get the details from grand child
GID = STRD = None
SPOS, EPOS = [], []
TYP = dict()
for gchild in det:
GID = gchild.get('gene_id', [''])[0]
SPOS.append(gchild.get('location', [])[0])
EPOS.append(gchild.get('location', [])[1])
STRD = gchild.get('strand', '')
TYP[gchild.get('type', '')] = 1
SPOS.sort()
EPOS.sort()
# infer transcript type
transcript_type = 'transcript'
transcript_type = 'mRNA' if TYP.get('CDS', '') or TYP.get('cds', '') else transcript_type
# gene id and transcript id are same
transcript_id = fid[-1]
if GID == transcript_id:
transcript_id = 'Transcript:' + str(GID)
# level -1 feature type
p_feat[(fid[0], fid[1], GID)] = dict( type = 'gene',
location = [], ## infer location based on multiple transcripts
strand = STRD,
name = GID )
# level -2 feature type
child_n_map[(fid[0], fid[1], GID)].append(
dict( type = transcript_type,
location = [SPOS[0], EPOS[-1]],
strand = STRD,
ID = transcript_id,
gene_id = '' ))
# reorganizing the grand child
for gchild in det:
child_n_map[(fid[0], fid[1], transcript_id)].append(
dict( type = gchild.get('type', ''),
location = gchild.get('location'),
strand = gchild.get('strand'),
ID = gchild.get('ID'),
gene_id = '' ))
return p_feat, child_n_map
## General instruction to use the above functions:
## Usage: GFFParser.py in.gff3 out.mat
try:
gff_file = sys.argv[1]
out_mat = sys.argv[2]
except:
print __doc__
sys.exit(-1)
## Parse the file according to its type and return the gene information --
gene_struct = Parse(gff_file)
## Write the gene annotations to a matlab struct array format --
sio.savemat(out_mat,
mdict = dict(genes = gene_struct),
format = '5',
oned_as = 'row')
| vipints/oqtans | oqtans_tools/rQuant/2.2/tools/GFFParser.py | Python | bsd-3-clause | 14,456 |
# -*- coding: utf-8 -*-
"""
SQLpie License (MIT License)
Copyright (c) 2011-2016 André Lessa, http://sqlpie.com
See LICENSE file.
"""
from flask import g
import sqlpie
import math, json
class Classifier(object):
USE_NUMBERS_AS_WEIGHTS_PARAM = "use_numbers_as_weights"
def __init__(self, model, subject_bucket=None, predicate=None):
self.model = model.strip()
self.subject_bucket = subject_bucket.strip()
self.predicate = predicate.strip()
self.model_id = sqlpie.Util.to_sha1(self.model)
self.subject_bucket_id = sqlpie.Util.to_sha1(subject_bucket)
self.predicate_id = sqlpie.Util.to_sha1(predicate)
m = sqlpie.Model().create(self.model, self.subject_bucket, self.predicate, \
self.model_id, self.subject_bucket_id, self.predicate_id)
@staticmethod
def train(model, relevant_features=[], use_numbers_as_weights=False):
model_id = sqlpie.Util.to_sha1(model.strip())
m = sqlpie.Model().get(model_id)
query = {"subject_bucket":m.subject_bucket, "predicate":m.predicate, \
"timestamp":{"start":m.last_observation}, "options":{"limit":1000, "offset":0}}
observations, total = sqlpie.Observation.get(query)
mc = sqlpie.ModelClassifier(m.model_id)
for o in observations:
subject_id = sqlpie.Util.to_sha1(o["subject_id"].strip())
if sqlpie.Predicate.convert_type(o["value"]) == sqlpie.Predicate.IS_LIST:
for value in o["value"]:
label = unicode(value)
mc.increment_label(subject_id, label)
elif sqlpie.Predicate.convert_type(o["value"]) == sqlpie.Predicate.IS_UNICODE:
label = o["value"]
mc.increment_label(subject_id, label)
else:
label = json.dumps(o["value"])
incr = 1
if use_numbers_as_weights and (sqlpie.Predicate.convert_type(o["value"]) == sqlpie.Predicate.IS_FLOAT or \
sqlpie.Predicate.convert_type(o["value"]) == sqlpie.Predicate.IS_INT):
weight = o["value"]
incr = incr * weight
mc.increment_label(subject_id, label, incr)
bucket_id = sqlpie.Util.to_sha1(o["object_bucket"])
document_id = sqlpie.Util.to_sha1(o["object_id"])
doc = sqlpie.Document.get(bucket_id, document_id)
features = sqlpie.Indexer.parse_features(doc.document, relevant_features)
counts = Classifier._count_words(features)
for feature, incr in list(counts.items()):
if use_numbers_as_weights and (sqlpie.Predicate.convert_type(o["value"]) == sqlpie.Predicate.IS_FLOAT or \
sqlpie.Predicate.convert_type(o["value"]) == sqlpie.Predicate.IS_INT):
weight = o["value"]
incr = incr * weight
mc.increment_feature(subject_id, feature, incr)
if sqlpie.Predicate.convert_type(o["value"]) == sqlpie.Predicate.IS_LIST:
for value in o["value"]:
label = unicode(value)
mc.increment_label_feature(subject_id, label, feature, incr)
elif sqlpie.Predicate.convert_type(o["value"]) == sqlpie.Predicate.IS_UNICODE:
label = o["value"]
mc.increment_label_feature(subject_id, label, feature, incr)
else:
label = json.dumps(o["value"])
mc.increment_label_feature(subject_id, label, feature, incr)
@staticmethod
def clear(model):
model_id = sqlpie.Util.to_sha1(model.strip())
mc = sqlpie.ModelClassifier(model_id)
mc.clear()
@staticmethod
def predict(model, subject, document, label_param=None, best_prediction_only=True):
model_id = sqlpie.Util.to_sha1(model.strip())
subject_id = sqlpie.Util.to_sha1(subject.strip())
mc = sqlpie.ModelClassifier(model_id)
features = sqlpie.Indexer.parse_features(document)
counts = Classifier._count_words(features)
target_labels = mc.get_labels(subject_id)
sum_all_labels = sum(target_labels.values())
sum_all_features = mc.sum_all_features(subject_id)
doc_features = mc.get_document_features(subject_id, features)
total, label_param_score, label_param_ratio = 0.0, 0.0, 0.0
scores = {}
for label in target_labels:
if sum_all_labels == 0:
prior_label = 0.0
else:
prior_label = target_labels[label] / sum_all_labels
log_prob_label = 0.0
sum_feature_values = mc.sum_feature_values(subject_id, label)
label_features = mc.get_label_features(subject_id, label, counts.keys())
for w, cnt in list(counts.items()):
if sum_all_features == 0 or w not in doc_features:
p_word = 0.0
else:
p_word = doc_features[w] / sum_all_features
if sum_feature_values == 0 or w not in label_features:
p_w_given_label = 0.0
else:
p_w_given_label = label_features[w] / sum_feature_values
if p_w_given_label > 0 and p_word > 0:
log_prob_label += math.log(cnt * p_w_given_label / p_word)
score_label = math.exp(log_prob_label + math.log(prior_label))
total += score_label
scores[label] = score_label
for k, v in scores.iteritems():
score_label = float("{0:.6f}".format(v))
if total == 0:
ratio_label = 0.0
else:
ratio_label = float("{0:.6f}".format((v/total) * 100))
scores[k] = {'label':k, '_score':score_label, '_ratio':ratio_label}
if k == label_param:
label_param_score, label_param_ratio = score_label, ratio_label
scores = [k[1] for k in sorted(scores.items(), key=lambda (k,v): -v["_score"])]
if best_prediction_only:
if len(scores) > 0 and label_param is None:
response = scores[0]
elif len(scores) > 0 and label_param is not None:
response = {'label':label_param, '_score':label_param_score, '_ratio':label_param_ratio}
else:
response = {'label':None, '_score':None, '_ratio':None}
else:
if label_param is not None:
response = {'label':label_param, '_score':label_param_score, '_ratio':label_param_ratio}
else:
response = scores
return response
#
# private
#
@staticmethod
def _count_words(words):
wc = {}
for word in words:
wc[word] = wc.get(word, 0.0) + 1.0
return wc
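# Illustrative end-to-end sketch (the model/bucket/predicate names are
# hypothetical, not part of SQLpie itself):
#   Classifier("spam-model", "inbox", "is_spam")    # register the model
#   Classifier.train("spam-model")                  # learn from new observations
#   Classifier.predict("spam-model", "inbox", "cheap pills now!")
#   #  -> {'label': ..., '_score': ..., '_ratio': ...}
# Internally, _count_words(["a", "b", "a"]) -> {'a': 2.0, 'b': 1.0}.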
| lessaworld/SQLpie | sqlpie/services/classifier.py | Python | mit | 6,933 |
import json
import logging
from django.contrib.auth import get_user_model
from django.test import TestCase
from rest_framework.test import APIClient
from apis.betterself.v1.urls import API_V1_LIST_CREATE_URL
from betterself.users.fixtures.factories import UserFactory
from betterself.users.tests.mixins.test_mixins import UsersTestsFixturesMixin
User = get_user_model()
logger = logging.getLogger(__name__)
# python manage.py test apis.betterself.v1.tests.test_base
# TODO: refactor these base classes into proper mixins using a template-method
# pattern, so the user fixtures don't have to be re-inherited everywhere.
class BaseAPIv1Tests(TestCase, UsersTestsFixturesMixin):
    # PAGINATION indicates whether the serializer returns paginated results;
    # paginated results are displayed slightly differently
PAGINATION = False
@classmethod
def setUpTestData(cls):
        # The base tests create user fixtures that are already logged in for
        # client_1 and client_2; this is useful because many other fixtures
        # are created from these "default" users.
cls.create_user_fixtures()
super(BaseAPIv1Tests, cls).setUpTestData()
def setUp(self):
# user has to be authenticated per each test!
self.client_1 = self.create_authenticated_user_on_client(APIClient(), self.user_1)
self.client_2 = self.create_authenticated_user_on_client(APIClient(), self.user_2)
# A cleaner rewrite of the v1 base class above, which was written much earlier.
class BaseAPIv2Tests(TestCase):
PAGINATION = False
@classmethod
def setUpTestData(cls):
cls.url = API_V1_LIST_CREATE_URL.format(cls.TEST_MODEL.RESOURCE_NAME)
cls.create_user_fixtures()
super().setUpTestData()
@classmethod
def create_user_fixtures(cls):
cls.user_1 = UserFactory(username=cls.username_1)
cls.user_2 = UserFactory(username=cls.username_2)
@classmethod
def create_authenticated_user_on_client(cls, client, user):
client.force_login(user)
# just a quick check just in case
assert user.is_authenticated()
return client
def setUp(self):
# this is kind of goofy when we've already set it as a class-attribute, but since there's a high
# probability someone may change something in the class - run this each time per test
self.user_1 = User.objects.get(username=self.username_1)
self.user_2 = User.objects.get(username=self.username_2)
# user has to be authenticated per each test!
self.client_1 = self.create_authenticated_user_on_client(APIClient(), self.user_1)
self.client_2 = self.create_authenticated_user_on_client(APIClient(), self.user_2)
super().setUp()
class GenericRESTMethodMixin(object):
def _make_post_request(self, client, request_parameters):
url = API_V1_LIST_CREATE_URL.format(self.TEST_MODEL.RESOURCE_NAME)
data = json.dumps(request_parameters)
request = client.post(url, data=data, content_type='application/json')
return request
def _make_get_request(self, client):
url = API_V1_LIST_CREATE_URL.format(self.TEST_MODEL.RESOURCE_NAME)
request = client.get(url)
return request
def _get_results_from_response(self, response):
# pagination puts data into results
if self.PAGINATION:
request_data = response.data['results']
else:
request_data = response.data
return request_data
| jeffshek/betterself | apis/betterself/v1/tests/test_base.py | Python | mit | 3,612 |
"""
Package configuration file
"""
from setuptools import setup
setup(
name='pylint-mccabe',
version='0.1.3',
author='Infoxchange Australia dev team',
author_email='[email protected]',
py_modules=['pylint_mccabe'],
url='http://pypi.python.org/pypi/pylint-mccabe/',
license='MIT',
description='McCabe complexity checker as a PyLint plugin',
long_description=open('README.rst').read(),
install_requires=[
'mccabe >= 0.2',
'pep8 >= 1.4.6',
'pylint >= 0.28.0',
],
)
| infoxchange/pylint-mccabe | setup.py | Python | mit | 537 |
import itchat
# Log in via the QR code that itchat displays.
itchat.login()
# Fetch the refreshed friend list; entry 0 is the logged-in account itself.
friends = itchat.get_friends(update=True)[0:]
# Map each friend's nickname to their status signature.
info = {}
for i in friends:
    info[i['NickName']] = i['Signature']
print(info)
| XiangYz/webscraper | itchat_test.py | Python | lgpl-2.1 | 157 |
import logging
from django.contrib.auth.mixins import LoginRequiredMixin
from django.http import HttpResponse, HttpResponseForbidden, HttpResponseNotFound
from django.views.generic import TemplateView, View
from zentral.utils.api_views import APIAuthError
from zentral.utils.http import user_agent_and_ip_address_from_request
from .conf import puppet_conf
from .events import post_puppet_report
logger = logging.getLogger('zentral.contrib.puppet.views')
# setup > puppet instances
class InstancesView(LoginRequiredMixin, TemplateView):
template_name = "puppet/instance_list.html"
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
ctx["setup"] = True
ctx["instances"] = list(puppet_conf.get_instances_with_secrets())
instances_count = len(ctx["instances"])
if instances_count == 0 or instances_count > 1:
suffix = "s"
else:
suffix = ""
ctx["title"] = "{} puppet instance{}".format(instances_count, suffix)
return ctx
# API
class PostReportView(View):
def post(self, request, *args, **kwargs):
try:
instance = puppet_conf.get_instance_with_secret(kwargs["secret"])
except APIAuthError:
return HttpResponseForbidden("Forbidden")
except KeyError:
return HttpResponseNotFound("Unknown puppet instance")
user_agent, ip = user_agent_and_ip_address_from_request(request)
if not request.encoding or request.encoding.lower() != "utf-8":
return HttpResponse("Unsupported encoding", status=415)
payload = request.body.decode(request.encoding)
post_puppet_report(instance, user_agent, ip, payload)
return HttpResponse("OK")
| zentralopensource/zentral | zentral/contrib/puppet/views.py | Python | apache-2.0 | 1,763 |
import time
from flask import request
from funcy import project
from redash import models
from redash.wsgi import api
from redash.permissions import require_access, require_admin_or_owner, view_only
from redash.handlers.base import BaseResource, require_fields, get_object_or_404
class AlertResource(BaseResource):
def get(self, alert_id):
alert = get_object_or_404(models.Alert.get_by_id_and_org, alert_id, self.current_org)
require_access(alert.groups, self.current_user, view_only)
return alert.to_dict()
def post(self, alert_id):
req = request.get_json(True)
params = project(req, ('options', 'name', 'query_id', 'rearm'))
alert = get_object_or_404(models.Alert.get_by_id_and_org, alert_id, self.current_org)
require_admin_or_owner(alert.user.id)
if 'query_id' in params:
params['query'] = params.pop('query_id')
alert.update_instance(**params)
self.record_event({
'action': 'edit',
'timestamp': int(time.time()),
'object_id': alert.id,
'object_type': 'alert'
})
return alert.to_dict()
class AlertListResource(BaseResource):
def post(self):
req = request.get_json(True)
require_fields(req, ('options', 'name', 'query_id'))
query = models.Query.get_by_id_and_org(req['query_id'], self.current_org)
require_access(query.groups, self.current_user, view_only)
alert = models.Alert.create(
name=req['name'],
query=query,
user=self.current_user,
options=req['options']
)
self.record_event({
'action': 'create',
'timestamp': int(time.time()),
'object_id': alert.id,
'object_type': 'alert'
})
# TODO: should be in model?
models.AlertSubscription.create(alert=alert, user=self.current_user)
self.record_event({
'action': 'subscribe',
'timestamp': int(time.time()),
'object_id': alert.id,
'object_type': 'alert'
})
return alert.to_dict()
def get(self):
return [alert.to_dict() for alert in models.Alert.all(groups=self.current_user.groups)]
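# Illustrative payload for the list resource above (the field names follow the
# require_fields check; the options shown are an assumed example):
#   POST /api/alerts
#   {"name": "High error rate", "query_id": 42,
#    "options": {"column": "errors", "op": ">", "value": 100}}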
class AlertSubscriptionListResource(BaseResource):
def post(self, alert_id):
alert = models.Alert.get_by_id_and_org(alert_id, self.current_org)
require_access(alert.groups, self.current_user, view_only)
subscription = models.AlertSubscription.create(alert=alert_id, user=self.current_user)
self.record_event({
'action': 'subscribe',
'timestamp': int(time.time()),
'object_id': alert_id,
'object_type': 'alert'
})
return subscription.to_dict()
def get(self, alert_id):
alert = models.Alert.get_by_id_and_org(alert_id, self.current_org)
require_access(alert.groups, self.current_user, view_only)
subscriptions = models.AlertSubscription.all(alert_id)
return [s.to_dict() for s in subscriptions]
class AlertSubscriptionResource(BaseResource):
def delete(self, alert_id, subscriber_id):
        require_admin_or_owner(subscriber_id)
        models.AlertSubscription.unsubscribe(alert_id, subscriber_id)
self.record_event({
'action': 'unsubscribe',
'timestamp': int(time.time()),
'object_id': alert_id,
'object_type': 'alert'
})
api.add_org_resource(AlertResource, '/api/alerts/<alert_id>', endpoint='alert')
api.add_org_resource(AlertSubscriptionListResource, '/api/alerts/<alert_id>/subscriptions', endpoint='alert_subscriptions')
api.add_org_resource(AlertSubscriptionResource, '/api/alerts/<alert_id>/subscriptions/<subscriber_id>', endpoint='alert_subscription')
api.add_org_resource(AlertListResource, '/api/alerts', endpoint='alerts')
| olivetree123/redash-x | redash/handlers/alerts.py | Python | bsd-2-clause | 3,932 |
from multiprocessing import Pool
def run_in_parallel(inputs, worker, no_of_processes=4):
"""Run given worker function in parallel with nunmber of processes given."""
# TODO multiprocessing does not work in PlanetLab nodes. OS Error 38!
# Fall back to another parallelization method if there's an error.
p = Pool(no_of_processes)
    p.map(worker, inputs)
    # make sure the worker processes are cleaned up
    p.close()
    p.join()
| fpdetective/fpdetective | src/crawler/parallelize.py | Python | gpl-3.0 | 373 |
import flask
import flask_buzz
app = flask.app.Flask(__name__)
def log_error(err):
flask.current_app.logger.error(str(err))
app.register_error_handler(
flask_buzz.FlaskBuzz,
flask_buzz.FlaskBuzz.build_error_handler(log_error),
)
@app.route('/')
def index():
raise flask_buzz.FlaskBuzz("There's a problem that should be logged")
if __name__ == '__main__':
app.run()
| dusktreader/flask-buzz | examples/tasks.py | Python | mit | 395 |
# Copyright (C) 2007-2012 Michael Foord & the mock team
# E-mail: fuzzyman AT voidspace DOT org DOT uk
# http://www.voidspace.org.uk/python/mock/
import os
import sys
import unittest
from unittest.test.testmock import support
from unittest.test.testmock.support import SomeClass, is_instance
from unittest.mock import (
NonCallableMock, CallableMixin, patch, sentinel,
    MagicMock, Mock, NonCallableMagicMock, _patch,
DEFAULT, call, _get_target
)
builtin_string = 'builtins'
PTModule = sys.modules[__name__]
MODNAME = '%s.PTModule' % __name__
def _get_proxy(obj, get_only=True):
class Proxy(object):
def __getattr__(self, name):
return getattr(obj, name)
if not get_only:
def __setattr__(self, name, value):
setattr(obj, name, value)
def __delattr__(self, name):
delattr(obj, name)
Proxy.__setattr__ = __setattr__
Proxy.__delattr__ = __delattr__
return Proxy()
# for use in the test
something = sentinel.Something
something_else = sentinel.SomethingElse
class Foo(object):
def __init__(self, a):
pass
def f(self, a):
pass
def g(self):
pass
foo = 'bar'
class Bar(object):
def a(self):
pass
foo_name = '%s.Foo' % __name__
def function(a, b=Foo):
pass
class Container(object):
def __init__(self):
self.values = {}
def __getitem__(self, name):
return self.values[name]
def __setitem__(self, name, value):
self.values[name] = value
def __delitem__(self, name):
del self.values[name]
def __iter__(self):
return iter(self.values)
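# Illustrative note (not part of the original test file): patch.dict accepts
# any object implementing the mapping protocol above, not just real dicts.
# >>> c = Container(); c['k'] = 'v'
# >>> with patch.dict(c, {'k': 'patched'}):
# ...     c['k']
# 'patched'
# >>> c['k']
# 'v'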
class PatchTest(unittest.TestCase):
def assertNotCallable(self, obj, magic=True):
MockClass = NonCallableMagicMock
if not magic:
MockClass = NonCallableMock
self.assertRaises(TypeError, obj)
self.assertTrue(is_instance(obj, MockClass))
self.assertFalse(is_instance(obj, CallableMixin))
def test_single_patchobject(self):
class Something(object):
attribute = sentinel.Original
@patch.object(Something, 'attribute', sentinel.Patched)
def test():
self.assertEqual(Something.attribute, sentinel.Patched, "unpatched")
test()
self.assertEqual(Something.attribute, sentinel.Original,
"patch not restored")
def test_patchobject_with_none(self):
class Something(object):
attribute = sentinel.Original
@patch.object(Something, 'attribute', None)
def test():
self.assertIsNone(Something.attribute, "unpatched")
test()
self.assertEqual(Something.attribute, sentinel.Original,
"patch not restored")
def test_multiple_patchobject(self):
class Something(object):
attribute = sentinel.Original
next_attribute = sentinel.Original2
@patch.object(Something, 'attribute', sentinel.Patched)
@patch.object(Something, 'next_attribute', sentinel.Patched2)
def test():
self.assertEqual(Something.attribute, sentinel.Patched,
"unpatched")
self.assertEqual(Something.next_attribute, sentinel.Patched2,
"unpatched")
test()
self.assertEqual(Something.attribute, sentinel.Original,
"patch not restored")
self.assertEqual(Something.next_attribute, sentinel.Original2,
"patch not restored")
def test_object_lookup_is_quite_lazy(self):
global something
original = something
@patch('%s.something' % __name__, sentinel.Something2)
def test():
pass
try:
something = sentinel.replacement_value
test()
self.assertEqual(something, sentinel.replacement_value)
finally:
something = original
def test_patch(self):
@patch('%s.something' % __name__, sentinel.Something2)
def test():
self.assertEqual(PTModule.something, sentinel.Something2,
"unpatched")
test()
self.assertEqual(PTModule.something, sentinel.Something,
"patch not restored")
@patch('%s.something' % __name__, sentinel.Something2)
@patch('%s.something_else' % __name__, sentinel.SomethingElse)
def test():
self.assertEqual(PTModule.something, sentinel.Something2,
"unpatched")
self.assertEqual(PTModule.something_else, sentinel.SomethingElse,
"unpatched")
self.assertEqual(PTModule.something, sentinel.Something,
"patch not restored")
self.assertEqual(PTModule.something_else, sentinel.SomethingElse,
"patch not restored")
# Test the patching and restoring works a second time
test()
self.assertEqual(PTModule.something, sentinel.Something,
"patch not restored")
self.assertEqual(PTModule.something_else, sentinel.SomethingElse,
"patch not restored")
mock = Mock()
mock.return_value = sentinel.Handle
@patch('%s.open' % builtin_string, mock)
def test():
self.assertEqual(open('filename', 'r'), sentinel.Handle,
"open not patched")
test()
test()
self.assertNotEqual(open, mock, "patch not restored")
def test_patch_class_attribute(self):
@patch('%s.SomeClass.class_attribute' % __name__,
sentinel.ClassAttribute)
def test():
self.assertEqual(PTModule.SomeClass.class_attribute,
sentinel.ClassAttribute, "unpatched")
test()
self.assertIsNone(PTModule.SomeClass.class_attribute,
"patch not restored")
def test_patchobject_with_default_mock(self):
class Test(object):
something = sentinel.Original
something2 = sentinel.Original2
@patch.object(Test, 'something')
def test(mock):
self.assertEqual(mock, Test.something,
"Mock not passed into test function")
self.assertIsInstance(mock, MagicMock,
"patch with two arguments did not create a mock")
test()
@patch.object(Test, 'something')
@patch.object(Test, 'something2')
def test(this1, this2, mock1, mock2):
self.assertEqual(this1, sentinel.this1,
"Patched function didn't receive initial argument")
self.assertEqual(this2, sentinel.this2,
"Patched function didn't receive second argument")
self.assertEqual(mock1, Test.something2,
"Mock not passed into test function")
self.assertEqual(mock2, Test.something,
"Second Mock not passed into test function")
            self.assertIsInstance(mock1, MagicMock,
                                  "patch with two arguments did not create a mock")
            self.assertIsInstance(mock2, MagicMock,
                                  "patch with two arguments did not create a mock")
# A hack to test that new mocks are passed the second time
self.assertNotEqual(outerMock1, mock1, "unexpected value for mock1")
            self.assertNotEqual(outerMock2, mock2, "unexpected value for mock2")
return mock1, mock2
outerMock1 = outerMock2 = None
outerMock1, outerMock2 = test(sentinel.this1, sentinel.this2)
# Test that executing a second time creates new mocks
test(sentinel.this1, sentinel.this2)
def test_patch_with_spec(self):
@patch('%s.SomeClass' % __name__, spec=SomeClass)
def test(MockSomeClass):
self.assertEqual(SomeClass, MockSomeClass)
self.assertTrue(is_instance(SomeClass.wibble, MagicMock))
self.assertRaises(AttributeError, lambda: SomeClass.not_wibble)
test()
def test_patchobject_with_spec(self):
@patch.object(SomeClass, 'class_attribute', spec=SomeClass)
def test(MockAttribute):
self.assertEqual(SomeClass.class_attribute, MockAttribute)
self.assertTrue(is_instance(SomeClass.class_attribute.wibble,
MagicMock))
self.assertRaises(AttributeError,
lambda: SomeClass.class_attribute.not_wibble)
test()
def test_patch_with_spec_as_list(self):
@patch('%s.SomeClass' % __name__, spec=['wibble'])
def test(MockSomeClass):
self.assertEqual(SomeClass, MockSomeClass)
self.assertTrue(is_instance(SomeClass.wibble, MagicMock))
self.assertRaises(AttributeError, lambda: SomeClass.not_wibble)
test()
def test_patchobject_with_spec_as_list(self):
@patch.object(SomeClass, 'class_attribute', spec=['wibble'])
def test(MockAttribute):
self.assertEqual(SomeClass.class_attribute, MockAttribute)
self.assertTrue(is_instance(SomeClass.class_attribute.wibble,
MagicMock))
self.assertRaises(AttributeError,
lambda: SomeClass.class_attribute.not_wibble)
test()
def test_nested_patch_with_spec_as_list(self):
# regression test for nested decorators
@patch('%s.open' % builtin_string)
@patch('%s.SomeClass' % __name__, spec=['wibble'])
def test(MockSomeClass, MockOpen):
self.assertEqual(SomeClass, MockSomeClass)
self.assertTrue(is_instance(SomeClass.wibble, MagicMock))
self.assertRaises(AttributeError, lambda: SomeClass.not_wibble)
test()
def test_patch_with_spec_as_boolean(self):
@patch('%s.SomeClass' % __name__, spec=True)
def test(MockSomeClass):
self.assertEqual(SomeClass, MockSomeClass)
# Should not raise attribute error
MockSomeClass.wibble
self.assertRaises(AttributeError, lambda: MockSomeClass.not_wibble)
test()
def test_patch_object_with_spec_as_boolean(self):
@patch.object(PTModule, 'SomeClass', spec=True)
def test(MockSomeClass):
self.assertEqual(SomeClass, MockSomeClass)
# Should not raise attribute error
MockSomeClass.wibble
self.assertRaises(AttributeError, lambda: MockSomeClass.not_wibble)
test()
def test_patch_class_acts_with_spec_is_inherited(self):
@patch('%s.SomeClass' % __name__, spec=True)
def test(MockSomeClass):
self.assertTrue(is_instance(MockSomeClass, MagicMock))
instance = MockSomeClass()
self.assertNotCallable(instance)
# Should not raise attribute error
instance.wibble
self.assertRaises(AttributeError, lambda: instance.not_wibble)
test()
def test_patch_with_create_mocks_non_existent_attributes(self):
@patch('%s.frooble' % builtin_string, sentinel.Frooble, create=True)
def test():
self.assertEqual(frooble, sentinel.Frooble)
test()
self.assertRaises(NameError, lambda: frooble)
def test_patchobject_with_create_mocks_non_existent_attributes(self):
@patch.object(SomeClass, 'frooble', sentinel.Frooble, create=True)
def test():
self.assertEqual(SomeClass.frooble, sentinel.Frooble)
test()
self.assertFalse(hasattr(SomeClass, 'frooble'))
def test_patch_wont_create_by_default(self):
try:
@patch('%s.frooble' % builtin_string, sentinel.Frooble)
def test():
self.assertEqual(frooble, sentinel.Frooble)
test()
except AttributeError:
pass
else:
self.fail('Patching non existent attributes should fail')
self.assertRaises(NameError, lambda: frooble)
def test_patchobject_wont_create_by_default(self):
try:
@patch.object(SomeClass, 'frooble', sentinel.Frooble)
def test():
self.fail('Patching non existent attributes should fail')
test()
except AttributeError:
pass
else:
self.fail('Patching non existent attributes should fail')
self.assertFalse(hasattr(SomeClass, 'frooble'))
def test_patch_with_static_methods(self):
class Foo(object):
@staticmethod
def woot():
return sentinel.Static
@patch.object(Foo, 'woot', staticmethod(lambda: sentinel.Patched))
def anonymous():
self.assertEqual(Foo.woot(), sentinel.Patched)
anonymous()
self.assertEqual(Foo.woot(), sentinel.Static)
def test_patch_local(self):
foo = sentinel.Foo
@patch.object(sentinel, 'Foo', 'Foo')
def anonymous():
self.assertEqual(sentinel.Foo, 'Foo')
anonymous()
self.assertEqual(sentinel.Foo, foo)
def test_patch_slots(self):
class Foo(object):
__slots__ = ('Foo',)
foo = Foo()
foo.Foo = sentinel.Foo
@patch.object(foo, 'Foo', 'Foo')
def anonymous():
self.assertEqual(foo.Foo, 'Foo')
anonymous()
self.assertEqual(foo.Foo, sentinel.Foo)
def test_patchobject_class_decorator(self):
class Something(object):
attribute = sentinel.Original
class Foo(object):
def test_method(other_self):
self.assertEqual(Something.attribute, sentinel.Patched,
"unpatched")
def not_test_method(other_self):
self.assertEqual(Something.attribute, sentinel.Original,
"non-test method patched")
Foo = patch.object(Something, 'attribute', sentinel.Patched)(Foo)
f = Foo()
f.test_method()
f.not_test_method()
self.assertEqual(Something.attribute, sentinel.Original,
"patch not restored")
def test_patch_class_decorator(self):
class Something(object):
attribute = sentinel.Original
class Foo(object):
def test_method(other_self, mock_something):
self.assertEqual(PTModule.something, mock_something,
"unpatched")
def not_test_method(other_self):
self.assertEqual(PTModule.something, sentinel.Something,
"non-test method patched")
Foo = patch('%s.something' % __name__)(Foo)
f = Foo()
f.test_method()
f.not_test_method()
self.assertEqual(Something.attribute, sentinel.Original,
"patch not restored")
self.assertEqual(PTModule.something, sentinel.Something,
"patch not restored")
def test_patchobject_twice(self):
class Something(object):
attribute = sentinel.Original
next_attribute = sentinel.Original2
@patch.object(Something, 'attribute', sentinel.Patched)
@patch.object(Something, 'attribute', sentinel.Patched)
def test():
self.assertEqual(Something.attribute, sentinel.Patched, "unpatched")
test()
self.assertEqual(Something.attribute, sentinel.Original,
"patch not restored")
def test_patch_dict(self):
foo = {'initial': object(), 'other': 'something'}
original = foo.copy()
@patch.dict(foo)
def test():
foo['a'] = 3
del foo['initial']
foo['other'] = 'something else'
test()
self.assertEqual(foo, original)
@patch.dict(foo, {'a': 'b'})
def test():
self.assertEqual(len(foo), 3)
self.assertEqual(foo['a'], 'b')
test()
self.assertEqual(foo, original)
@patch.dict(foo, [('a', 'b')])
def test():
self.assertEqual(len(foo), 3)
self.assertEqual(foo['a'], 'b')
test()
self.assertEqual(foo, original)
def test_patch_dict_with_container_object(self):
foo = Container()
foo['initial'] = object()
foo['other'] = 'something'
original = foo.values.copy()
@patch.dict(foo)
def test():
foo['a'] = 3
del foo['initial']
foo['other'] = 'something else'
test()
self.assertEqual(foo.values, original)
@patch.dict(foo, {'a': 'b'})
def test():
self.assertEqual(len(foo.values), 3)
self.assertEqual(foo['a'], 'b')
test()
self.assertEqual(foo.values, original)
def test_patch_dict_with_clear(self):
foo = {'initial': object(), 'other': 'something'}
original = foo.copy()
@patch.dict(foo, clear=True)
def test():
self.assertEqual(foo, {})
foo['a'] = 3
foo['other'] = 'something else'
test()
self.assertEqual(foo, original)
@patch.dict(foo, {'a': 'b'}, clear=True)
def test():
self.assertEqual(foo, {'a': 'b'})
test()
self.assertEqual(foo, original)
@patch.dict(foo, [('a', 'b')], clear=True)
def test():
self.assertEqual(foo, {'a': 'b'})
test()
self.assertEqual(foo, original)
def test_patch_dict_with_container_object_and_clear(self):
foo = Container()
foo['initial'] = object()
foo['other'] = 'something'
original = foo.values.copy()
@patch.dict(foo, clear=True)
def test():
self.assertEqual(foo.values, {})
foo['a'] = 3
foo['other'] = 'something else'
test()
self.assertEqual(foo.values, original)
@patch.dict(foo, {'a': 'b'}, clear=True)
def test():
self.assertEqual(foo.values, {'a': 'b'})
test()
self.assertEqual(foo.values, original)
def test_name_preserved(self):
foo = {}
@patch('%s.SomeClass' % __name__, object())
@patch('%s.SomeClass' % __name__, object(), autospec=True)
@patch.object(SomeClass, object())
@patch.dict(foo)
def some_name():
pass
self.assertEqual(some_name.__name__, 'some_name')
def test_patch_with_exception(self):
foo = {}
@patch.dict(foo, {'a': 'b'})
def test():
raise NameError('Konrad')
try:
test()
except NameError:
pass
else:
self.fail('NameError not raised by test')
self.assertEqual(foo, {})
def test_patch_dict_with_string(self):
@patch.dict('os.environ', {'konrad_delong': 'some value'})
def test():
self.assertIn('konrad_delong', os.environ)
test()
def test_patch_descriptor(self):
# would be some effort to fix this - we could special case the
# builtin descriptors: classmethod, property, staticmethod
return
class Nothing(object):
foo = None
class Something(object):
foo = {}
@patch.object(Nothing, 'foo', 2)
@classmethod
def klass(cls):
self.assertIs(cls, Something)
@patch.object(Nothing, 'foo', 2)
@staticmethod
def static(arg):
return arg
@patch.dict(foo)
@classmethod
def klass_dict(cls):
self.assertIs(cls, Something)
@patch.dict(foo)
@staticmethod
def static_dict(arg):
return arg
# these will raise exceptions if patching descriptors is broken
self.assertEqual(Something.static('f00'), 'f00')
Something.klass()
self.assertEqual(Something.static_dict('f00'), 'f00')
Something.klass_dict()
something = Something()
self.assertEqual(something.static('f00'), 'f00')
something.klass()
self.assertEqual(something.static_dict('f00'), 'f00')
something.klass_dict()
def test_patch_spec_set(self):
@patch('%s.SomeClass' % __name__, spec=SomeClass, spec_set=True)
def test(MockClass):
MockClass.z = 'foo'
self.assertRaises(AttributeError, test)
@patch.object(support, 'SomeClass', spec=SomeClass, spec_set=True)
def test(MockClass):
MockClass.z = 'foo'
self.assertRaises(AttributeError, test)
@patch('%s.SomeClass' % __name__, spec_set=True)
def test(MockClass):
MockClass.z = 'foo'
self.assertRaises(AttributeError, test)
@patch.object(support, 'SomeClass', spec_set=True)
def test(MockClass):
MockClass.z = 'foo'
self.assertRaises(AttributeError, test)
def test_spec_set_inherit(self):
@patch('%s.SomeClass' % __name__, spec_set=True)
def test(MockClass):
instance = MockClass()
instance.z = 'foo'
self.assertRaises(AttributeError, test)
def test_patch_start_stop(self):
original = something
patcher = patch('%s.something' % __name__)
self.assertIs(something, original)
mock = patcher.start()
try:
self.assertIsNot(mock, original)
self.assertIs(something, mock)
finally:
patcher.stop()
self.assertIs(something, original)
def test_stop_without_start(self):
patcher = patch(foo_name, 'bar', 3)
# calling stop without start used to produce a very obscure error
self.assertRaises(RuntimeError, patcher.stop)
def test_patchobject_start_stop(self):
original = something
patcher = patch.object(PTModule, 'something', 'foo')
self.assertIs(something, original)
replaced = patcher.start()
try:
self.assertEqual(replaced, 'foo')
self.assertIs(something, replaced)
finally:
patcher.stop()
self.assertIs(something, original)
def test_patch_dict_start_stop(self):
d = {'foo': 'bar'}
original = d.copy()
patcher = patch.dict(d, [('spam', 'eggs')], clear=True)
self.assertEqual(d, original)
patcher.start()
try:
self.assertEqual(d, {'spam': 'eggs'})
finally:
patcher.stop()
self.assertEqual(d, original)
def test_patch_dict_class_decorator(self):
this = self
d = {'spam': 'eggs'}
original = d.copy()
class Test(object):
def test_first(self):
this.assertEqual(d, {'foo': 'bar'})
def test_second(self):
this.assertEqual(d, {'foo': 'bar'})
Test = patch.dict(d, {'foo': 'bar'}, clear=True)(Test)
self.assertEqual(d, original)
test = Test()
test.test_first()
self.assertEqual(d, original)
test.test_second()
self.assertEqual(d, original)
test = Test()
test.test_first()
self.assertEqual(d, original)
test.test_second()
self.assertEqual(d, original)
def test_get_only_proxy(self):
class Something(object):
foo = 'foo'
class SomethingElse:
foo = 'foo'
        for thing in Something, SomethingElse, Something(), SomethingElse():
proxy = _get_proxy(thing)
@patch.object(proxy, 'foo', 'bar')
def test():
self.assertEqual(proxy.foo, 'bar')
test()
self.assertEqual(proxy.foo, 'foo')
self.assertEqual(thing.foo, 'foo')
self.assertNotIn('foo', proxy.__dict__)
def test_get_set_delete_proxy(self):
class Something(object):
foo = 'foo'
class SomethingElse:
foo = 'foo'
        for thing in Something, SomethingElse, Something(), SomethingElse():
            proxy = _get_proxy(thing, get_only=False)
@patch.object(proxy, 'foo', 'bar')
def test():
self.assertEqual(proxy.foo, 'bar')
test()
self.assertEqual(proxy.foo, 'foo')
self.assertEqual(thing.foo, 'foo')
self.assertNotIn('foo', proxy.__dict__)
def test_patch_keyword_args(self):
kwargs = {'side_effect': KeyError, 'foo.bar.return_value': 33,
'foo': MagicMock()}
patcher = patch(foo_name, **kwargs)
mock = patcher.start()
patcher.stop()
self.assertRaises(KeyError, mock)
self.assertEqual(mock.foo.bar(), 33)
self.assertIsInstance(mock.foo, MagicMock)
def test_patch_object_keyword_args(self):
kwargs = {'side_effect': KeyError, 'foo.bar.return_value': 33,
'foo': MagicMock()}
patcher = patch.object(Foo, 'f', **kwargs)
mock = patcher.start()
patcher.stop()
self.assertRaises(KeyError, mock)
self.assertEqual(mock.foo.bar(), 33)
self.assertIsInstance(mock.foo, MagicMock)
def test_patch_dict_keyword_args(self):
original = {'foo': 'bar'}
copy = original.copy()
patcher = patch.dict(original, foo=3, bar=4, baz=5)
patcher.start()
try:
self.assertEqual(original, dict(foo=3, bar=4, baz=5))
finally:
patcher.stop()
self.assertEqual(original, copy)
def test_autospec(self):
class Boo(object):
def __init__(self, a):
pass
def f(self, a):
pass
def g(self):
pass
foo = 'bar'
class Bar(object):
def a(self):
pass
def _test(mock):
mock(1)
mock.assert_called_with(1)
self.assertRaises(TypeError, mock)
def _test2(mock):
mock.f(1)
mock.f.assert_called_with(1)
self.assertRaises(TypeError, mock.f)
mock.g()
mock.g.assert_called_with()
self.assertRaises(TypeError, mock.g, 1)
self.assertRaises(AttributeError, getattr, mock, 'h')
mock.foo.lower()
mock.foo.lower.assert_called_with()
self.assertRaises(AttributeError, getattr, mock.foo, 'bar')
mock.Bar()
mock.Bar.assert_called_with()
mock.Bar.a()
mock.Bar.a.assert_called_with()
self.assertRaises(TypeError, mock.Bar.a, 1)
mock.Bar().a()
mock.Bar().a.assert_called_with()
self.assertRaises(TypeError, mock.Bar().a, 1)
self.assertRaises(AttributeError, getattr, mock.Bar, 'b')
self.assertRaises(AttributeError, getattr, mock.Bar(), 'b')
def function(mock):
_test(mock)
_test2(mock)
_test2(mock(1))
self.assertIs(mock, Foo)
return mock
test = patch(foo_name, autospec=True)(function)
mock = test()
self.assertIsNot(Foo, mock)
# test patching a second time works
test()
module = sys.modules[__name__]
test = patch.object(module, 'Foo', autospec=True)(function)
mock = test()
self.assertIsNot(Foo, mock)
# test patching a second time works
test()
def test_autospec_function(self):
@patch('%s.function' % __name__, autospec=True)
def test(mock):
function(1)
function.assert_called_with(1)
function(2, 3)
function.assert_called_with(2, 3)
self.assertRaises(TypeError, function)
self.assertRaises(AttributeError, getattr, function, 'foo')
test()
def test_autospec_keywords(self):
@patch('%s.function' % __name__, autospec=True,
return_value=3)
def test(mock_function):
#self.assertEqual(function.abc, 'foo')
return function(1, 2)
result = test()
self.assertEqual(result, 3)
def test_autospec_with_new(self):
patcher = patch('%s.function' % __name__, new=3, autospec=True)
self.assertRaises(TypeError, patcher.start)
module = sys.modules[__name__]
patcher = patch.object(module, 'function', new=3, autospec=True)
self.assertRaises(TypeError, patcher.start)
def test_autospec_with_object(self):
class Bar(Foo):
extra = []
patcher = patch(foo_name, autospec=Bar)
mock = patcher.start()
try:
self.assertIsInstance(mock, Bar)
self.assertIsInstance(mock.extra, list)
finally:
patcher.stop()
def test_autospec_inherits(self):
FooClass = Foo
patcher = patch(foo_name, autospec=True)
mock = patcher.start()
try:
self.assertIsInstance(mock, FooClass)
self.assertIsInstance(mock(3), FooClass)
finally:
patcher.stop()
def test_autospec_name(self):
patcher = patch(foo_name, autospec=True)
mock = patcher.start()
try:
self.assertIn(" name='Foo'", repr(mock))
self.assertIn(" name='Foo.f'", repr(mock.f))
self.assertIn(" name='Foo()'", repr(mock(None)))
self.assertIn(" name='Foo().f'", repr(mock(None).f))
finally:
patcher.stop()
def test_tracebacks(self):
@patch.object(Foo, 'f', object())
def test():
raise AssertionError
try:
test()
except:
err = sys.exc_info()
result = unittest.TextTestResult(None, None, 0)
traceback = result._exc_info_to_string(err, self)
self.assertIn('raise AssertionError', traceback)
def test_new_callable_patch(self):
patcher = patch(foo_name, new_callable=NonCallableMagicMock)
m1 = patcher.start()
patcher.stop()
m2 = patcher.start()
patcher.stop()
self.assertIsNot(m1, m2)
for mock in m1, m2:
            self.assertNotCallable(mock)
def test_new_callable_patch_object(self):
patcher = patch.object(Foo, 'f', new_callable=NonCallableMagicMock)
m1 = patcher.start()
patcher.stop()
m2 = patcher.start()
patcher.stop()
self.assertIsNot(m1, m2)
for mock in m1, m2:
            self.assertNotCallable(mock)
def test_new_callable_keyword_arguments(self):
class Bar(object):
kwargs = None
def __init__(self, **kwargs):
Bar.kwargs = kwargs
patcher = patch(foo_name, new_callable=Bar, arg1=1, arg2=2)
m = patcher.start()
try:
self.assertIs(type(m), Bar)
self.assertEqual(Bar.kwargs, dict(arg1=1, arg2=2))
finally:
patcher.stop()
def test_new_callable_spec(self):
class Bar(object):
kwargs = None
def __init__(self, **kwargs):
Bar.kwargs = kwargs
patcher = patch(foo_name, new_callable=Bar, spec=Bar)
patcher.start()
try:
self.assertEqual(Bar.kwargs, dict(spec=Bar))
finally:
patcher.stop()
patcher = patch(foo_name, new_callable=Bar, spec_set=Bar)
patcher.start()
try:
self.assertEqual(Bar.kwargs, dict(spec_set=Bar))
finally:
patcher.stop()
def test_new_callable_create(self):
non_existent_attr = '%s.weeeee' % foo_name
p = patch(non_existent_attr, new_callable=NonCallableMock)
self.assertRaises(AttributeError, p.start)
p = patch(non_existent_attr, new_callable=NonCallableMock,
create=True)
m = p.start()
try:
self.assertNotCallable(m, magic=False)
finally:
p.stop()
def test_new_callable_incompatible_with_new(self):
self.assertRaises(
ValueError, patch, foo_name, new=object(), new_callable=MagicMock
)
self.assertRaises(
ValueError, patch.object, Foo, 'f', new=object(),
new_callable=MagicMock
)
def test_new_callable_incompatible_with_autospec(self):
self.assertRaises(
ValueError, patch, foo_name, new_callable=MagicMock,
autospec=True
)
self.assertRaises(
ValueError, patch.object, Foo, 'f', new_callable=MagicMock,
autospec=True
)
def test_new_callable_inherit_for_mocks(self):
class MockSub(Mock):
pass
MockClasses = (
NonCallableMock, NonCallableMagicMock, MagicMock, Mock, MockSub
)
for Klass in MockClasses:
for arg in 'spec', 'spec_set':
kwargs = {arg: True}
p = patch(foo_name, new_callable=Klass, **kwargs)
m = p.start()
try:
instance = m.return_value
self.assertRaises(AttributeError, getattr, instance, 'x')
finally:
p.stop()
def test_new_callable_inherit_non_mock(self):
class NotAMock(object):
def __init__(self, spec):
self.spec = spec
p = patch(foo_name, new_callable=NotAMock, spec=True)
m = p.start()
try:
self.assertTrue(is_instance(m, NotAMock))
self.assertRaises(AttributeError, getattr, m, 'return_value')
finally:
p.stop()
self.assertEqual(m.spec, Foo)
def test_new_callable_class_decorating(self):
test = self
original = Foo
class SomeTest(object):
def _test(self, mock_foo):
test.assertIsNot(Foo, original)
test.assertIs(Foo, mock_foo)
test.assertIsInstance(Foo, SomeClass)
def test_two(self, mock_foo):
self._test(mock_foo)
def test_one(self, mock_foo):
self._test(mock_foo)
SomeTest = patch(foo_name, new_callable=SomeClass)(SomeTest)
SomeTest().test_one()
SomeTest().test_two()
self.assertIs(Foo, original)
def test_patch_multiple(self):
original_foo = Foo
original_f = Foo.f
original_g = Foo.g
patcher1 = patch.multiple(foo_name, f=1, g=2)
patcher2 = patch.multiple(Foo, f=1, g=2)
for patcher in patcher1, patcher2:
patcher.start()
try:
self.assertIs(Foo, original_foo)
self.assertEqual(Foo.f, 1)
self.assertEqual(Foo.g, 2)
finally:
patcher.stop()
self.assertIs(Foo, original_foo)
self.assertEqual(Foo.f, original_f)
self.assertEqual(Foo.g, original_g)
@patch.multiple(foo_name, f=3, g=4)
def test():
self.assertIs(Foo, original_foo)
self.assertEqual(Foo.f, 3)
self.assertEqual(Foo.g, 4)
test()
def test_patch_multiple_no_kwargs(self):
self.assertRaises(ValueError, patch.multiple, foo_name)
self.assertRaises(ValueError, patch.multiple, Foo)
def test_patch_multiple_create_mocks(self):
original_foo = Foo
original_f = Foo.f
original_g = Foo.g
@patch.multiple(foo_name, f=DEFAULT, g=3, foo=DEFAULT)
def test(f, foo):
self.assertIs(Foo, original_foo)
self.assertIs(Foo.f, f)
self.assertEqual(Foo.g, 3)
self.assertIs(Foo.foo, foo)
self.assertTrue(is_instance(f, MagicMock))
self.assertTrue(is_instance(foo, MagicMock))
test()
self.assertEqual(Foo.f, original_f)
self.assertEqual(Foo.g, original_g)
def test_patch_multiple_create_mocks_different_order(self):
# bug revealed by Jython!
original_f = Foo.f
original_g = Foo.g
patcher = patch.object(Foo, 'f', 3)
patcher.attribute_name = 'f'
other = patch.object(Foo, 'g', DEFAULT)
other.attribute_name = 'g'
patcher.additional_patchers = [other]
@patcher
def test(g):
self.assertIs(Foo.g, g)
self.assertEqual(Foo.f, 3)
test()
self.assertEqual(Foo.f, original_f)
self.assertEqual(Foo.g, original_g)
def test_patch_multiple_stacked_decorators(self):
original_foo = Foo
original_f = Foo.f
original_g = Foo.g
@patch.multiple(foo_name, f=DEFAULT)
@patch.multiple(foo_name, foo=DEFAULT)
@patch(foo_name + '.g')
def test1(g, **kwargs):
_test(g, **kwargs)
@patch.multiple(foo_name, f=DEFAULT)
@patch(foo_name + '.g')
@patch.multiple(foo_name, foo=DEFAULT)
def test2(g, **kwargs):
_test(g, **kwargs)
@patch(foo_name + '.g')
@patch.multiple(foo_name, f=DEFAULT)
@patch.multiple(foo_name, foo=DEFAULT)
def test3(g, **kwargs):
_test(g, **kwargs)
def _test(g, **kwargs):
f = kwargs.pop('f')
foo = kwargs.pop('foo')
self.assertFalse(kwargs)
self.assertIs(Foo, original_foo)
self.assertIs(Foo.f, f)
self.assertIs(Foo.g, g)
self.assertIs(Foo.foo, foo)
self.assertTrue(is_instance(f, MagicMock))
self.assertTrue(is_instance(g, MagicMock))
self.assertTrue(is_instance(foo, MagicMock))
test1()
test2()
test3()
self.assertEqual(Foo.f, original_f)
self.assertEqual(Foo.g, original_g)
def test_patch_multiple_create_mocks_patcher(self):
original_foo = Foo
original_f = Foo.f
original_g = Foo.g
patcher = patch.multiple(foo_name, f=DEFAULT, g=3, foo=DEFAULT)
result = patcher.start()
try:
f = result['f']
foo = result['foo']
self.assertEqual(set(result), set(['f', 'foo']))
self.assertIs(Foo, original_foo)
self.assertIs(Foo.f, f)
self.assertIs(Foo.foo, foo)
self.assertTrue(is_instance(f, MagicMock))
self.assertTrue(is_instance(foo, MagicMock))
finally:
patcher.stop()
self.assertEqual(Foo.f, original_f)
self.assertEqual(Foo.g, original_g)
def test_patch_multiple_decorating_class(self):
test = self
original_foo = Foo
original_f = Foo.f
original_g = Foo.g
class SomeTest(object):
def _test(self, f, foo):
test.assertIs(Foo, original_foo)
test.assertIs(Foo.f, f)
test.assertEqual(Foo.g, 3)
test.assertIs(Foo.foo, foo)
test.assertTrue(is_instance(f, MagicMock))
test.assertTrue(is_instance(foo, MagicMock))
def test_two(self, f, foo):
self._test(f, foo)
def test_one(self, f, foo):
self._test(f, foo)
SomeTest = patch.multiple(
foo_name, f=DEFAULT, g=3, foo=DEFAULT
)(SomeTest)
thing = SomeTest()
thing.test_one()
thing.test_two()
self.assertEqual(Foo.f, original_f)
self.assertEqual(Foo.g, original_g)
def test_patch_multiple_create(self):
patcher = patch.multiple(Foo, blam='blam')
self.assertRaises(AttributeError, patcher.start)
patcher = patch.multiple(Foo, blam='blam', create=True)
patcher.start()
try:
self.assertEqual(Foo.blam, 'blam')
finally:
patcher.stop()
self.assertFalse(hasattr(Foo, 'blam'))
def test_patch_multiple_spec_set(self):
# if spec_set works then we can assume that spec and autospec also
# work as the underlying machinery is the same
patcher = patch.multiple(Foo, foo=DEFAULT, spec_set=['a', 'b'])
result = patcher.start()
try:
self.assertEqual(Foo.foo, result['foo'])
Foo.foo.a(1)
Foo.foo.b(2)
Foo.foo.a.assert_called_with(1)
Foo.foo.b.assert_called_with(2)
self.assertRaises(AttributeError, setattr, Foo.foo, 'c', None)
finally:
patcher.stop()
def test_patch_multiple_new_callable(self):
class Thing(object):
pass
patcher = patch.multiple(
Foo, f=DEFAULT, g=DEFAULT, new_callable=Thing
)
result = patcher.start()
try:
self.assertIs(Foo.f, result['f'])
self.assertIs(Foo.g, result['g'])
self.assertIsInstance(Foo.f, Thing)
self.assertIsInstance(Foo.g, Thing)
self.assertIsNot(Foo.f, Foo.g)
finally:
patcher.stop()
def test_nested_patch_failure(self):
original_f = Foo.f
original_g = Foo.g
@patch.object(Foo, 'g', 1)
@patch.object(Foo, 'missing', 1)
@patch.object(Foo, 'f', 1)
def thing1():
pass
@patch.object(Foo, 'missing', 1)
@patch.object(Foo, 'g', 1)
@patch.object(Foo, 'f', 1)
def thing2():
pass
@patch.object(Foo, 'g', 1)
@patch.object(Foo, 'f', 1)
@patch.object(Foo, 'missing', 1)
def thing3():
pass
for func in thing1, thing2, thing3:
self.assertRaises(AttributeError, func)
self.assertEqual(Foo.f, original_f)
self.assertEqual(Foo.g, original_g)
def test_new_callable_failure(self):
original_f = Foo.f
original_g = Foo.g
original_foo = Foo.foo
def crasher():
raise NameError('crasher')
@patch.object(Foo, 'g', 1)
@patch.object(Foo, 'foo', new_callable=crasher)
@patch.object(Foo, 'f', 1)
def thing1():
pass
@patch.object(Foo, 'foo', new_callable=crasher)
@patch.object(Foo, 'g', 1)
@patch.object(Foo, 'f', 1)
def thing2():
pass
@patch.object(Foo, 'g', 1)
@patch.object(Foo, 'f', 1)
@patch.object(Foo, 'foo', new_callable=crasher)
def thing3():
pass
for func in thing1, thing2, thing3:
self.assertRaises(NameError, func)
self.assertEqual(Foo.f, original_f)
self.assertEqual(Foo.g, original_g)
self.assertEqual(Foo.foo, original_foo)
def test_patch_multiple_failure(self):
original_f = Foo.f
original_g = Foo.g
patcher = patch.object(Foo, 'f', 1)
patcher.attribute_name = 'f'
good = patch.object(Foo, 'g', 1)
good.attribute_name = 'g'
bad = patch.object(Foo, 'missing', 1)
bad.attribute_name = 'missing'
for additionals in [good, bad], [bad, good]:
patcher.additional_patchers = additionals
@patcher
def func():
pass
self.assertRaises(AttributeError, func)
self.assertEqual(Foo.f, original_f)
self.assertEqual(Foo.g, original_g)
def test_patch_multiple_new_callable_failure(self):
original_f = Foo.f
original_g = Foo.g
original_foo = Foo.foo
def crasher():
raise NameError('crasher')
patcher = patch.object(Foo, 'f', 1)
patcher.attribute_name = 'f'
good = patch.object(Foo, 'g', 1)
good.attribute_name = 'g'
bad = patch.object(Foo, 'foo', new_callable=crasher)
bad.attribute_name = 'foo'
for additionals in [good, bad], [bad, good]:
patcher.additional_patchers = additionals
@patcher
def func():
pass
self.assertRaises(NameError, func)
self.assertEqual(Foo.f, original_f)
self.assertEqual(Foo.g, original_g)
self.assertEqual(Foo.foo, original_foo)
def test_patch_multiple_string_subclasses(self):
Foo = type('Foo', (str,), {'fish': 'tasty'})
foo = Foo()
@patch.multiple(foo, fish='nearly gone')
def test():
self.assertEqual(foo.fish, 'nearly gone')
test()
self.assertEqual(foo.fish, 'tasty')
@patch('unittest.mock.patch.TEST_PREFIX', 'foo')
def test_patch_test_prefix(self):
class Foo(object):
thing = 'original'
def foo_one(self):
return self.thing
def foo_two(self):
return self.thing
def test_one(self):
return self.thing
def test_two(self):
return self.thing
Foo = patch.object(Foo, 'thing', 'changed')(Foo)
foo = Foo()
self.assertEqual(foo.foo_one(), 'changed')
self.assertEqual(foo.foo_two(), 'changed')
self.assertEqual(foo.test_one(), 'original')
self.assertEqual(foo.test_two(), 'original')
@patch('unittest.mock.patch.TEST_PREFIX', 'bar')
def test_patch_dict_test_prefix(self):
class Foo(object):
def bar_one(self):
return dict(the_dict)
def bar_two(self):
return dict(the_dict)
def test_one(self):
return dict(the_dict)
def test_two(self):
return dict(the_dict)
the_dict = {'key': 'original'}
Foo = patch.dict(the_dict, key='changed')(Foo)
        foo = Foo()
self.assertEqual(foo.bar_one(), {'key': 'changed'})
self.assertEqual(foo.bar_two(), {'key': 'changed'})
self.assertEqual(foo.test_one(), {'key': 'original'})
self.assertEqual(foo.test_two(), {'key': 'original'})
def test_patch_with_spec_mock_repr(self):
for arg in ('spec', 'autospec', 'spec_set'):
p = patch('%s.SomeClass' % __name__, **{arg: True})
m = p.start()
try:
self.assertIn(" name='SomeClass'", repr(m))
self.assertIn(" name='SomeClass.class_attribute'",
repr(m.class_attribute))
self.assertIn(" name='SomeClass()'", repr(m()))
self.assertIn(" name='SomeClass().class_attribute'",
repr(m().class_attribute))
finally:
p.stop()
def test_patch_nested_autospec_repr(self):
with patch('unittest.test.testmock.support', autospec=True) as m:
self.assertIn(" name='support.SomeClass.wibble()'",
repr(m.SomeClass.wibble()))
self.assertIn(" name='support.SomeClass().wibble()'",
repr(m.SomeClass().wibble()))
def test_mock_calls_with_patch(self):
for arg in ('spec', 'autospec', 'spec_set'):
p = patch('%s.SomeClass' % __name__, **{arg: True})
m = p.start()
try:
m.wibble()
kalls = [call.wibble()]
self.assertEqual(m.mock_calls, kalls)
self.assertEqual(m.method_calls, kalls)
self.assertEqual(m.wibble.mock_calls, [call()])
result = m()
kalls.append(call())
self.assertEqual(m.mock_calls, kalls)
result.wibble()
kalls.append(call().wibble())
self.assertEqual(m.mock_calls, kalls)
self.assertEqual(result.mock_calls, [call.wibble()])
self.assertEqual(result.wibble.mock_calls, [call()])
self.assertEqual(result.method_calls, [call.wibble()])
finally:
p.stop()
def test_patch_imports_lazily(self):
sys.modules.pop('squizz', None)
p1 = patch('squizz.squozz')
self.assertRaises(ImportError, p1.start)
squizz = Mock()
squizz.squozz = 6
sys.modules['squizz'] = squizz
p1 = patch('squizz.squozz')
squizz.squozz = 3
p1.start()
p1.stop()
self.assertEqual(squizz.squozz, 3)
    def test_patch_propagates_exc_on_exit(self):
class holder:
exc_info = None, None, None
class custom_patch(_patch):
def __exit__(self, etype=None, val=None, tb=None):
_patch.__exit__(self, etype, val, tb)
holder.exc_info = etype, val, tb
stop = __exit__
def with_custom_patch(target):
getter, attribute = _get_target(target)
return custom_patch(
getter, attribute, DEFAULT, None, False, None,
None, None, {}
)
@with_custom_patch('squizz.squozz')
def test(mock):
raise RuntimeError
self.assertRaises(RuntimeError, test)
self.assertIs(holder.exc_info[0], RuntimeError)
        self.assertIsNotNone(holder.exc_info[1],
                             'exception value not propagated')
        self.assertIsNotNone(holder.exc_info[2],
                             'exception traceback not propagated')
def test_create_and_specs(self):
for kwarg in ('spec', 'spec_set', 'autospec'):
p = patch('%s.doesnotexist' % __name__, create=True,
**{kwarg: True})
self.assertRaises(TypeError, p.start)
self.assertRaises(NameError, lambda: doesnotexist)
# check that spec with create is innocuous if the original exists
p = patch(MODNAME, create=True, **{kwarg: True})
p.start()
p.stop()
def test_multiple_specs(self):
original = PTModule
for kwarg in ('spec', 'spec_set'):
p = patch(MODNAME, autospec=0, **{kwarg: 0})
self.assertRaises(TypeError, p.start)
self.assertIs(PTModule, original)
for kwarg in ('spec', 'autospec'):
p = patch(MODNAME, spec_set=0, **{kwarg: 0})
self.assertRaises(TypeError, p.start)
self.assertIs(PTModule, original)
for kwarg in ('spec_set', 'autospec'):
p = patch(MODNAME, spec=0, **{kwarg: 0})
self.assertRaises(TypeError, p.start)
self.assertIs(PTModule, original)
def test_specs_false_instead_of_none(self):
p = patch(MODNAME, spec=False, spec_set=False, autospec=False)
mock = p.start()
try:
# no spec should have been set, so attribute access should not fail
mock.does_not_exist
mock.does_not_exist = 3
finally:
p.stop()
def test_falsey_spec(self):
for kwarg in ('spec', 'autospec', 'spec_set'):
p = patch(MODNAME, **{kwarg: 0})
m = p.start()
try:
                self.assertRaises(AttributeError, getattr, m, 'doesnotexist')
finally:
p.stop()
def test_spec_set_true(self):
for kwarg in ('spec', 'autospec'):
p = patch(MODNAME, spec_set=True, **{kwarg: True})
m = p.start()
try:
self.assertRaises(AttributeError, setattr, m,
'doesnotexist', 'something')
self.assertRaises(AttributeError, getattr, m, 'doesnotexist')
finally:
p.stop()
def test_callable_spec_as_list(self):
spec = ('__call__',)
p = patch(MODNAME, spec=spec)
m = p.start()
try:
self.assertTrue(callable(m))
finally:
p.stop()
def test_not_callable_spec_as_list(self):
spec = ('foo', 'bar')
p = patch(MODNAME, spec=spec)
m = p.start()
try:
self.assertFalse(callable(m))
finally:
p.stop()
def test_patch_stopall(self):
unlink = os.unlink
chdir = os.chdir
path = os.path
patch('os.unlink', something).start()
patch('os.chdir', something_else).start()
@patch('os.path')
def patched(mock_path):
patch.stopall()
self.assertIs(os.path, mock_path)
self.assertIs(os.unlink, unlink)
self.assertIs(os.chdir, chdir)
patched()
self.assertIs(os.path, path)
if __name__ == '__main__':
unittest.main()
| utluiz/utluiz.github.io | pyscript/Lib/unittest/test/testmock/testpatch.py | Python | mit | 54,911 |
class Solution(object):
def setZeroes(self, matrix):
"""
:type matrix: List[List[int]]
:rtype: void Do not return anything, modify matrix in-place instead.
"""
        # Guard against empty input; nothing to zero out.
        if not matrix or not matrix[0]:
            return
        # Use +inf as an in-place sentinel to mark cells that were originally
        # zero, so original zeros can be told apart from cells zeroed later.
        width, height = len(matrix[0]), len(matrix)
        # Pass 1: mark every original zero, then clear the rest of its row.
        for i in xrange(height):
            foundzero = False
            for j in xrange(width):
                if matrix[i][j] == 0:
                    foundzero = True
                    matrix[i][j] = float("inf")
            if not foundzero:
                continue
            for j in xrange(width):
                if matrix[i][j] != float("inf"):
                    matrix[i][j] = 0
        # Pass 2: clear every column that contains a sentinel (the sentinel
        # itself becomes 0 here as well).
        for i in xrange(width):
            foundtarget = False
            for j in xrange(height):
                if matrix[j][i] == float("inf"):
                    foundtarget = True
                    break
            if not foundtarget:
                continue
            for j in xrange(height):
                matrix[j][i] = 0
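
# Hedged usage sketch (not part of the original solution): a quick manual
# check of the in-place behavior. Assumes a Python 2 interpreter, since the
# solution above relies on xrange.
if __name__ == "__main__":
    grid = [[1, 1, 1], [1, 0, 1], [1, 1, 1]]
    Solution().setZeroes(grid)
    assert grid == [[1, 0, 1], [0, 0, 0], [1, 0, 1]]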
| hufeiya/leetcode | python/73_Set_Matrix_Zeroes.py | Python | gpl-2.0 | 1,004 |
import datetime
import pywikibot
from pywikibot.data.api import CachedRequest
import unittest
class DryAPITests(unittest.TestCase):
def setUp(self):
self.parms = {'site': pywikibot.Site('en'),
'action': 'query',
'meta': 'userinfo'}
self.req = CachedRequest(expiry=1, **self.parms)
self.expreq = CachedRequest(expiry=0, **self.parms)
self.diffreq = CachedRequest(expiry=1, site=pywikibot.Site('en'), action='query', meta='siteinfo')
self.diffsite = CachedRequest(expiry=1, site=pywikibot.Site('de'), action='query', meta='userinfo')
def test_expiry_formats(self):
self.assertEqual(self.req.expiry, CachedRequest(datetime.timedelta(days=1), **self.parms).expiry)
def test_get_cache_dir(self):
retval = self.req._get_cache_dir()
self.assertIn('apicache', retval)
def test_create_file_name(self):
self.assertEqual(self.req._create_file_name(), self.req._create_file_name())
self.assertEqual(self.req._create_file_name(), self.expreq._create_file_name())
self.assertNotEqual(self.req._create_file_name(), self.diffreq._create_file_name())
def test_cachefile_path(self):
self.assertEqual(self.req._cachefile_path(), self.req._cachefile_path())
self.assertEqual(self.req._cachefile_path(), self.expreq._cachefile_path())
self.assertNotEqual(self.req._cachefile_path(), self.diffreq._cachefile_path())
self.assertNotEqual(self.req._cachefile_path(), self.diffsite._cachefile_path())
def test_expired(self):
self.assertFalse(self.req._expired(datetime.datetime.now()))
self.assertTrue(self.req._expired(datetime.datetime.now() - datetime.timedelta(days=2)))
if __name__ == '__main__':
unittest.main()
| legoktm/pywikipedia-rewrite | tests/dry_api_tests.py | Python | mit | 1,850 |
#!/usr/bin/env python
import sys
import compileall
from os import walk, unlink
from os.path import join, splitext, exists
assert sys.argv[1], "usage: makepyc /path/to/lib"
for root, dirs, files in walk(sys.argv[1]):
for name in files:
if name.endswith('.pyc'):
pyc = join(root, name)
py = splitext(pyc)[0] + '.py'
if not exists(py):
print 'Removing orphaned', pyc, '...'
unlink(pyc)
compileall.compile_dir(sys.argv[1])
| AAFC-MBB/galaxy-cloudman-playbook | roles/galaxyprojectdotorg.galaxy/files/makepyc.py | Python | mit | 506 |
"""Support for Ecobee Thermostats."""
import collections
from typing import Optional
import voluptuous as vol
from homeassistant.components.climate import ClimateDevice
from homeassistant.components.climate.const import (
ATTR_TARGET_TEMP_HIGH,
ATTR_TARGET_TEMP_LOW,
CURRENT_HVAC_COOL,
CURRENT_HVAC_DRY,
CURRENT_HVAC_FAN,
CURRENT_HVAC_HEAT,
CURRENT_HVAC_IDLE,
FAN_AUTO,
FAN_ON,
HVAC_MODE_AUTO,
HVAC_MODE_COOL,
HVAC_MODE_HEAT,
HVAC_MODE_OFF,
PRESET_AWAY,
PRESET_NONE,
SUPPORT_AUX_HEAT,
SUPPORT_FAN_MODE,
SUPPORT_PRESET_MODE,
SUPPORT_TARGET_TEMPERATURE,
SUPPORT_TARGET_TEMPERATURE_RANGE,
)
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_TEMPERATURE,
STATE_ON,
TEMP_FAHRENHEIT,
)
import homeassistant.helpers.config_validation as cv
from homeassistant.util.temperature import convert
from .const import _LOGGER, DOMAIN, ECOBEE_MODEL_TO_NAME, MANUFACTURER
from .util import ecobee_date, ecobee_time
ATTR_COOL_TEMP = "cool_temp"
ATTR_END_DATE = "end_date"
ATTR_END_TIME = "end_time"
ATTR_FAN_MIN_ON_TIME = "fan_min_on_time"
ATTR_FAN_MODE = "fan_mode"
ATTR_HEAT_TEMP = "heat_temp"
ATTR_RESUME_ALL = "resume_all"
ATTR_START_DATE = "start_date"
ATTR_START_TIME = "start_time"
ATTR_VACATION_NAME = "vacation_name"
DEFAULT_RESUME_ALL = False
PRESET_TEMPERATURE = "temp"
PRESET_VACATION = "vacation"
PRESET_HOLD_NEXT_TRANSITION = "next_transition"
PRESET_HOLD_INDEFINITE = "indefinite"
AWAY_MODE = "awayMode"
PRESET_HOME = "home"
PRESET_SLEEP = "sleep"
# Order matters, because for reverse mapping we don't want to map HEAT to AUX
ECOBEE_HVAC_TO_HASS = collections.OrderedDict(
[
("heat", HVAC_MODE_HEAT),
("cool", HVAC_MODE_COOL),
("auto", HVAC_MODE_AUTO),
("off", HVAC_MODE_OFF),
("auxHeatOnly", HVAC_MODE_HEAT),
]
)
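# Illustration (a hedged sketch, not part of the integration): set_hvac_mode
# below does a reverse lookup over this mapping and takes the *first* ecobee
# mode whose HA value matches, which is why "heat" must precede "auxHeatOnly":
#   next(k for k, v in ECOBEE_HVAC_TO_HASS.items() if v == HVAC_MODE_HEAT)
#   # -> "heat", never "auxHeatOnly"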
ECOBEE_HVAC_ACTION_TO_HASS = {
# Map to None if we do not know how to represent.
"heatPump": CURRENT_HVAC_HEAT,
"heatPump2": CURRENT_HVAC_HEAT,
"heatPump3": CURRENT_HVAC_HEAT,
"compCool1": CURRENT_HVAC_COOL,
"compCool2": CURRENT_HVAC_COOL,
"auxHeat1": CURRENT_HVAC_HEAT,
"auxHeat2": CURRENT_HVAC_HEAT,
"auxHeat3": CURRENT_HVAC_HEAT,
"fan": CURRENT_HVAC_FAN,
"humidifier": None,
"dehumidifier": CURRENT_HVAC_DRY,
"ventilator": CURRENT_HVAC_FAN,
"economizer": CURRENT_HVAC_FAN,
"compHotWater": None,
"auxHotWater": None,
}
PRESET_TO_ECOBEE_HOLD = {
PRESET_HOLD_NEXT_TRANSITION: "nextTransition",
PRESET_HOLD_INDEFINITE: "indefinite",
}
SERVICE_CREATE_VACATION = "create_vacation"
SERVICE_DELETE_VACATION = "delete_vacation"
SERVICE_RESUME_PROGRAM = "resume_program"
SERVICE_SET_FAN_MIN_ON_TIME = "set_fan_min_on_time"
DTGROUP_INCLUSIVE_MSG = (
f"{ATTR_START_DATE}, {ATTR_START_TIME}, {ATTR_END_DATE}, "
f"and {ATTR_END_TIME} must be specified together"
)
CREATE_VACATION_SCHEMA = vol.Schema(
{
vol.Required(ATTR_ENTITY_ID): cv.entity_id,
vol.Required(ATTR_VACATION_NAME): vol.All(cv.string, vol.Length(max=12)),
vol.Required(ATTR_COOL_TEMP): vol.Coerce(float),
vol.Required(ATTR_HEAT_TEMP): vol.Coerce(float),
vol.Inclusive(
ATTR_START_DATE, "dtgroup", msg=DTGROUP_INCLUSIVE_MSG
): ecobee_date,
vol.Inclusive(
ATTR_START_TIME, "dtgroup", msg=DTGROUP_INCLUSIVE_MSG
): ecobee_time,
vol.Inclusive(ATTR_END_DATE, "dtgroup", msg=DTGROUP_INCLUSIVE_MSG): ecobee_date,
vol.Inclusive(ATTR_END_TIME, "dtgroup", msg=DTGROUP_INCLUSIVE_MSG): ecobee_time,
vol.Optional(ATTR_FAN_MODE, default="auto"): vol.Any("auto", "on"),
vol.Optional(ATTR_FAN_MIN_ON_TIME, default=0): vol.All(
int, vol.Range(min=0, max=60)
),
}
)
DELETE_VACATION_SCHEMA = vol.Schema(
{
vol.Required(ATTR_ENTITY_ID): cv.entity_id,
vol.Required(ATTR_VACATION_NAME): vol.All(cv.string, vol.Length(max=12)),
}
)
RESUME_PROGRAM_SCHEMA = vol.Schema(
{
vol.Optional(ATTR_ENTITY_ID): cv.entity_ids,
vol.Optional(ATTR_RESUME_ALL, default=DEFAULT_RESUME_ALL): cv.boolean,
}
)
SET_FAN_MIN_ON_TIME_SCHEMA = vol.Schema(
{
vol.Optional(ATTR_ENTITY_ID): cv.entity_ids,
vol.Required(ATTR_FAN_MIN_ON_TIME): vol.Coerce(int),
}
)
SUPPORT_FLAGS = (
SUPPORT_TARGET_TEMPERATURE
| SUPPORT_PRESET_MODE
| SUPPORT_AUX_HEAT
| SUPPORT_TARGET_TEMPERATURE_RANGE
| SUPPORT_FAN_MODE
)
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up the ecobee thermostat."""
data = hass.data[DOMAIN]
devices = [Thermostat(data, index) for index in range(len(data.ecobee.thermostats))]
async_add_entities(devices, True)
def create_vacation_service(service):
"""Create a vacation on the target thermostat."""
entity_id = service.data[ATTR_ENTITY_ID]
for thermostat in devices:
if thermostat.entity_id == entity_id:
thermostat.create_vacation(service.data)
thermostat.schedule_update_ha_state(True)
break
def delete_vacation_service(service):
"""Delete a vacation on the target thermostat."""
entity_id = service.data[ATTR_ENTITY_ID]
vacation_name = service.data[ATTR_VACATION_NAME]
for thermostat in devices:
if thermostat.entity_id == entity_id:
thermostat.delete_vacation(vacation_name)
thermostat.schedule_update_ha_state(True)
break
def fan_min_on_time_set_service(service):
"""Set the minimum fan on time on the target thermostats."""
entity_id = service.data.get(ATTR_ENTITY_ID)
fan_min_on_time = service.data[ATTR_FAN_MIN_ON_TIME]
if entity_id:
target_thermostats = [
device for device in devices if device.entity_id in entity_id
]
else:
target_thermostats = devices
for thermostat in target_thermostats:
thermostat.set_fan_min_on_time(str(fan_min_on_time))
thermostat.schedule_update_ha_state(True)
def resume_program_set_service(service):
"""Resume the program on the target thermostats."""
entity_id = service.data.get(ATTR_ENTITY_ID)
resume_all = service.data.get(ATTR_RESUME_ALL)
if entity_id:
target_thermostats = [
device for device in devices if device.entity_id in entity_id
]
else:
target_thermostats = devices
for thermostat in target_thermostats:
thermostat.resume_program(resume_all)
thermostat.schedule_update_ha_state(True)
hass.services.async_register(
DOMAIN,
SERVICE_CREATE_VACATION,
create_vacation_service,
schema=CREATE_VACATION_SCHEMA,
)
hass.services.async_register(
DOMAIN,
SERVICE_DELETE_VACATION,
delete_vacation_service,
schema=DELETE_VACATION_SCHEMA,
)
hass.services.async_register(
DOMAIN,
SERVICE_SET_FAN_MIN_ON_TIME,
fan_min_on_time_set_service,
schema=SET_FAN_MIN_ON_TIME_SCHEMA,
)
hass.services.async_register(
DOMAIN,
SERVICE_RESUME_PROGRAM,
resume_program_set_service,
schema=RESUME_PROGRAM_SCHEMA,
)
class Thermostat(ClimateDevice):
"""A thermostat class for Ecobee."""
def __init__(self, data, thermostat_index):
"""Initialize the thermostat."""
self.data = data
self.thermostat_index = thermostat_index
self.thermostat = self.data.ecobee.get_thermostat(self.thermostat_index)
self._name = self.thermostat["name"]
self.vacation = None
self._last_active_hvac_mode = HVAC_MODE_AUTO
self._operation_list = []
if (
self.thermostat["settings"]["heatStages"]
or self.thermostat["settings"]["hasHeatPump"]
):
self._operation_list.append(HVAC_MODE_HEAT)
if self.thermostat["settings"]["coolStages"]:
self._operation_list.append(HVAC_MODE_COOL)
if len(self._operation_list) == 2:
self._operation_list.insert(0, HVAC_MODE_AUTO)
self._operation_list.append(HVAC_MODE_OFF)
self._preset_modes = {
comfort["climateRef"]: comfort["name"]
for comfort in self.thermostat["program"]["climates"]
}
self._fan_modes = [FAN_AUTO, FAN_ON]
self.update_without_throttle = False
async def async_update(self):
"""Get the latest state from the thermostat."""
if self.update_without_throttle:
await self.data.update(no_throttle=True)
self.update_without_throttle = False
else:
await self.data.update()
self.thermostat = self.data.ecobee.get_thermostat(self.thermostat_index)
        if self.hvac_mode != HVAC_MODE_OFF:
self._last_active_hvac_mode = self.hvac_mode
@property
def available(self):
"""Return if device is available."""
return self.thermostat["runtime"]["connected"]
@property
def supported_features(self):
"""Return the list of supported features."""
return SUPPORT_FLAGS
@property
def name(self):
"""Return the name of the Ecobee Thermostat."""
return self.thermostat["name"]
@property
def unique_id(self):
"""Return a unique identifier for this ecobee thermostat."""
return self.thermostat["identifier"]
@property
def device_info(self):
"""Return device information for this ecobee thermostat."""
try:
model = f"{ECOBEE_MODEL_TO_NAME[self.thermostat['modelNumber']]} Thermostat"
except KeyError:
_LOGGER.error(
"Model number for ecobee thermostat %s not recognized. "
"Please visit this link and provide the following information: "
"https://github.com/home-assistant/home-assistant/issues/27172 "
"Unrecognized model number: %s",
self.name,
self.thermostat["modelNumber"],
)
return None
return {
"identifiers": {(DOMAIN, self.thermostat["identifier"])},
"name": self.name,
"manufacturer": MANUFACTURER,
"model": model,
}
@property
def temperature_unit(self):
"""Return the unit of measurement."""
return TEMP_FAHRENHEIT
@property
def current_temperature(self):
"""Return the current temperature."""
return self.thermostat["runtime"]["actualTemperature"] / 10.0
@property
def target_temperature_low(self):
"""Return the lower bound temperature we try to reach."""
if self.hvac_mode == HVAC_MODE_AUTO:
return self.thermostat["runtime"]["desiredHeat"] / 10.0
return None
@property
def target_temperature_high(self):
"""Return the upper bound temperature we try to reach."""
if self.hvac_mode == HVAC_MODE_AUTO:
return self.thermostat["runtime"]["desiredCool"] / 10.0
return None
@property
def target_temperature(self):
"""Return the temperature we try to reach."""
if self.hvac_mode == HVAC_MODE_AUTO:
return None
if self.hvac_mode == HVAC_MODE_HEAT:
return self.thermostat["runtime"]["desiredHeat"] / 10.0
if self.hvac_mode == HVAC_MODE_COOL:
return self.thermostat["runtime"]["desiredCool"] / 10.0
return None
@property
def fan(self):
"""Return the current fan status."""
if "fan" in self.thermostat["equipmentStatus"]:
return STATE_ON
return HVAC_MODE_OFF
@property
def fan_mode(self):
"""Return the fan setting."""
return self.thermostat["runtime"]["desiredFanMode"]
@property
def fan_modes(self):
"""Return the available fan modes."""
return self._fan_modes
@property
def preset_mode(self):
"""Return current preset mode."""
events = self.thermostat["events"]
for event in events:
if not event["running"]:
continue
if event["type"] == "hold":
if event["holdClimateRef"] in self._preset_modes:
return self._preset_modes[event["holdClimateRef"]]
# Any hold not based on a climate is a temp hold
return PRESET_TEMPERATURE
if event["type"].startswith("auto"):
# All auto modes are treated as holds
return event["type"][4:].lower()
if event["type"] == "vacation":
self.vacation = event["name"]
return PRESET_VACATION
return self._preset_modes[self.thermostat["program"]["currentClimateRef"]]
@property
def hvac_mode(self):
"""Return current operation."""
return ECOBEE_HVAC_TO_HASS[self.thermostat["settings"]["hvacMode"]]
@property
def hvac_modes(self):
"""Return the operation modes list."""
return self._operation_list
@property
def current_humidity(self) -> Optional[int]:
"""Return the current humidity."""
return self.thermostat["runtime"]["actualHumidity"]
@property
def hvac_action(self):
"""Return current HVAC action.
        Ecobee returns a CSV string listing the equipment that is active.
        We prioritize any heating/cooling equipment; otherwise we look at
        drying/fanning. Idle if nothing is going on.
        We are unable to map all actions to HA equivalents.
"""
if self.thermostat["equipmentStatus"] == "":
return CURRENT_HVAC_IDLE
actions = [
ECOBEE_HVAC_ACTION_TO_HASS[status]
for status in self.thermostat["equipmentStatus"].split(",")
if ECOBEE_HVAC_ACTION_TO_HASS[status] is not None
]
for action in (
CURRENT_HVAC_HEAT,
CURRENT_HVAC_COOL,
CURRENT_HVAC_DRY,
CURRENT_HVAC_FAN,
):
if action in actions:
return action
return CURRENT_HVAC_IDLE
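    # Illustration (hedged, not part of the integration): an equipmentStatus
    # of "fan,compCool1" maps to [CURRENT_HVAC_FAN, CURRENT_HVAC_COOL]; since
    # cooling outranks fanning in the priority loop above, hvac_action
    # reports CURRENT_HVAC_COOL.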
@property
def device_state_attributes(self):
"""Return device specific state attributes."""
status = self.thermostat["equipmentStatus"]
return {
"fan": self.fan,
"climate_mode": self._preset_modes[
self.thermostat["program"]["currentClimateRef"]
],
"equipment_running": status,
"fan_min_on_time": self.thermostat["settings"]["fanMinOnTime"],
}
@property
def is_aux_heat(self):
"""Return true if aux heater."""
return "auxHeat" in self.thermostat["equipmentStatus"]
def set_preset_mode(self, preset_mode):
"""Activate a preset."""
if preset_mode == self.preset_mode:
return
self.update_without_throttle = True
# If we are currently in vacation mode, cancel it.
if self.preset_mode == PRESET_VACATION:
self.data.ecobee.delete_vacation(self.thermostat_index, self.vacation)
if preset_mode == PRESET_AWAY:
self.data.ecobee.set_climate_hold(
self.thermostat_index, "away", "indefinite"
)
elif preset_mode == PRESET_TEMPERATURE:
self.set_temp_hold(self.current_temperature)
elif preset_mode in (PRESET_HOLD_NEXT_TRANSITION, PRESET_HOLD_INDEFINITE):
self.data.ecobee.set_climate_hold(
self.thermostat_index,
PRESET_TO_ECOBEE_HOLD[preset_mode],
self.hold_preference(),
)
elif preset_mode == PRESET_NONE:
self.data.ecobee.resume_program(self.thermostat_index)
elif preset_mode in self.preset_modes:
climate_ref = None
for comfort in self.thermostat["program"]["climates"]:
if comfort["name"] == preset_mode:
climate_ref = comfort["climateRef"]
break
if climate_ref is not None:
self.data.ecobee.set_climate_hold(
self.thermostat_index, climate_ref, self.hold_preference()
)
else:
_LOGGER.warning("Received unknown preset mode: %s", preset_mode)
else:
self.data.ecobee.set_climate_hold(
self.thermostat_index, preset_mode, self.hold_preference()
)
@property
def preset_modes(self):
"""Return available preset modes."""
return list(self._preset_modes.values())
def set_auto_temp_hold(self, heat_temp, cool_temp):
"""Set temperature hold in auto mode."""
if cool_temp is not None:
cool_temp_setpoint = cool_temp
else:
cool_temp_setpoint = self.thermostat["runtime"]["desiredCool"] / 10.0
if heat_temp is not None:
heat_temp_setpoint = heat_temp
else:
            heat_temp_setpoint = self.thermostat["runtime"]["desiredHeat"] / 10.0
self.data.ecobee.set_hold_temp(
self.thermostat_index,
cool_temp_setpoint,
heat_temp_setpoint,
self.hold_preference(),
)
_LOGGER.debug(
"Setting ecobee hold_temp to: heat=%s, is=%s, cool=%s, is=%s",
heat_temp,
isinstance(heat_temp, (int, float)),
cool_temp,
isinstance(cool_temp, (int, float)),
)
self.update_without_throttle = True
def set_fan_mode(self, fan_mode):
"""Set the fan mode. Valid values are "on" or "auto"."""
        if fan_mode.lower() not in (STATE_ON, HVAC_MODE_AUTO):
error = "Invalid fan_mode value: Valid values are 'on' or 'auto'"
_LOGGER.error(error)
return
cool_temp = self.thermostat["runtime"]["desiredCool"] / 10.0
heat_temp = self.thermostat["runtime"]["desiredHeat"] / 10.0
self.data.ecobee.set_fan_mode(
self.thermostat_index,
fan_mode,
cool_temp,
heat_temp,
self.hold_preference(),
)
_LOGGER.info("Setting fan mode to: %s", fan_mode)
def set_temp_hold(self, temp):
"""Set temperature hold in modes other than auto.
Ecobee API: It is good practice to set the heat and cool hold
temperatures to be the same, if the thermostat is in either heat, cool,
auxHeatOnly, or off mode. If the thermostat is in auto mode, an
additional rule is required. The cool hold temperature must be greater
than the heat hold temperature by at least the amount in the
heatCoolMinDelta property.
https://www.ecobee.com/home/developer/api/examples/ex5.shtml
"""
        if self.hvac_mode in (HVAC_MODE_HEAT, HVAC_MODE_COOL):
heat_temp = temp
cool_temp = temp
else:
delta = self.thermostat["settings"]["heatCoolMinDelta"] / 10
heat_temp = temp - delta
cool_temp = temp + delta
self.set_auto_temp_hold(heat_temp, cool_temp)
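    # Worked example (hedged, not part of the integration): in auto mode with
    # heatCoolMinDelta == 40 (ecobee reports tenths of a degree, so delta ==
    # 4.0) and temp == 70.0, set_temp_hold above yields heat_temp == 66.0 and
    # cool_temp == 74.0, satisfying the API rule that the cool hold must
    # exceed the heat hold by at least the delta.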
def set_temperature(self, **kwargs):
"""Set new target temperature."""
low_temp = kwargs.get(ATTR_TARGET_TEMP_LOW)
high_temp = kwargs.get(ATTR_TARGET_TEMP_HIGH)
temp = kwargs.get(ATTR_TEMPERATURE)
if self.hvac_mode == HVAC_MODE_AUTO and (
low_temp is not None or high_temp is not None
):
self.set_auto_temp_hold(low_temp, high_temp)
elif temp is not None:
self.set_temp_hold(temp)
else:
_LOGGER.error("Missing valid arguments for set_temperature in %s", kwargs)
def set_humidity(self, humidity):
"""Set the humidity level."""
self.data.ecobee.set_humidity(self.thermostat_index, humidity)
def set_hvac_mode(self, hvac_mode):
"""Set HVAC mode (auto, auxHeatOnly, cool, heat, off)."""
ecobee_value = next(
(k for k, v in ECOBEE_HVAC_TO_HASS.items() if v == hvac_mode), None
)
if ecobee_value is None:
_LOGGER.error("Invalid mode for set_hvac_mode: %s", hvac_mode)
return
self.data.ecobee.set_hvac_mode(self.thermostat_index, ecobee_value)
self.update_without_throttle = True
def set_fan_min_on_time(self, fan_min_on_time):
"""Set the minimum fan on time."""
self.data.ecobee.set_fan_min_on_time(self.thermostat_index, fan_min_on_time)
self.update_without_throttle = True
def resume_program(self, resume_all):
"""Resume the thermostat schedule program."""
self.data.ecobee.resume_program(
self.thermostat_index, "true" if resume_all else "false"
)
self.update_without_throttle = True
def hold_preference(self):
"""Return user preference setting for hold time."""
# Values returned from thermostat are 'useEndTime4hour',
# 'useEndTime2hour', 'nextTransition', 'indefinite', 'askMe'
default = self.thermostat["settings"]["holdAction"]
if default == "nextTransition":
return default
# add further conditions if other hold durations should be
# supported; note that this should not include 'indefinite'
# as an indefinite away hold is interpreted as away_mode
return "nextTransition"
def create_vacation(self, service_data):
"""Create a vacation with user-specified parameters."""
vacation_name = service_data[ATTR_VACATION_NAME]
cool_temp = convert(
service_data[ATTR_COOL_TEMP],
self.hass.config.units.temperature_unit,
TEMP_FAHRENHEIT,
)
heat_temp = convert(
service_data[ATTR_HEAT_TEMP],
self.hass.config.units.temperature_unit,
TEMP_FAHRENHEIT,
)
start_date = service_data.get(ATTR_START_DATE)
start_time = service_data.get(ATTR_START_TIME)
end_date = service_data.get(ATTR_END_DATE)
end_time = service_data.get(ATTR_END_TIME)
fan_mode = service_data[ATTR_FAN_MODE]
fan_min_on_time = service_data[ATTR_FAN_MIN_ON_TIME]
kwargs = {
key: value
for key, value in {
"start_date": start_date,
"start_time": start_time,
"end_date": end_date,
"end_time": end_time,
"fan_mode": fan_mode,
"fan_min_on_time": fan_min_on_time,
}.items()
if value is not None
}
_LOGGER.debug(
"Creating a vacation on thermostat %s with name %s, cool temp %s, heat temp %s, "
"and the following other parameters: %s",
self.name,
vacation_name,
cool_temp,
heat_temp,
kwargs,
)
self.data.ecobee.create_vacation(
self.thermostat_index, vacation_name, cool_temp, heat_temp, **kwargs
)
def delete_vacation(self, vacation_name):
"""Delete a vacation with the specified name."""
_LOGGER.debug(
"Deleting a vacation on thermostat %s with name %s",
self.name,
vacation_name,
)
self.data.ecobee.delete_vacation(self.thermostat_index, vacation_name)
def turn_on(self):
"""Set the thermostat to the last active HVAC mode."""
_LOGGER.debug(
"Turning on ecobee thermostat %s in %s mode",
self.name,
self._last_active_hvac_mode,
)
self.set_hvac_mode(self._last_active_hvac_mode)
| Teagan42/home-assistant | homeassistant/components/ecobee/climate.py | Python | apache-2.0 | 23,976 |
from utils import *
def make_plots(bar_plots_name, plots_name, file_names):
"""
Uses the JSON data in the given 'file_names' to write aggregate bar plots
to 'bar_plots_name' and aggregate plots to 'plots_name'.
"""
data = [read_json_data(file) for file in file_names]
output_bar_multiple(bar_plots_name, data)
output_plot_multiple(plots_name, data)
if __name__ == "__main__":
import sys
if len(sys.argv) < 2:
eprint("usage:", sys.argv[0], "<json-file-name> ...")
eprint(" Makes aggregate plots from all the data in the given JSON "
"files.")
sys.exit(1)
else:
make_plots("bar_plots.html", "plots.html", sys.argv[1:])
| Dudro/317_AI | make_plots.py | Python | gpl-2.0 | 710 |
#!/usr/bin/env python
#-*- coding:utf-8 -*-
"""
Trains Generic String and Spectrum kernel Support Vector Regressions for peptide protein binding affinity prediction
"""
import h5py as h
import matplotlib.pyplot as plt
import numpy as np
import os
import seaborn as sns; sns.set_palette("hls", 4)
from gs_kernel.gs_kernel import gs_gram_matrix
from itertools import product
from sklearn.svm import SVR as SupportVectorRegression
data_path = "./data/peptide/"
KERNEL_MATRIX_CACHE = os.path.join(data_path, "gs_kernel_matrix_cache.h5")
PHYSICOCHEMICAL_PROPERTY_FILE = os.path.join(data_path, "amino_acids_matrix/AA.blosum50.dat")
def sequences_to_vec(sequences, length=3):
d = {}
# Find all unique k-mers
for s in sequences:
for i in range(len(s) - (length - 1)):
d[s[i : i + length]] = True
assert len(s[i : i + length]) == length
# Construct a count vector for each sequence
seq_vec = np.zeros((len(sequences), len(d)), dtype=np.float)
for i, s in enumerate(sequences):
for j, kmer in enumerate(d):
seq_vec[i, j] = s.count(kmer)
return seq_vec
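# Hedged sketch (not part of the original script): for sequences_to_vec(
# ["ABCD", "BCDE"], length=3) the unique 3-mers are "ABC", "BCD" and "CDE"
# (dict iteration order may vary), so each row counts those substrings:
# "ABCD" -> 1 for "ABC", 1 for "BCD", 0 for "CDE"; "BCDE" -> 0, 1, 1.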
def cross_validation_gs_kernel(seq, is_train, folds):
# Cross-validation
# Repeat for each combination of candidate hyperparameter values
sigma_position_values = np.hstack((1e-9, np.logspace(0, 1, 5)))
sigma_amino_acid_values = np.hstack((1e-9, np.logspace(0, 1, 5)))
substring_length = 9
C_values = [0.01, 0.1, 1.0, 10., 100.]
best_result_aa_pos = {"score": -np.infty}
best_result_aa_only = {"score": -np.infty}
best_result_pos_only = {"score": -np.infty}
with h.File(KERNEL_MATRIX_CACHE, "w") as kernel_cache:
for sigma_pos, sigma_aa, C in product(sigma_position_values, sigma_amino_acid_values, C_values):
print "Parameters: sigma pos: {0:.4f} sigma amino acid: {1:.4f} C: {2:.4f}".format(sigma_pos, sigma_aa, C)
kernel_matrix_id = "{0:.4f}{1:.4f}{2:d}".format(sigma_pos, sigma_aa, substring_length)
if kernel_matrix_id in kernel_cache:
K = kernel_cache[kernel_matrix_id][...]
else:
K = gs_gram_matrix(seq, seq,
amino_acid_property_file=PHYSICOCHEMICAL_PROPERTY_FILE,
sigma_position=sigma_pos,
sigma_amino_acid=sigma_aa,
substring_length=substring_length)
kernel_cache.create_dataset(kernel_matrix_id, data=K)
K_train = K[is_train][:, is_train]
K_test = K[~is_train][:, is_train]
y_train = labels[is_train]
y_test = labels[~is_train]
# Cross-validation
fold_scores = []
for fold in np.unique(folds):
print "...Fold {0:d}".format(fold + 1)
fold_K_train = K_train[folds != fold][:, folds != fold]
fold_K_test = K_train[folds == fold][:, folds != fold]
fold_y_train = y_train[folds != fold]
fold_y_test = y_train[folds == fold]
fold_estimator = SupportVectorRegression(kernel="precomputed", C=C)
fold_estimator.fit(fold_K_train.copy(), fold_y_train)
fold_scores.append(fold_estimator.score(fold_K_test, fold_y_test))
cv_score = np.mean(fold_scores)
print "...... cv score:", cv_score
if cv_score > best_result_aa_pos["score"]:
best_result_aa_pos["score"] = cv_score
best_result_aa_pos["K"] = dict(train=K_train, test=K_test, full=K)
best_result_aa_pos["estimator"] = SupportVectorRegression(kernel="precomputed", C=C).fit(K_train, y_train)
best_result_aa_pos["hps"] = dict(sigma_position=sigma_pos, sigma_amino_acid=sigma_aa, C=C)
if np.isclose(sigma_pos, 1e-9) and cv_score > best_result_aa_only["score"]:
best_result_aa_only["score"] = cv_score
best_result_aa_only["K"] = dict(train=K_train, test=K_test, full=K)
best_result_aa_only["estimator"] = SupportVectorRegression(kernel="precomputed", C=C).fit(K_train, y_train)
best_result_aa_only["hps"] = dict(sigma_position=sigma_pos, sigma_amino_acid=sigma_aa, C=C)
if np.isclose(sigma_aa, 1e-9) and cv_score > best_result_pos_only["score"]:
best_result_pos_only["score"] = cv_score
best_result_pos_only["K"] = dict(train=K_train, test=K_test, full=K)
best_result_pos_only["estimator"] = SupportVectorRegression(kernel="precomputed", C=C).fit(K_train, y_train)
best_result_pos_only["hps"] = dict(sigma_position=sigma_pos, sigma_amino_acid=sigma_aa, C=C)
print
print
return best_result_aa_pos, best_result_aa_only, best_result_pos_only
def cross_validation_spectrum_kernel(seq, is_train, folds):
# Cross-validation
# Repeat for each combination of candidate hyperparameter values
substring_length = 3
C_values = [0.01, 0.1, 1.0, 10., 100.]
best_result = {"score": -np.infty}
seq_vec = sequences_to_vec(seq, substring_length)
K = np.dot(seq_vec, seq_vec.T)
K_train = K[is_train][:, is_train]
K_test = K[~is_train][:, is_train]
y_train = labels[is_train]
y_test = labels[~is_train]
for C in C_values:
print "Parameters: C: {0:.4f}".format(C)
# Cross-validation
fold_scores = []
for fold in np.unique(folds):
print "...Fold {0:d}".format(fold + 1)
fold_K_train = K_train[folds != fold][:, folds != fold]
fold_K_test = K_train[folds == fold][:, folds != fold]
fold_y_train = y_train[folds != fold]
fold_y_test = y_train[folds == fold]
fold_estimator = SupportVectorRegression(kernel="precomputed", C=C)
fold_estimator.fit(fold_K_train.copy(), fold_y_train)
fold_scores.append(fold_estimator.score(fold_K_test, fold_y_test))
cv_score = np.mean(fold_scores)
print "...... cv score:", cv_score
if cv_score > best_result["score"]:
best_result["score"] = cv_score
best_result["K"] = dict(train=K_train, test=K_test, full=K)
best_result["estimator"] = SupportVectorRegression(kernel="precomputed", C=C).fit(K_train, y_train)
best_result["hps"] = dict(C=C)
print
print
return best_result
for ds in os.listdir(data_path):
if ".dat" not in ds:
continue
if "DRB1_0701" not in ds: # Limit to this allele for the tutorial
continue
random_state = np.random.RandomState(42)
dataset_path = os.path.join(data_path, ds)
    seq, labels = zip(*[(l.strip().split()[1], l.strip().split()[2]) for l in open(dataset_path, "r")])
labels = np.array(labels, dtype=np.float)
# Split the data set into a training (80% of the data) and testing set (20% of the data)
is_train = random_state.binomial(1, 0.8, len(labels)).astype(np.bool)
y_train = labels[is_train]
y_test = labels[~is_train]
# Assign each training example to a cross-validation fold
n_folds = 5
folds = np.arange(is_train.sum()) % n_folds
random_state.shuffle(folds)
best_result_aa_pos, best_result_aa_only, best_result_pos_only = cross_validation_gs_kernel(seq, is_train, folds)
best_result_spectrum = cross_validation_spectrum_kernel(seq, is_train, folds)
# Figure 1: GS kernel matrix with the selected hyperparameters
plt.clf()
cm = sns.clustermap(best_result_aa_pos["K"]["full"])
cm.ax_heatmap.tick_params(labelbottom="off", labelright="off")
cm.ax_col_dendrogram.set_title("Generic String Kernel Matrix for {0:d} peptides".format(len(labels)))
plt.savefig("gs_kernel_low_res.png", dpi=100, bbox_inches="tight")
plt.savefig("gs_kernel_high_res.png", dpi=400, bbox_inches="tight")
plt.show()
# Figure 2: Comparison of the predictive performance of GS kernel variants
plt.clf()
width = 0.5
plt.bar([1], [best_result_aa_pos["estimator"].score(best_result_aa_pos["K"]["test"], y_test)], width, label="GS (Alignment + Physicochemical)")
plt.bar([1 + width], [best_result_aa_only["estimator"].score(best_result_aa_only["K"]["test"], y_test)], width, label="GS (Physicochemical)")
plt.bar([1 + 2 * width], [best_result_pos_only["estimator"].score(best_result_pos_only["K"]["test"], y_test)], width, label="GS (Alignment)")
plt.xlabel("Method")
plt.ylabel("Coefficient of determination ($R^2$)")
plt.gca().tick_params(labelbottom='off')
plt.legend()
plt.savefig("gs_variants_low_res.png", dpi=100, bbox_inches="tight")
plt.savefig("gs_variants_high_res.png", dpi=400, bbox_inches="tight")
plt.show()
# Figure 3: Comparison of the GS kernel and the Spectrum kernel
plt.clf()
plt.scatter(y_test, best_result_aa_pos["estimator"].predict(best_result_aa_pos["K"]["test"]),
label="GS (Alignment + Physicochemical) [$R^2=${0:.3f}]".format(
best_result_aa_pos["estimator"].score(best_result_aa_pos["K"]["test"], y_test)))
plt.scatter(y_test, best_result_spectrum["estimator"].predict(best_result_spectrum["K"]["test"]),
label="Spectrum [$R^2=${0:.3f}]".format(
best_result_spectrum["estimator"].score(best_result_spectrum["K"]["test"], y_test)))
plt.plot([0, y_test.max()], [0, y_test.max()], color="black")
plt.xlabel("True binding affinity")
plt.ylabel("Predicted binding affinity")
plt.legend()
plt.savefig("gs_vs_spectrum_low_res.png", dpi=100, bbox_inches="tight")
plt.savefig("gs_vs_spectrum_high_res.png", dpi=400, bbox_inches="tight")
plt.show() | aldro61/microbiome-summer-school-2017 | exercises/code/applications.peptide.binding.py | Python | mit | 9,808 |
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from kerberos_common import *
from ambari_commons.os_check import OSCheck
class KerberosServer(KerberosScript):
@staticmethod
def write_kadm5_acl():
import params
Directory(params.kadm5_acl_dir,
owner='root',
create_parents=True,
group='root',
mode=0700
)
if (params.kadm5_acl_template is None) or not params.kadm5_acl_template.strip():
content = Template('kadm5_acl.j2')
else:
content = InlineTemplate(params.kadm5_acl_template)
File(params.kadm5_acl_path,
content=content,
owner='root',
group='root',
mode=0600
)
@staticmethod
def write_kdc_conf():
import params
Directory(params.kdc_conf_dir,
owner='root',
create_parents=True,
group='root',
mode=0700
)
if (params.kdc_conf_template is None) or not params.kdc_conf_template.strip():
content = Template('kdc_conf.j2')
else:
content = InlineTemplate(params.kdc_conf_template)
File(params.kdc_conf_path,
content=content,
owner='root',
group='root',
mode=0600
)
def install(self, env):
import params
self.install_packages(env)
self.configure(env)
# Create the Kerberos database (only on install, for now)
Execute(
"%s create -s -P '%s'" % (params.kdb5_util_path, KerberosScript.create_random_password()))
# Create or update the administrator account
KerberosScript.create_or_update_administrator_identity()
def start(self, env):
# Attempt to reconfigure the service before starting
self.configure(env)
# Create or update the administrator account
KerberosScript.create_or_update_administrator_identity()
if OSCheck.is_suse_family():
Execute('rckadmind start')
Execute('rckrb5kdc start')
elif OSCheck.is_ubuntu_family():
Execute('service krb5-kdc start')
Execute('service krb5-admin-server start')
else:
Execute('service krb5kdc start')
Execute('service kadmin start')
def stop(self, env):
if OSCheck.is_suse_family():
Execute('rckadmind stop')
Execute('rckrb5kdc stop')
elif OSCheck.is_ubuntu_family():
Execute('service krb5-kdc stop')
Execute('service krb5-admin-server stop')
else:
Execute('service krb5kdc stop')
Execute('service kadmin stop')
def configure(self, env):
import params
env.set_params(params)
KerberosServer.write_krb5_conf()
KerberosServer.write_kdc_conf()
KerberosServer.write_kadm5_acl()
def status(self, env):
import params
if OSCheck.is_suse_family():
try:
Execute('checkproc `which krb5kdc`')
Execute('checkproc `which kadmind`')
except Fail as ex:
raise ComponentIsNotRunning()
elif OSCheck.is_ubuntu_family():
check_process_status(params.kdamin_pid_path)
check_process_status(params.krb5kdc_pid_path)
else:
check_process_status(params.kdamin_pid_path)
check_process_status(params.krb5kdc_pid_path)
if __name__ == "__main__":
KerberosServer().execute()
| arenadata/ambari | ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KERBEROS/package/scripts/kerberos_server.py | Python | apache-2.0 | 3,944 |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""IO utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import copy
import functools
import json
import math
import multiprocessing
import string
import h5py
from language.capwap.utils import image_utils
import numpy as np
import tensorflow.compat.v1 as tf
MAX_THREADS = 64
# ------------------------------------------------------------------------------
#
# TF Example helpers.
#
# ------------------------------------------------------------------------------
def int64_feature(value):
return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
def float_feature(value):
return tf.train.Feature(float_list=tf.train.FloatList(value=[value]))
def bytes_feature(value):
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def string_feature(value):
return bytes_feature(value.encode("utf8"))
def int64_feature_list(values):
return tf.train.Feature(int64_list=tf.train.Int64List(value=values))
def float_feature_list(values):
return tf.train.Feature(float_list=tf.train.FloatList(value=values))
def bytes_feature_list(values):
return tf.train.Feature(bytes_list=tf.train.BytesList(value=values))
def string_feature_list(values):
return bytes_feature([v.encode("utf8") for v in values])
def caption_example(image):
"""Convert image caption data into an Example proto.
Args:
image: A ImageMetadata instance.
Returns:
example: An Example proto with serialized tensor data.
"""
# Collect image object information from metadata.
image_features, positions = read_object(image.objects, image.image_id)
# Serialize multi-dimensional tensor data.
captions_proto = tf.make_tensor_proto(np.array(image.captions))
features_proto = tf.make_tensor_proto(image_features)
positions_proto = tf.make_tensor_proto(positions)
# Create final features dict.
features = dict(
image_id=int64_feature(image.image_id),
captions=bytes_feature(captions_proto.SerializeToString()),
object_features=bytes_feature(features_proto.SerializeToString()),
object_positions=bytes_feature(positions_proto.SerializeToString()))
return tf.train.Example(features=tf.train.Features(feature=features))
def vqa_example(image):
"""Convert visual qa data into an Example proto.
Args:
image: An ImageMetadata instance.
Returns:
example: An Example proto with serialized tensor data.
"""
# Collect image object information from metadata.
image_features, positions = read_object(image.objects, image.image_id)
# Serialize multi-dimensional tensor data.
captions_proto = tf.make_tensor_proto(np.array(image.captions))
question_ids_proto = tf.make_tensor_proto(np.array(image.question_ids))
questions_proto = tf.make_tensor_proto(np.array(image.questions))
features_proto = tf.make_tensor_proto(image_features)
positions_proto = tf.make_tensor_proto(positions)
# Take the first answer always for simplicity.
# This is only used for training and unofficial eval.
answers = copy.deepcopy(image.answers)
for i, answer in enumerate(answers):
answers[i] = answer[0]
answers_proto = tf.make_tensor_proto(np.array(answers))
# Create final features dict.
features = dict(
image_id=int64_feature(image.image_id),
question_ids=bytes_feature(question_ids_proto.SerializeToString()),
questions=bytes_feature(questions_proto.SerializeToString()),
answers=bytes_feature(answers_proto.SerializeToString()),
captions=bytes_feature(captions_proto.SerializeToString()),
object_features=bytes_feature(features_proto.SerializeToString()),
object_positions=bytes_feature(positions_proto.SerializeToString()))
return tf.train.Example(features=tf.train.Features(feature=features))
# ------------------------------------------------------------------------------
#
# Data loading helpers.
#
# ------------------------------------------------------------------------------
def load_karpathy_splits(filename):
"""Load Karpathy COCO ids for train, val, and test."""
splits = {"train": set(), "val": set(), "test": set()}
with tf.io.gfile.GFile(filename, "r") as f:
for image in json.load(f)["images"]:
split = image["split"] if image["split"] != "restval" else "train"
splits[split].add(image["cocoid"])
return splits
def filter_question(question, answer):
"""Apply filtering rules to QA pair."""
question = question.strip(string.punctuation + " ")
answer = answer.strip(string.punctuation + " ")
if not question:
return True
if not answer:
return True
if answer.lower() in ["yes", "no", "none", "unanswerable", "unsuitable"]:
return True
if any([c.isnumeric() for c in answer]):
return True
return False
def read_object(objects, image_id):
"""Read R-CNN object data from HDF5 file."""
# Super slow but oh well.
should_close = False
if isinstance(objects, str):
should_close = True
objects = h5py.File(objects, "r")
image_id = str(image_id)
image_features = objects["features-" + image_id][:]
image_bboxes = objects["bboxes-" + image_id]
image_dims = objects["dims-" + image_id]
positions = image_utils.quantize_bbox(
bboxes=image_bboxes, height=image_dims[0], width=image_dims[1])
if should_close:
objects.close()
return image_features, positions
# ------------------------------------------------------------------------------
#
# Data writing helpers.
#
# ------------------------------------------------------------------------------
def convert_shard(shard, shard_name_fn, example_fn):
"""Multithreading helper to serialize an individual shard to disk.
Args:
shard: Tuple of shard id (int) and examples (ImageMetadata list).
shard_name_fn: Maps shard id to file name.
example_fn: Maps ImageMetadata to Example proto.
"""
shard_id, examples = shard
# The example might have an "objects" attribute, which in this case, points
# to the filename of a HDF5 file that should be opened. Many examples share
# the same objects HDF5 file for storing their object data, so we keep the
# file handle open until we hit an example that points to a different file.
maybe_open_file = hasattr(examples[0], "objects")
if maybe_open_file:
current_file = examples[0].objects
current_file_handle = h5py.File(current_file, "r")
output_file = shard_name_fn(shard_id)
tf.logging.info("Writing shard %s" % output_file)
with tf.io.TFRecordWriter(output_file) as writer:
for example in examples:
# Check if the HDF5 file should be updated.
if maybe_open_file:
if example.objects != current_file:
current_file_handle.close()
current_file = example.objects
current_file_handle = h5py.File(current_file, "r")
example.objects = current_file_handle
example_proto = example_fn(example)
writer.write(example_proto.SerializeToString())
if maybe_open_file:
current_file_handle.close()
def convert_to_tfrecords(dataset, num_shards, basename, example_fn):
"""Convert a dataset to sharded TFRecords.
Args:
dataset: List of ImageMetadata.
num_shards: Number of randomized shards to write dataset to.
basename: Base name to write shards to (/path/name-xxxxx-of-yyyyy).
example_fn: Returns Example proto given example metadata.
"""
# Shuffle the ordering of images.
np.random.seed(12345)
np.random.shuffle(dataset)
# Break dataset into num_shards.
size = int(math.ceil(len(dataset) / num_shards))
shards = [dataset[i:i + size] for i in range(0, len(dataset), size)]
# Map with multithreading.
tf.logging.info("Processing %d shards", num_shards)
num_threads = min([num_shards, MAX_THREADS])
workers = multiprocessing.pool.ThreadPool(num_threads)
shard_name = basename + "-%.5d-of-" + "%.5d" % num_shards
map_fn = functools.partial(
convert_shard,
shard_name_fn=lambda i: shard_name % i,
example_fn=example_fn)
workers.map(map_fn, enumerate(shards))
tf.logging.info("Finished %d shards.", num_shards)
def convert_to_rc(dataset, filename):
"""Convert dataset of ImageMetadata items to RC-formatted JSON file.
Args:
dataset: List of ImageMetadata.
filename: Path to write to.
"""
rc_data = collections.defaultdict(dict)
for example in dataset:
image_id = example.image_id
for i, qid in enumerate(example.question_ids):
question = example.questions[i]
answers = example.answers[i]
rc_data[image_id][qid] = dict(question=question, answers=answers)
with tf.io.gfile.GFile(filename, "w") as f:
json.dump(rc_data, f)
class NumpyEncoder(json.JSONEncoder):
"""Helper to encode things with Numpy objects."""
def default(self, obj):
if isinstance(obj, np.ndarray):
return obj.tolist()
if np.issubdtype(obj, np.integer):
return int(obj)
    # np.float / np.bool are deprecated aliases; use the canonical dtypes.
    if np.issubdtype(obj, np.floating):
      return float(obj)
    if np.issubdtype(obj, np.bool_):
      return bool(obj)
return json.JSONEncoder.default(self, obj)
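# --- Illustrative round-trip (hedged, not part of this module) ---
# The *_example functions above serialize tensors with tf.make_tensor_proto and
# stash the bytes inside Example features. A sketch of recovering one field on
# the reading side; `example` is assumed to come from caption_example:
#
#   serialized = example.features.feature["captions"].bytes_list.value[0]
#   captions = tf.io.parse_tensor(serialized, out_type=tf.string)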
| google-research/language | language/capwap/utils/io_utils.py | Python | apache-2.0 | 9,702 |
import hashlib
import datetime
class Block:
def __init__(self, index, timestamp, data, previous_hash):
self.index = index
self.timestamp = timestamp
self.data = data
self.previous_hash = previous_hash
self.hash = self.hash_block()
    def hash_block(self):
        # Avoid shadowing the builtin `hash`, and encode to bytes so the
        # digest computation also works on Python 3.
        sha = hashlib.sha256()
        sha.update((str(self.index) +
                    str(self.timestamp) +
                    str(self.data) +
                    str(self.previous_hash)).encode('utf-8'))
        return sha.hexdigest()
def genesis_block():
return Block(0, datetime.datetime.now(), "Genesis Block", "0")
def next_block(previous_block):
index = previous_block.index + 1
timestamp = datetime.datetime.now()
data = "I am block #" + str(index)
previous_hash = previous_block.hash
return Block(index, timestamp, data, previous_hash)
blockchain = [genesis_block()]
previous_block = blockchain[0]
for i in range(0, 50):
block_to_add = next_block(previous_block)
blockchain.append(block_to_add)
previous_block = block_to_add
print("Block #" + str(block_to_add.index) + " has been added to the blockchain")
print("Timestamp: " + str(block_to_add.timestamp))
print("Data: " + str(block_to_add.data))
print("Previous Hash: " + str(block_to_add.previous_hash))
print("Hash: " + str(block_to_add.hash) + "\n")
# Resources: https://medium.com/crypto-currently/lets-make-the-tiniest-blockchain-bigger-ac360a328f4d
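# --- Illustrative addition (hedged): chain integrity check ---
# The loop above only appends blocks; a natural companion is a validation pass
# that re-derives each hash and checks the previous_hash links. This sketch is
# not part of the original script, and `validate_chain` is a hypothetical name.
def validate_chain(chain):
    for previous, current in zip(chain, chain[1:]):
        # The stored hash must match a recomputation from the block's fields.
        if current.hash != current.hash_block():
            return False
        # Each block must point at the actual hash of its predecessor.
        if current.previous_hash != previous.hash:
            return False
    return True

print("Chain valid: " + str(validate_chain(blockchain)))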
| shuttlesworthNEO/HashHacks2.0-methOD | Future_Prospects/blockchain.py | Python | mit | 1,336 |
#!/usr/bin/env python
# Copyright 2015 HM Revenue & Customs
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import os
import json
import lib
import sys
from os.path import expanduser
import shutil
from jenkins import Jenkins
from git import Git
from config import Configuration
parser = argparse.ArgumentParser(description='Library release tagger - tag non-snapshot libraries')
parser.add_argument('-v', '--verbose', action='store_true', help='Print debug output')
parser.add_argument('projectName', type=str, help='The jenkins build of the repo we want to tag')
parser.add_argument('buildNumber', type=str, help='The jenkins build number we want to tag')
args = parser.parse_args()
WORKSPACE = expanduser("~/.release")
if os.path.exists(WORKSPACE):
shutil.rmtree(WORKSPACE)
os.mkdir(WORKSPACE)
conf = Configuration()
conf.validate()
jenkins = Jenkins(conf.jenkins, conf.jenkins_user, conf.jenkins_key)
def verbose(message):
if args.verbose:
print(message)
def run():
jenkins_project = args.projectName
jenkins_build_number = args.buildNumber
if not jenkins.find_if_build_is_green(jenkins_project, jenkins_build_number):
print("Build #" + jenkins_build_number + " of '" + jenkins_project + "' is not a green build.")
sys.exit(1)
repo_url = jenkins.find_github_repo_url_from_build(jenkins_project)
git = Git(WORKSPACE, repo_url)
commit_id = jenkins.find_commit_id_from_build(jenkins_project, jenkins_build_number)
verbose("commit_id=" + commit_id)
repo_name = git.repo_name()
verbose("repo_name=" + repo_name)
git.clone()
verbose("Git repo '" + repo_name + "' cloned to " + WORKSPACE)
most_recent_tag = git.describe(commit_id)
verbose("Most recent release: " + most_recent_tag)
new_version_number = lib.read_user_preferred_version(repo_name, most_recent_tag)
git.tag(commit_id, "release/" + new_version_number)
run()
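# Illustrative invocation (hedged; arguments per the argparse definition above):
#   ./release.py -v my-jenkins-job 123
# verifies that build #123 of 'my-jenkins-job' is green, clones the repo behind
# the build, derives the next version via lib.read_user_preferred_version, and
# tags the commit as release/<version>.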
| hmrc/release | src/universal/bin/release.py | Python | apache-2.0 | 2,453 |
class BaseSeekSet(object):
def __init__(self, sets):
self._sets = list(sets)
def __contains__(self, item):
raise NotImplementedError()
def __getitem__(self, seek):
"""Use `seek` to index into `self` and return set of available letters."""
if isinstance(seek, slice):
start, stop, step = seek.start, seek.stop, seek.step
elif isinstance(seek, int):
start, stop, step = seek, seek, 1
else:
start, stop, step = None, None, None
if start is None:
# Indexing for lookup.
return self.seek(seek)
# Slicing.
return self._slice(start, stop, step)
def _slice(self, start, stop, step):
if not start:
return self
raise IndexError('Index "%s" out of bounds' % start)
def __len__(self):
return len(self._sets)
def seek(self, seek):
raise NotImplementedError()
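# --- Illustrative subclass (hedged, not part of this repo) ---
# BaseSeekSet leaves seek() and __contains__ abstract. A minimal concrete
# subclass, purely for illustration: after consuming `seek` characters, the
# available letters are simply the next stored set.
class TrivialSeekSet(BaseSeekSet):
  def seek(self, seek):
    # Letters available at the position reached by `seek`.
    return self._sets[len(seek)]

  def __contains__(self, item):
    # `item` fits if each character is drawn from the corresponding set.
    return len(item) <= len(self._sets) and all(
        c in s for c, s in zip(item, self._sets))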
| PhilHarnish/forge | src/data/seek_sets/base_seek_set.py | Python | mit | 851 |
"""
Core eval alignment algorithms.
"""
from functools import partial, wraps
from typing import Dict, Optional, Sequence, Tuple, Type, Union
import warnings
import numpy as np
from pandas._typing import FrameOrSeries
from pandas.errors import PerformanceWarning
from pandas.core.dtypes.generic import ABCDataFrame, ABCSeries
from pandas.core.base import PandasObject
import pandas.core.common as com
from pandas.core.computation.common import result_type_many
def _align_core_single_unary_op(
term,
) -> Tuple[Union[partial, Type[FrameOrSeries]], Optional[Dict[str, int]]]:
typ: Union[partial, Type[FrameOrSeries]]
axes: Optional[Dict[str, int]] = None
if isinstance(term.value, np.ndarray):
typ = partial(np.asanyarray, dtype=term.value.dtype)
else:
typ = type(term.value)
if hasattr(term.value, "axes"):
axes = _zip_axes_from_type(typ, term.value.axes)
return typ, axes
def _zip_axes_from_type(
typ: Type[FrameOrSeries], new_axes: Sequence[int]
) -> Dict[str, int]:
axes = {name: new_axes[i] for i, name in enumerate(typ._AXIS_ORDERS)}
return axes
def _any_pandas_objects(terms) -> bool:
"""
Check a sequence of terms for instances of PandasObject.
"""
return any(isinstance(term.value, PandasObject) for term in terms)
def _filter_special_cases(f):
@wraps(f)
def wrapper(terms):
# single unary operand
if len(terms) == 1:
return _align_core_single_unary_op(terms[0])
term_values = (term.value for term in terms)
# we don't have any pandas objects
if not _any_pandas_objects(terms):
return result_type_many(*term_values), None
return f(terms)
return wrapper
@_filter_special_cases
def _align_core(terms):
term_index = [i for i, term in enumerate(terms) if hasattr(term.value, "axes")]
term_dims = [terms[i].value.ndim for i in term_index]
from pandas import Series
ndims = Series(dict(zip(term_index, term_dims)))
# initial axes are the axes of the largest-axis'd term
biggest = terms[ndims.idxmax()].value
typ = biggest._constructor
axes = biggest.axes
naxes = len(axes)
gt_than_one_axis = naxes > 1
for value in (terms[i].value for i in term_index):
is_series = isinstance(value, ABCSeries)
is_series_and_gt_one_axis = is_series and gt_than_one_axis
for axis, items in enumerate(value.axes):
if is_series_and_gt_one_axis:
ax, itm = naxes - 1, value.index
else:
ax, itm = axis, items
if not axes[ax].is_(itm):
axes[ax] = axes[ax].join(itm, how="outer")
for i, ndim in ndims.items():
for axis, items in zip(range(ndim), axes):
ti = terms[i].value
if hasattr(ti, "reindex"):
transpose = isinstance(ti, ABCSeries) and naxes > 1
reindexer = axes[naxes - 1] if transpose else items
term_axis_size = len(ti.axes[axis])
reindexer_size = len(reindexer)
ordm = np.log10(max(1, abs(reindexer_size - term_axis_size)))
if ordm >= 1 and reindexer_size >= 10000:
w = (
f"Alignment difference on axis {axis} is larger "
f"than an order of magnitude on term {repr(terms[i].name)}, "
f"by more than {ordm:.4g}; performance may suffer"
)
warnings.warn(w, category=PerformanceWarning, stacklevel=6)
f = partial(ti.reindex, reindexer, axis=axis, copy=False)
terms[i].update(f())
terms[i].update(terms[i].value.values)
return typ, _zip_axes_from_type(typ, axes)
def align_terms(terms):
"""
Align a set of terms.
"""
try:
# flatten the parse tree (a nested list, really)
terms = list(com.flatten(terms))
except TypeError:
# can't iterate so it must just be a constant or single variable
if isinstance(terms.value, (ABCSeries, ABCDataFrame)):
typ = type(terms.value)
return typ, _zip_axes_from_type(typ, terms.value.axes)
return np.result_type(terms.type), None
# if all resolved variables are numeric scalars
if all(term.is_scalar for term in terms):
return result_type_many(*(term.value for term in terms)).type, None
# perform the main alignment
typ, axes = _align_core(terms)
return typ, axes
def reconstruct_object(typ, obj, axes, dtype):
"""
Reconstruct an object given its type, raw value, and possibly empty
(None) axes.
Parameters
----------
typ : object
A type
obj : object
The value to use in the type constructor
axes : dict
The axes to use to construct the resulting pandas object
Returns
-------
ret : typ
An object of type ``typ`` with the value `obj` and possible axes
`axes`.
"""
try:
typ = typ.type
except AttributeError:
pass
res_t = np.result_type(obj.dtype, dtype)
if not isinstance(typ, partial) and issubclass(typ, PandasObject):
return typ(obj, dtype=res_t, **axes)
# special case for pathological things like ~True/~False
if hasattr(res_t, "type") and typ == np.bool_ and res_t != np.bool_:
ret_value = res_t.type(obj)
else:
ret_value = typ(obj).astype(res_t)
# The condition is to distinguish 0-dim array (returned in case of
# scalar) and 1 element array
# e.g. np.array(0) and np.array([0])
if len(obj.shape) == 1 and len(obj) == 1:
if not isinstance(ret_value, np.ndarray):
ret_value = np.array([ret_value]).astype(res_t)
return ret_value
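# --- Illustrative note (hedged, not part of pandas) ---
# This module backs expression evaluation such as pd.eval / DataFrame.eval.
# A minimal sketch of the observable behaviour, using only the public API:
#
#   import pandas as pd
#   s1 = pd.Series([1.0, 2.0, 3.0], index=["a", "b", "c"])
#   s2 = pd.Series([10.0, 20.0], index=["b", "c"])
#   # align_terms/_align_core outer-join the indexes to ["a", "b", "c"] and
#   # reconstruct_object rebuilds a Series with the joined index.
#   out = pd.eval("s1 + s2")   # NaN at "a", 12.0 at "b", 23.0 at "c"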
| TomAugspurger/pandas | pandas/core/computation/align.py | Python | bsd-3-clause | 5,860 |
'''
payforward object
'''
from sqlalchemy.orm import relationship
from sqlalchemy import Column, Date, Integer, ForeignKey
from base import __base__
class Payforward(__base__): # pylint: disable=R0903
"""table of regular payments which was payed before described date.
to manage payments
also used as indicator of credits
"""
__tablename__ = 'payforwards'
record_id = Column(Integer, primary_key=True, name='id')
user_id = Column(Integer, ForeignKey('users.id'), nullable=False)
income_id = Column(Integer, ForeignKey('incomes.id'), nullable=True)
income = relationship('Income')
income_date = Column(Date, nullable=False)
# payment_date = Column(Date, nullable=False)
transaction_id = Column(Integer, ForeignKey(
'transactions.id'), nullable=False)
transaction = relationship('Transaction')
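# --- Illustrative usage (hedged, not part of this module) ---
# A minimal sketch of wiring this model to a session; the in-memory engine and
# the assumption that the related tables (users, incomes, transactions) are
# registered on the same __base__ are illustrative only.
#
#   from sqlalchemy import create_engine
#   from sqlalchemy.orm import sessionmaker
#
#   engine = create_engine('sqlite:///:memory:')
#   __base__.metadata.create_all(engine)   # creates 'payforwards' and friends
#   session = sessionmaker(bind=engine)()
#   credits = session.query(Payforward).filter_by(user_id=1).all()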
| obezpalko/4k | e4/payforward.py | Python | mit | 858 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2016, René Moser <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cs_host
short_description: Manages hosts on Apache CloudStack based clouds.
description:
- Create, update and remove hosts.
version_added: "2.3"
author: "René Moser (@resmo)"
options:
name:
description:
- Name of the host.
required: true
aliases: [ 'ip_address' ]
url:
description:
- Url of the host used to create a host.
- If not provided, C(http://) and param C(name) is used as url.
- Only considered if C(state=present) and host does not yet exist.
required: false
default: null
username:
description:
- Username for the host.
- Required if C(state=present) and host does not yet exist.
required: false
default: null
password:
description:
- Password for the host.
- Required if C(state=present) and host does not yet exist.
required: false
default: null
pod:
description:
- Name of the pod.
- Required if C(state=present) and host does not yet exist.
required: false
default: null
cluster:
description:
- Name of the cluster.
required: false
default: null
hypervisor:
description:
      - Name of the hypervisor.
- Required if C(state=present) and host does not yet exist.
choices: [ 'KVM', 'VMware', 'BareMetal', 'XenServer', 'LXC', 'HyperV', 'UCS', 'OVM', 'Simulator' ]
required: false
default: null
allocation_state:
description:
- Allocation state of the host.
choices: [ 'enabled', 'disabled' ]
required: false
default: null
host_tags:
description:
- Tags of the host.
required: false
default: null
state:
description:
- State of the host.
required: false
default: 'present'
choices: [ 'present', 'absent' ]
zone:
description:
- Name of the zone in which the host should be deployed.
- If not set, default zone is used.
required: false
default: null
extends_documentation_fragment: cloudstack
'''
EXAMPLES = '''
# Ensure a host is present but disabled
- local_action:
module: cs_host
name: ix-pod01-esx01.example.com
cluster: vcenter.example.com/ch-zrh-ix/pod01-cluster01
pod: pod01
zone: ch-zrh-ix-01
hypervisor: VMware
allocation_state: disabled
host_tags:
- perf
- gpu
# Ensure an existing host is disabled
- local_action:
module: cs_host
name: ix-pod01-esx01.example.com
zone: ch-zrh-ix-01
allocation_state: disabled
# Ensure an existing host is enabled
- local_action:
module: cs_host
name: ix-pod01-esx01.example.com
zone: ch-zrh-ix-01
allocation_state: enabled
# Ensure a host is absent
- local_action:
module: cs_host
name: ix-pod01-esx01.example.com
zone: ch-zrh-ix-01
state: absent
'''
RETURN = '''
---
capabilities:
description: Capabilities of the host.
returned: success
type: string
sample: hvm
cluster:
description: Cluster of the host.
returned: success
type: string
sample: vcenter.example.com/zone/cluster01
cluster_type:
description: Type of the cluster of the host.
returned: success
type: string
sample: ExternalManaged
cpu_allocated:
description: Amount in percent of the host's CPU currently allocated.
returned: success
type: string
sample: 166.25%
cpu_number:
description: Number of CPUs of the host.
returned: success
type: string
sample: 24
cpu_sockets:
description: Number of CPU sockets of the host.
returned: success
type: int
sample: 2
cpu_speed:
description: CPU speed in Mhz
returned: success
type: int
sample: 1999
cpu_used:
description: Amount of the host's CPU currently used.
returned: success
type: string
sample: 33.6%
cpu_with_overprovisioning:
description: Amount of the host's CPU after applying the cpu.overprovisioning.factor.
returned: success
type: string
sample: 959520.0
created:
description: Date when the host was created.
returned: success
type: string
sample: 2015-05-03T15:05:51+0200
disconnected:
description: Date when the host was disconnected.
returned: success
type: string
sample: 2015-05-03T15:05:51+0200
disk_size_allocated:
description: Host's currently allocated disk size.
returned: success
type: int
sample: 2593
disk_size_total:
description: Total disk size of the host
returned: success
type: int
sample: 259300
events:
description: Events available for the host
returned: success
type: string
sample: "Ping; HostDown; AgentConnected; AgentDisconnected; PingTimeout; ShutdownRequested; Remove; StartAgentRebalance; ManagementServerDown"
ha_host:
description: Whether the host is a HA host.
returned: success
type: bool
sample: false
has_enough_capacity:
description: Whether the host has enough CPU and RAM capacity to migrate a VM to it.
returned: success
type: bool
sample: true
host_tags:
description: Comma-separated list of tags for the host.
returned: success
type: string
sample: "perf"
hypervisor:
description: Host's hypervisor.
returned: success
type: string
sample: VMware
hypervisor_version:
description: Hypervisor version.
returned: success
type: string
sample: 5.1
ip_address:
description: IP address of the host
returned: success
type: string
sample: 10.10.10.1
is_local_storage_active:
description: Whether the local storage is available or not.
returned: success
type: bool
sample: false
last_pinged:
description: Date and time the host was last pinged.
returned: success
type: string
sample: "1970-01-17T17:27:32+0100"
management_server_id:
description: Management server ID of the host.
returned: success
type: int
sample: 345050593418
memory_allocated:
description: Amount of the host's memory currently allocated.
returned: success
type: int
sample: 69793218560
memory_total:
description: Total of memory of the host.
returned: success
type: int
sample: 206085263360
memory_used:
description: Amount of the host's memory currently used.
returned: success
type: int
sample: 65504776192
name:
description: Name of the host.
returned: success
type: string
sample: esx32.example.com
network_kbs_read:
description: Incoming network traffic on the host.
returned: success
type: int
sample: 0
network_kbs_write:
description: Outgoing network traffic on the host.
returned: success
type: int
sample: 0
os_category:
description: OS category name of the host.
returned: success
type: string
sample: ...
out_of_band_management:
description: Host out-of-band management information.
returned: success
type: string
sample: ...
pod:
description: Pod name of the host.
returned: success
type: string
sample: Pod01
removed:
description: Date and time the host was removed.
returned: success
type: string
sample: "1970-01-17T17:27:32+0100"
resource_state:
description: Resource state of the host.
returned: success
type: string
sample: Enabled
allocation_state:
description: Allocation state of the host.
returned: success
type: string
sample: enabled
state:
description: State of the host.
returned: success
type: string
sample: Up
suitable_for_migration:
  description: Whether this host is suitable to migrate a VM to, i.e. whether it has enough capacity and satisfies all
    conditions such as host tags and the max guest VM limit.
returned: success
type: string
sample: true
host_type:
description: Type of the host.
returned: success
type: string
sample: Routing
host_version:
description: Version of the host.
returned: success
type: string
sample: 4.5.2
gpu_group:
description: GPU cards present in the host.
returned: success
type: list
sample: []
zone:
description: Zone of the host.
returned: success
type: string
sample: zone01
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.cloudstack import (
AnsibleCloudStack,
CloudStackException,
cs_argument_spec,
cs_required_together,
CS_HYPERVISORS
)
import time
class AnsibleCloudStackHost(AnsibleCloudStack):
def __init__(self, module):
super(AnsibleCloudStackHost, self).__init__(module)
self.returns = {
'averageload': 'average_load',
'capabilities': 'capabilities',
'clustername': 'cluster',
'clustertype': 'cluster_type',
'cpuallocated': 'cpu_allocated',
'cpunumber': 'cpu_number',
'cpusockets': 'cpu_sockets',
'cpuspeed': 'cpu_speed',
'cpuused': 'cpu_used',
'cpuwithoverprovisioning': 'cpu_with_overprovisioning',
'disconnected': 'disconnected',
'details': 'details',
'disksizeallocated': 'disk_size_allocated',
'disksizetotal': 'disk_size_total',
'events': 'events',
'hahost': 'ha_host',
'hasenoughcapacity': 'has_enough_capacity',
'hypervisor': 'hypervisor',
'hypervisorversion': 'hypervisor_version',
'ipaddress': 'ip_address',
'islocalstorageactive': 'is_local_storage_active',
'lastpinged': 'last_pinged',
'managementserverid': 'management_server_id',
'memoryallocated': 'memory_allocated',
'memorytotal': 'memory_total',
'memoryused': 'memory_used',
'networkkbsread': 'network_kbs_read',
'networkkbswrite': 'network_kbs_write',
'oscategoryname': 'os_category',
'outofbandmanagement': 'out_of_band_management',
'podname': 'pod',
'removed': 'removed',
'resourcestate': 'resource_state',
'suitableformigration': 'suitable_for_migration',
'type': 'host_type',
'version': 'host_version',
'gpugroup': 'gpu_group',
}
self.allocation_states = {
'enabled': 'Enable',
'disabled': 'Disable',
}
self.host = None
def get_pod(self, key=None):
pod_name = self.module.params.get('pod')
if not pod_name:
return None
args = {
'name': pod_name,
'zoneid': self.get_zone(key='id'),
}
pods = self.cs.listPods(**args)
if pods:
return self._get_by_key(key, pods['pod'][0])
self.module.fail_json(msg="Pod %s not found" % pod_name)
def get_cluster(self, key=None):
cluster_name = self.module.params.get('cluster')
if not cluster_name:
return None
args = {
'name': cluster_name,
'zoneid': self.get_zone(key='id'),
}
clusters = self.cs.listClusters(**args)
if clusters:
return self._get_by_key(key, clusters['cluster'][0])
self.module.fail_json(msg="Cluster %s not found" % cluster_name)
def get_host_tags(self):
host_tags = self.module.params.get('host_tags')
if host_tags is None:
return None
return ','.join(host_tags)
def get_allocation_state(self):
allocation_state = self.module.params.get('allocation_state')
if allocation_state is None:
return None
return self.allocation_states[allocation_state]
def get_host(self, refresh=False):
if self.host is not None and not refresh:
return self.host
name = self.module.params.get('name')
args = {
'zoneid': self.get_zone(key='id'),
}
res = self.cs.listHosts(**args)
if res:
for h in res['host']:
if name in [h['ipaddress'], h['name']]:
self.host = h
return self.host
def present_host(self):
host = self.get_host()
if not host:
host = self._create_host(host)
else:
host = self._update_host(host)
return host
def _get_url(self):
url = self.module.params.get('url')
if url:
return url
else:
return "http://%s" % self.module.params.get('name')
def _create_host(self, host):
required_params = [
'password',
'username',
'hypervisor',
'pod',
]
self.module.fail_on_missing_params(required_params=required_params)
self.result['changed'] = True
args = {
'hypervisor': self.module.params.get('hypervisor'),
'url': self._get_url(),
'username': self.module.params.get('username'),
'password': self.module.params.get('password'),
'podid': self.get_pod(key='id'),
'zoneid': self.get_zone(key='id'),
'clusterid': self.get_cluster(key='id'),
'allocationstate': self.get_allocation_state(),
'hosttags': self.get_host_tags(),
}
if not self.module.check_mode:
host = self.cs.addHost(**args)
if 'errortext' in host:
self.module.fail_json(msg="Failed: '%s'" % host['errortext'])
host = host['host'][0]
return host
def _update_host(self, host):
args = {
'id': host['id'],
'hosttags': self.get_host_tags(),
'allocationstate': self.get_allocation_state()
}
host['allocationstate'] = self.allocation_states[host['resourcestate'].lower()]
if self.has_changed(args, host):
self.result['changed'] = True
if not self.module.check_mode:
host = self.cs.updateHost(**args)
if 'errortext' in host:
self.module.fail_json(msg="Failed: '%s'" % host['errortext'])
host = host['host']
return host
def absent_host(self):
host = self.get_host()
if host:
self.result['changed'] = True
args = {
'id': host['id'],
}
if not self.module.check_mode:
res = self.enable_maintenance()
if res:
res = self.cs.deleteHost(**args)
if 'errortext' in res:
self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
return host
def enable_maintenance(self):
host = self.get_host()
if host['resourcestate'] not in ['PrepareForMaintenance', 'Maintenance']:
self.result['changed'] = True
args = {
'id': host['id'],
}
if not self.module.check_mode:
res = self.cs.prepareHostForMaintenance(**args)
if 'errortext' in res:
self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
host = self.poll_job(res, 'host')
self._poll_for_maintenance()
return host
def _poll_for_maintenance(self):
for i in range(0, 300):
time.sleep(2)
host = self.get_host(refresh=True)
if not host:
return None
elif host['resourcestate'] != 'PrepareForMaintenance':
return host
        self.module.fail_json(msg="Polling for maintenance timed out")
def get_result(self, host):
super(AnsibleCloudStackHost, self).get_result(host)
if host:
self.result['allocation_state'] = host['resourcestate'].lower()
self.result['host_tags'] = host['hosttags'].split(',') if host.get('hosttags') else []
return self.result
def main():
argument_spec = cs_argument_spec()
argument_spec.update(dict(
name=dict(required=True, aliases=['ip_address']),
url=dict(),
password=dict(default=None, no_log=True),
username=dict(default=None),
hypervisor=dict(choices=CS_HYPERVISORS, default=None),
allocation_state=dict(default=None),
pod=dict(default=None),
cluster=dict(default=None),
host_tags=dict(default=None, type='list'),
zone=dict(default=None),
state=dict(choices=['present', 'absent'], default='present'),
))
module = AnsibleModule(
argument_spec=argument_spec,
required_together=cs_required_together(),
supports_check_mode=True
)
try:
acs_host = AnsibleCloudStackHost(module)
state = module.params.get('state')
if state == 'absent':
host = acs_host.absent_host()
else:
host = acs_host.present_host()
result = acs_host.get_result(host)
except CloudStackException as e:
module.fail_json(msg='CloudStackException: %s' % str(e))
module.exit_json(**result)
if __name__ == '__main__':
main()
| jonathonwalz/ansible | lib/ansible/modules/cloud/cloudstack/cs_host.py | Python | gpl-3.0 | 17,672 |
"""
Remote package support using ``pkg_add(1)``
.. important::
If you feel that Salt should be using this module to manage packages on a
minion, and it is using a different module (or gives an error similar to
*'pkg.install' is not available*), see :ref:`here
<module-provider-override>`.
.. warning::
This module has been completely rewritten. Up to and including version
0.17.0, it supported ``pkg_add(1)``, but checked for the existence of a
pkgng local database and, if found, would provide some of pkgng's
functionality. The rewrite of this module has removed all pkgng support,
and moved it to the :mod:`pkgng <salt.modules.pkgng>` execution module. For
versions <= 0.17.0, the documentation here should not be considered
accurate. If your Minion is running one of these versions, then the
documentation for this module can be viewed using the :mod:`sys.doc
<salt.modules.sys.doc>` function:
.. code-block:: bash
salt bsdminion sys.doc pkg
This module acts as the default package provider for FreeBSD 9 and older. If
you need to use pkgng on a FreeBSD 9 system, you will need to override the
``pkg`` provider by setting the :conf_minion:`providers` parameter in your
Minion config file, in order to use pkgng.
.. code-block:: yaml
providers:
pkg: pkgng
More information on pkgng support can be found in the documentation for the
:mod:`pkgng <salt.modules.pkgng>` module.
This module will respect the ``PACKAGEROOT`` and ``PACKAGESITE`` environment
variables, if set, but these values can also be overridden in several ways:
1. :strong:`Salt configuration parameters.` The configuration parameters
``freebsdpkg.PACKAGEROOT`` and ``freebsdpkg.PACKAGESITE`` are recognized.
These config parameters are looked up using :mod:`config.get
<salt.modules.config.get>` and can thus be specified in the Master config
file, Grains, Pillar, or in the Minion config file. Example:
.. code-block:: yaml
freebsdpkg.PACKAGEROOT: ftp://ftp.freebsd.org/
freebsdpkg.PACKAGESITE: ftp://ftp.freebsd.org/pub/FreeBSD/ports/ia64/packages-9-stable/Latest/
2. :strong:`CLI arguments.` Both the ``packageroot`` (used interchangeably with
``fromrepo`` for API compatibility) and ``packagesite`` CLI arguments are
recognized, and override their config counterparts from section 1 above.
.. code-block:: bash
salt -G 'os:FreeBSD' pkg.install zsh fromrepo=ftp://ftp2.freebsd.org/
salt -G 'os:FreeBSD' pkg.install zsh packageroot=ftp://ftp2.freebsd.org/
salt -G 'os:FreeBSD' pkg.install zsh packagesite=ftp://ftp2.freebsd.org/pub/FreeBSD/ports/ia64/packages-9-stable/Latest/
.. note::
These arguments can also be passed through in states:
.. code-block:: yaml
zsh:
pkg.installed:
- fromrepo: ftp://ftp2.freebsd.org/
"""
import copy
import logging
import re
import salt.utils.data
import salt.utils.functools
import salt.utils.pkg
from salt.exceptions import CommandExecutionError, MinionError
log = logging.getLogger(__name__)
# Define the module's virtual name
__virtualname__ = "pkg"
def __virtual__():
"""
Load as 'pkg' on FreeBSD versions less than 10.
Don't load on FreeBSD 9 when the config option
``providers:pkg`` is set to 'pkgng'.
"""
if __grains__["os"] == "FreeBSD" and float(__grains__["osrelease"]) < 10:
providers = {}
if "providers" in __opts__:
providers = __opts__["providers"]
if providers and "pkg" in providers and providers["pkg"] == "pkgng":
log.debug(
"Configuration option 'providers:pkg' is set to "
"'pkgng', won't load old provider 'freebsdpkg'."
)
return (
False,
"The freebsdpkg execution module cannot be loaded: the configuration"
" option 'providers:pkg' is set to 'pkgng'",
)
return __virtualname__
return (
False,
"The freebsdpkg execution module cannot be loaded: either the os is not FreeBSD"
" or the version of FreeBSD is >= 10.",
)
def _get_repo_options(fromrepo=None, packagesite=None):
"""
    Return a dict used to set environment variables for any pkg_add commands
    that are spawned.
    If ``fromrepo`` or ``packagesite`` are None, then their corresponding
    config parameter will be looked up with config.get.
    If both ``fromrepo`` and ``packagesite`` are None, and neither
    freebsdpkg.PACKAGEROOT nor freebsdpkg.PACKAGESITE are specified, then an
    empty dict is returned, and it is assumed that the system defaults (or
    environment variables) will be used.
"""
root = (
fromrepo
if fromrepo is not None
else __salt__["config.get"]("freebsdpkg.PACKAGEROOT", None)
)
site = (
packagesite
if packagesite is not None
else __salt__["config.get"]("freebsdpkg.PACKAGESITE", None)
)
ret = {}
if root is not None:
ret["PACKAGEROOT"] = root
if site is not None:
ret["PACKAGESITE"] = site
return ret
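# Illustrative only (hedged): with the config values shown in the module
# docstring, _get_repo_options() would return something like
#   {'PACKAGEROOT': 'ftp://ftp.freebsd.org/',
#    'PACKAGESITE': 'ftp://ftp.freebsd.org/pub/FreeBSD/ports/ia64/packages-9-stable/Latest/'}
# and install() passes that dict as `env` to cmd.run_all for pkg_add.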
def _match(names):
"""
Since pkg_delete requires the full "pkgname-version" string, this function
will attempt to match the package name with its version. Returns a list of
partial matches and package names that match the "pkgname-version" string
required by pkg_delete, and a list of errors encountered.
"""
pkgs = list_pkgs(versions_as_list=True)
errors = []
# Look for full matches
full_pkg_strings = []
out = __salt__["cmd.run_stdout"](
["pkg_info"], output_loglevel="trace", python_shell=False
)
for line in out.splitlines():
try:
full_pkg_strings.append(line.split()[0])
except IndexError:
continue
full_matches = [x for x in names if x in full_pkg_strings]
# Look for pkgname-only matches
matches = []
ambiguous = []
for name in set(names) - set(full_matches):
cver = pkgs.get(name)
if cver is not None:
if len(cver) == 1:
matches.append("{}-{}".format(name, cver[0]))
else:
ambiguous.append(name)
errors.append(
"Ambiguous package '{}'. Full name/version required. "
"Possible matches: {}".format(
name, ", ".join(["{}-{}".format(name, x) for x in cver])
)
)
# Find packages that did not match anything
not_matched = set(names) - set(matches) - set(full_matches) - set(ambiguous)
for name in not_matched:
errors.append("Package '{}' not found".format(name))
return matches + full_matches, errors
def latest_version(*names, **kwargs):
"""
``pkg_add(1)`` is not capable of querying for remote packages, so this
function will always return results as if there is no package available for
install or upgrade.
CLI Example:
.. code-block:: bash
salt '*' pkg.latest_version <package name>
salt '*' pkg.latest_version <package1> <package2> <package3> ...
"""
return "" if len(names) == 1 else {x: "" for x in names}
# available_version is being deprecated
available_version = salt.utils.functools.alias_function(
latest_version, "available_version"
)
def version(*names, **kwargs):
"""
Returns a string representing the package version or an empty string if not
installed. If more than one package name is specified, a dict of
name/version pairs is returned.
with_origin : False
Return a nested dictionary containing both the origin name and version
for each specified package.
.. versionadded:: 2014.1.0
CLI Example:
.. code-block:: bash
salt '*' pkg.version <package name>
salt '*' pkg.version <package1> <package2> <package3> ...
"""
with_origin = kwargs.pop("with_origin", False)
ret = __salt__["pkg_resource.version"](*names, **kwargs)
if not salt.utils.data.is_true(with_origin):
return ret
# Put the return value back into a dict since we're adding a subdict
if len(names) == 1:
ret = {names[0]: ret}
origins = __context__.get("pkg.origin", {})
return {x: {"origin": origins.get(x, ""), "version": y} for x, y in ret.items()}
def refresh_db(**kwargs):
"""
``pkg_add(1)`` does not use a local database of available packages, so this
    function simply returns ``True``. It exists merely for API compatibility.
CLI Example:
.. code-block:: bash
salt '*' pkg.refresh_db
"""
# Remove rtag file to keep multiple refreshes from happening in pkg states
salt.utils.pkg.clear_rtag(__opts__)
return True
def _list_pkgs_from_context(versions_as_list, with_origin):
"""
Use pkg list from __context__
"""
ret = copy.deepcopy(__context__["pkg.list_pkgs"])
if not versions_as_list:
__salt__["pkg_resource.stringify"](ret)
if salt.utils.data.is_true(with_origin):
origins = __context__.get("pkg.origin", {})
return {x: {"origin": origins.get(x, ""), "version": y} for x, y in ret.items()}
return ret
def list_pkgs(versions_as_list=False, with_origin=False, **kwargs):
"""
List the packages currently installed as a dict::
{'<package_name>': '<version>'}
with_origin : False
Return a nested dictionary containing both the origin name and version
for each installed package.
.. versionadded:: 2014.1.0
CLI Example:
.. code-block:: bash
salt '*' pkg.list_pkgs
"""
versions_as_list = salt.utils.data.is_true(versions_as_list)
# not yet implemented or not applicable
if any(
[salt.utils.data.is_true(kwargs.get(x)) for x in ("removed", "purge_desired")]
):
return {}
if "pkg.list_pkgs" in __context__ and kwargs.get("use_context", True):
return _list_pkgs_from_context(versions_as_list, with_origin)
ret = {}
origins = {}
out = __salt__["cmd.run_stdout"](
["pkg_info", "-ao"], output_loglevel="trace", python_shell=False
)
pkgs_re = re.compile(r"Information for ([^:]+):\s*Origin:\n([^\n]+)")
for pkg, origin in pkgs_re.findall(out):
if not pkg:
continue
try:
pkgname, pkgver = pkg.rsplit("-", 1)
except ValueError:
continue
__salt__["pkg_resource.add_pkg"](ret, pkgname, pkgver)
origins[pkgname] = origin
__salt__["pkg_resource.sort_pkglist"](ret)
__context__["pkg.list_pkgs"] = copy.deepcopy(ret)
__context__["pkg.origin"] = origins
if not versions_as_list:
__salt__["pkg_resource.stringify"](ret)
if salt.utils.data.is_true(with_origin):
return {x: {"origin": origins.get(x, ""), "version": y} for x, y in ret.items()}
return ret
def install(name=None, refresh=False, fromrepo=None, pkgs=None, sources=None, **kwargs):
"""
Install package(s) using ``pkg_add(1)``
name
The name of the package to be installed.
refresh
Whether or not to refresh the package database before installing.
fromrepo or packageroot
Specify a package repository from which to install. Overrides the
system default, as well as the PACKAGEROOT environment variable.
packagesite
Specify the exact directory from which to install the remote package.
Overrides the PACKAGESITE environment variable, if present.
Multiple Package Installation Options:
pkgs
A list of packages to install from a software repository. Must be
passed as a python list.
CLI Example:
.. code-block:: bash
salt '*' pkg.install pkgs='["foo", "bar"]'
sources
A list of packages to install. Must be passed as a list of dicts,
with the keys being package names, and the values being the source URI
or local path to the package.
CLI Example:
.. code-block:: bash
salt '*' pkg.install sources='[{"foo": "salt://foo.deb"}, {"bar": "salt://bar.deb"}]'
Return a dict containing the new package names and versions::
{'<package>': {'old': '<old-version>',
'new': '<new-version>'}}
CLI Example:
.. code-block:: bash
salt '*' pkg.install <package name>
"""
try:
pkg_params, pkg_type = __salt__["pkg_resource.parse_targets"](
name, pkgs, sources, **kwargs
)
except MinionError as exc:
raise CommandExecutionError(exc)
if not pkg_params:
return {}
packageroot = kwargs.get("packageroot")
if not fromrepo and packageroot:
fromrepo = packageroot
env = _get_repo_options(fromrepo, kwargs.get("packagesite"))
args = []
if pkg_type == "repository":
args.append("-r") # use remote repo
args.extend(pkg_params)
old = list_pkgs()
out = __salt__["cmd.run_all"](
["pkg_add"] + args, env=env, output_loglevel="trace", python_shell=False
)
if out["retcode"] != 0 and out["stderr"]:
errors = [out["stderr"]]
else:
errors = []
__context__.pop("pkg.list_pkgs", None)
new = list_pkgs()
_rehash()
ret = salt.utils.data.compare_dicts(old, new)
if errors:
raise CommandExecutionError(
"Problem encountered installing package(s)",
info={"errors": errors, "changes": ret},
)
return ret
def remove(name=None, pkgs=None, **kwargs):
"""
Remove packages using ``pkg_delete(1)``
name
The name of the package to be deleted.
Multiple Package Options:
pkgs
A list of packages to delete. Must be passed as a python list. The
``name`` parameter will be ignored if this option is passed.
.. versionadded:: 0.16.0
Returns a dict containing the changes.
CLI Example:
.. code-block:: bash
salt '*' pkg.remove <package name>
salt '*' pkg.remove <package1>,<package2>,<package3>
salt '*' pkg.remove pkgs='["foo", "bar"]'
"""
try:
pkg_params = __salt__["pkg_resource.parse_targets"](name, pkgs)[0]
except MinionError as exc:
raise CommandExecutionError(exc)
old = list_pkgs()
targets, errors = _match([x for x in pkg_params])
for error in errors:
log.error(error)
if not targets:
return {}
out = __salt__["cmd.run_all"](
["pkg_delete"] + targets, output_loglevel="trace", python_shell=False
)
if out["retcode"] != 0 and out["stderr"]:
errors = [out["stderr"]]
else:
errors = []
__context__.pop("pkg.list_pkgs", None)
new = list_pkgs()
ret = salt.utils.data.compare_dicts(old, new)
if errors:
raise CommandExecutionError(
"Problem encountered removing package(s)",
info={"errors": errors, "changes": ret},
)
return ret
# Support pkg.delete to remove packages to more closely match pkg_delete
delete = salt.utils.functools.alias_function(remove, "delete")
# No equivalent to purge packages, use remove instead
purge = salt.utils.functools.alias_function(remove, "purge")
def _rehash():
"""
Recomputes internal hash table for the PATH variable. Use whenever a new
command is created during the current session.
"""
shell = __salt__["environ.get"]("SHELL")
if shell.split("/")[-1] in ("csh", "tcsh"):
__salt__["cmd.shell"]("rehash", output_loglevel="trace")
def file_list(*packages, **kwargs):
"""
List the files that belong to a package. Not specifying any packages will
return a list of _every_ file on the system's package database (not
generally recommended).
CLI Examples:
.. code-block:: bash
salt '*' pkg.file_list httpd
salt '*' pkg.file_list httpd postfix
salt '*' pkg.file_list
"""
ret = file_dict(*packages)
files = []
for pkg_files in ret["files"].values():
files.extend(pkg_files)
ret["files"] = files
return ret
def file_dict(*packages, **kwargs):
"""
List the files that belong to a package, grouped by package. Not
specifying any packages will return a list of _every_ file on the
system's package database (not generally recommended).
CLI Examples:
.. code-block:: bash
salt '*' pkg.file_list httpd
        salt '*' pkg.file_dict httpd
        salt '*' pkg.file_dict httpd postfix
        salt '*' pkg.file_dict
errors = []
files = {}
if packages:
match_pattern = "'{0}-[0-9]*'"
cmd = ["pkg_info", "-QL"] + [match_pattern.format(p) for p in packages]
else:
cmd = ["pkg_info", "-QLa"]
ret = __salt__["cmd.run_all"](cmd, output_loglevel="trace", python_shell=False)
for line in ret["stderr"].splitlines():
errors.append(line)
pkg = None
for line in ret["stdout"].splitlines():
if pkg is not None and line.startswith("/"):
files[pkg].append(line)
elif ":/" in line:
pkg, fn = line.split(":", 1)
pkg, ver = pkg.rsplit("-", 1)
files[pkg] = [fn]
else:
continue # unexpected string
return {"errors": errors, "files": files}
| saltstack/salt | salt/modules/freebsdpkg.py | Python | apache-2.0 | 17,447 |
#!/usr/bin/env python
#
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example gets all system defined creative templates.
"""
# Import appropriate modules from the client library.
from googleads import ad_manager
def main(client):
# Initialize appropriate service.
creative_template_service = client.GetService(
'CreativeTemplateService', version='v202111')
# Create a statement to select creative templates.
statement = (ad_manager.StatementBuilder(version='v202111')
.Where('type = :type')
.WithBindVariable('type', 'SYSTEM_DEFINED'))
# Retrieve a small amount of creative templates at a time, paging
# through until all creative templates have been retrieved.
while True:
response = creative_template_service.getCreativeTemplatesByStatement(
statement.ToStatement())
if 'results' in response and len(response['results']):
for creative_template in response['results']:
# Print out some information for each creative template.
print('Creative template with ID "%d" and name "%s" was found.\n' %
(creative_template['id'], creative_template['name']))
statement.offset += statement.limit
else:
break
print('\nNumber of results found: %s' % response['totalResultSetSize'])
if __name__ == '__main__':
# Initialize client object.
ad_manager_client = ad_manager.AdManagerClient.LoadFromStorage()
main(ad_manager_client)
| googleads/googleads-python-lib | examples/ad_manager/v202111/creative_template_service/get_system_defined_creative_templates.py | Python | apache-2.0 | 2,006 |
# Copyright (C) 2015-2021 Regents of the University of California
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# https://pytest.org/latest/example/pythoncollection.html
collect_ignore = []
try:
import wdlparse
print(wdlparse.__file__) # keep this import from being removed
except ImportError:
collect_ignore.append("toilwdl.py")
| BD2KGenomics/slugflow | src/toil/test/wdl/conftest.py | Python | apache-2.0 | 846 |
from __future__ import division, print_function, absolute_import
import sys
if sys.version_info[0] >= 3:
DEFINE_MACROS = [("SCIPY_PY3K", None)]
else:
DEFINE_MACROS = []
def configuration(parent_package='', top_path=None):
from scipy._build_utils.system_info import get_info
from numpy.distutils.misc_util import Configuration, get_numpy_include_dirs
config = Configuration('cluster', parent_package, top_path)
blas_opt = get_info('lapack_opt')
config.add_data_dir('tests')
    config.add_extension('_vq',
                         sources=['_vq.c'],
                         include_dirs=[get_numpy_include_dirs()],
                         extra_info=blas_opt)
    config.add_extension('_hierarchy',
                         sources=['_hierarchy.c'],
                         include_dirs=[get_numpy_include_dirs()])
    config.add_extension('_optimal_leaf_ordering',
                         sources=['_optimal_leaf_ordering.c'],
                         include_dirs=[get_numpy_include_dirs()])
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| lhilt/scipy | scipy/cluster/setup.py | Python | bsd-3-clause | 1,061 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2012:
# Gabes Jean, [email protected]
# Gerhard Lausser, [email protected]
# Gregory Starck, [email protected]
# Hartmut Goebel, [email protected]
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
import math
from shinken.util import safe_print
from shinken.misc.perfdata import PerfDatas
# Will try to return a dict with:
# lnk: link to add in this perfdata thing
# title: text to show on it
# metrics: list of ('html color', percent) like [('#68f', 35), ('white', 64)]
def get_perfometer_table_values(elt):
# first try to get the command name called
cmd = elt.check_command.call.split('!')[0]
safe_print("Looking for perfometer value for command", cmd)
tab = {'check_http': manage_check_http_command,
'check_ping': manage_check_ping_command,
'check_tcp': manage_check_tcp_command,
'check_ftp': manage_check_tcp_command,
}
f = tab.get(cmd, None)
if f:
return f(elt)
try:
r = manage_unknown_command(elt)
except:
return None
return r
def manage_check_http_command(elt):
safe_print('Get check_http perfdata of', elt.get_full_name())
p = PerfDatas(elt.perf_data)
if not 'time' in p:
print "No time in p"
return None
m = p['time']
v = m.value
if not v:
print "No value, I bailout"
return None
# Percent of ok should be time/1s
pct = get_logarithmic(v, 1)
# Now get the color
# OK: #6f2 (102,255,34) green
# Warning: #f60 (255,102,0) orange
# Crit: #ff0033 (255,0,51)
base_color = {0: (102, 255, 34), 1: (255, 102, 0), 2: (255, 0, 51)}
state_id = get_stateid(elt)
color = base_color.get(state_id, (179, 196, 255))
s_color = 'RGB(%d,%d,%d)' % color
lnk = '#'
metrics = [(s_color, pct), ('white', 100-pct)]
title = '%ss' % v
#print "HTTP: return", {'lnk': lnk, 'metrics': metrics, 'title': title}
return {'lnk': lnk, 'metrics': metrics, 'title': title}
def manage_check_ping_command(elt):
safe_print('Get check_ping perfdata of', elt.get_full_name())
p = PerfDatas(elt.perf_data)
if not 'rta' in p:
print "No rta in p"
return None
m = p['rta']
v = m.value
crit = m.critical
if not v or not crit:
print "No value, I bailout"
return None
# Percent of ok should be the log of time versus max/2
pct = get_logarithmic(v, crit / 2)
# Now get the color
# OK: #6f2 (102,255,34) green
# Warning: #f60 (255,102,0) orange
# Crit: #ff0033 (255,0,51)
base_color = {0: (102, 255, 34), 1: (255, 102, 0), 2: (255, 0, 51)}
state_id = get_stateid(elt)
color = base_color.get(state_id, (179, 196, 255))
s_color = 'RGB(%d,%d,%d)' % color
lnk = '#'
metrics = [(s_color, pct), ('white', 100-pct)]
title = '%sms' % v
#print "HTTP: return", {'lnk': lnk, 'metrics': metrics, 'title': title}
return {'lnk': lnk, 'metrics': metrics, 'title': title}
def manage_check_tcp_command(elt):
safe_print('Get check_tcp perfdata of', elt.get_full_name())
p = PerfDatas(elt.perf_data)
if not 'time' in p:
print "No time in p"
return None
m = p['time']
v = m.value
if not v or not m.max:
print "No value, I bailout"
return None
# Percent of ok should be the log of time versus m.max / 2
pct = get_logarithmic(v, m.max / 2)
# Now get the color
# OK: #6f2 (102,255,34) green
# Warning: #f60 (255,102,0) orange
# Crit: #ff0033 (255,0,51)
base_color = {0: (102, 255, 34), 1: (255, 102, 0), 2: (255, 0, 51)}
state_id = get_stateid(elt)
color = base_color.get(state_id, (179, 196, 255))
s_color = 'RGB(%d,%d,%d)' % color
#pct = 100 * (v / m.max)
# Convert to int
#pct = int(pct)
# Minimum 1%, maximum 100%
#pct = min(max(1, pct), 100)
lnk = '#'
metrics = [(s_color, pct), ('white', 100-pct)]
title = '%ss' % v
#print "HTTP: return", {'lnk': lnk, 'metrics': metrics, 'title': title}
return {'lnk': lnk, 'metrics': metrics, 'title': title}
def manage_unknown_command(elt):
safe_print('Get an unmanaged command perfdata of', elt.get_full_name())
p = PerfDatas(elt.perf_data)
if len(p) == 0:
return None
m = None
# Got some override name we know to be ok for printing
if 'time' in p:
m = p['time']
else:
for v in p:
#print "Look for", v
if v.name is not None and v.value is not None:
m = v
break
    if m is None:
        # No usable metric was found, so there is nothing to display.
        return None
    prop = m.name
safe_print("Got a property", prop, "and a value", m)
v = m.value
if not v:
print "No value, I bailout"
return None
# Now look if min/max are available or not
pct = 0
if m.min and m.max and (m.max - m.min != 0):
pct = 100 * (v / (m.max - m.min))
else: # ok, we will really guess this time...
# Percent of ok should be time/10s
pct = 100 * (v / 10)
# go to int
pct = int(pct)
# But at least 1%
pct = max(1, pct)
# And max to 100%
pct = min(pct, 100)
lnk = '#'
color = get_linear_color(elt, prop)
s_color = 'RGB(%d,%d,%d)' % color
metrics = [(s_color, pct), ('white', 100-pct)]
    uom = m.uom or ''
title = '%s%s' % (v, uom)
#print "HTTP: return", {'lnk': lnk, 'metrics': metrics, 'title': title}
return {'lnk': lnk, 'metrics': metrics, 'title': title}
# Get a linear color by looking at the command name
# and the elt status to get a unique value
def get_linear_color(elt, name):
# base colors are
# #6688ff (102,136,255) light blue for OK
    # #ffdd65 (255,221,101) light yellow for warning
    # #bf4b65 (191,75,101) light red for critical
# #b3c4ff (179,196,255) very light blue for unknown
base = {0: (102, 136, 255), 1: (255, 221, 101), 2: (191, 75, 101)}
state_id = get_stateid(elt)
c = base.get(state_id, (179, 196, 255))
# Get a "hash" of the metric name
h = hash(name) % 25
#print "H", h
    # Most values are high in red, so to avoid overlap, go down
red = (c[0] - h) % 256
green = (c[1] - h) % 256
blue = (c[2] - h) % 256
color = (red, green, blue)
print "Get color", color
return color
def get_stateid(elt):
state_id = elt.state_id
# For host, make DOWN as critical
if state_id == 1 and elt.__class__.my_type == 'host':
state_id = 2
return state_id
def get_logarithmic(value, half):
l_half = math.log(half, 10)
print 'Half is', l_half
l_value = math.log(value, 10)
print "l value is", l_value
# Get the percent of our value for what we asked for
r = 50 + 10.0 * (l_value - l_half)
# Make it an int between 1 and 100
r = int(r)
r = max(1, r)
r = min(r, 100)
return r
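# Illustrative examples (not part of the original module): with half=1,
# get_logarithmic(1, 1) -> 50, get_logarithmic(10, 1) -> 60 and
# get_logarithmic(0.1, 1) -> 40; each factor of ten in `value` shifts the
# result by ten points before it is clamped to the 1..100 range.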
| wbsavage/shinken | shinken/modules/webui_broker/perfdata_guess.py | Python | agpl-3.0 | 7,533 |
from setuptools import find_packages, setup
from ats_sms_operator.version import get_version
setup(
name='django-ats-sms-operator',
version=get_version(),
description="ATS SMS operator library.",
keywords='django, sms receiver',
author='Lubos Matl, Oskar Hollmann',
author_email='[email protected], [email protected]',
url='https://github.com/druids/django-ats-sms-operator',
license='LGPL',
package_dir={'ats_sms_operator': 'ats_sms_operator'},
include_package_data=True,
packages=find_packages(),
classifiers=[
        'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'Intended Audience :: End Users/Desktop',
'License :: OSI Approved :: GNU LESSER GENERAL PUBLIC LICENSE (LGPL)',
'Natural Language :: Czech',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Internet :: WWW/HTTP :: Site Management',
],
install_requires=[
'django>=1.6',
'beautifulsoup4>=4.4.0',
'html5lib>=0.999999',
'django-ipware>=1.0.0',
'requests==2.9.0',
],
zip_safe=False,
)
| matllubos/django-ats-sms-operator | setup.py | Python | lgpl-3.0 | 1,302 |
# -*- coding: utf-8 -*-
from apis.models.test import Test
from . import Resource
class TestsBanner(Resource):
async def get(self, request):
tests = (Test.objects(status=Test.STATUS_PUBLISHED, is_sticky=True)
.order_by('-participate_number')
.skip(0).limit(10))
return tests, 200, None
| gusibi/Metis | apis/v1/api/tests_banner.py | Python | apache-2.0 | 344 |
import datetime
import itertools
import re
import urllib2
import mimetypes
import operator
import logging
import sys
import traceback
import warnings
import tagging
import tagging.models
import vidscraper
from bs4 import BeautifulSoup
from django.conf import settings
from django.contrib.auth.models import User
from django.contrib.comments.moderation import CommentModerator, moderator
from django.contrib.sites.models import Site
from django.contrib.contenttypes import generic
from django.core.exceptions import ValidationError
from django.core.mail import EmailMessage
from django.core.signals import request_finished
from django.core.validators import ipv4_re
from django.db import models
from django.template import Context, loader
from django.utils.html import escape as html_escape
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext_lazy as _
from haystack import connections, connection_router
from mptt.models import MPTTModel
from notification import models as notification
from slugify import slugify
from localtv import utils, settings as lsettings
from localtv.managers import SiteRelatedManager, VideoManager
from localtv.signals import post_video_from_vidscraper, submit_finished
from localtv.templatetags.filters import sanitize
VIDEO_SERVICE_REGEXES = (
('YouTube', r'http://gdata\.youtube\.com/feeds/'),
('YouTube', r'http://(www\.)?youtube\.com/'),
('blip.tv', r'http://(.+\.)?blip\.tv/'),
('Vimeo', r'http://(www\.)?vimeo\.com/'),
('Dailymotion', r'http://(www\.)?dailymotion\.com/rss'))
class Thumbnailable(models.Model):
"""
A type of Model that has thumbnails generated for it. Now that we're using
Daguerre for thumbnails, this is just for backwards compatibility.
"""
# we set this to "logo" for SiteSettings, 'icon' for WidgetSettings
thumbnail_attribute = 'thumbnail'
class Meta:
abstract = True
@property
def has_thumbnail(self):
warnings.warn("has_thumbnail is deprecated and will be removed in a "
"future version.", DeprecationWarning)
return bool(getattr(self, self.thumbnail_attribute))
@property
def thumbnail_path(self):
warnings.warn("thumbnail_path is deprecated and will be removed in a "
"future version.", DeprecationWarning)
thumb_file = getattr(self, self.thumbnail_attribute)
if thumb_file:
return thumb_file.name
else:
return ''
class SiteSettings(Thumbnailable):
"""
A model for storing Site-specific settings (feature switches, custom HTML
and CSS, etc) in the database rather than in settings files. Most of
these can thus be set by site admins rather than sysadmins. There are
also a few fields for storing site event state.
"""
thumbnail_attribute = 'logo'
#: Link to the Site these settings are for.
site = models.OneToOneField(Site)
## Site styles ##
#: Custom logo image for this site.
logo = models.ImageField(upload_to=utils.UploadTo('localtv/sitesettings/logo/%Y/%m/%d/'), blank=True)
#: Custom background image for this site.
background = models.ImageField(upload_to=utils.UploadTo('localtv/sitesettings/background/%Y/%m/%d/'),
blank=True)
#: Arbitrary custom css overrides.
css = models.TextField(blank=True)
## Custom HTML ##
#: Subheader for the site.
tagline = models.CharField(max_length=4096, blank=True)
#: Arbitrary custom HTML which (currently) is used as a site description
#: on the main page.
sidebar_html = models.TextField(blank=True)
#: Arbitrary custom HTML which displays in the footer of all non-admin pages.
footer_html = models.TextField(blank=True)
#: Arbitrary custom HTML which displays on the about page.
about_html = models.TextField(blank=True)
## Site permissions ##
#: A collection of Users who have administrative access to the site.
admins = models.ManyToManyField('auth.User', blank=True,
related_name='admin_for')
#: Whether or not the Submit Video button should display or not.
#: Doesn't affect whether videos can be submitted or not.
#: See http://bugzilla.pculture.org/show_bug.cgi?id=19809
display_submit_button = models.BooleanField(default=True)
#: Whether or not users need to log in to submit videos.
submission_requires_login = models.BooleanField(default=False)
#: Whether or not an email address needs to be given with an
#: unauthenticated video submission.
submission_requires_email = models.BooleanField(default=False)
## Feature switches ##
#: Whether playlist functionality is enabled.
playlists_enabled = models.IntegerField(default=1)
#: Whether the original publication date or date added to this site
#: should be used for sorting videos.
use_original_date = models.BooleanField(
default=True,
help_text="If set, use the original date the video was posted. "
"Otherwise, use the date the video was added to this site.")
#: Whether comments should be held for moderation.
screen_all_comments = models.BooleanField(
verbose_name='Hold comments for moderation',
default=True,
help_text="Hold all comments for moderation by default?")
#: Whether leaving a comment requires you to be logged in.
comments_required_login = models.BooleanField(
default=False,
verbose_name="Require Login",
help_text="If True, comments require the user to be logged in.")
## Tracking fields ##
#: Whether a user has elected to hide the "get started" section in
#: the admin interface.
hide_get_started = models.BooleanField(default=False)
objects = SiteRelatedManager()
def __unicode__(self):
return u'%s (%s)' % (self.site.name, self.site.domain)
def user_is_admin(self, user):
"""
Return True if the given User is an admin for this SiteSettings.
"""
if not user.is_authenticated() or not user.is_active:
return False
if user.is_superuser:
return True
return self.admins.filter(pk=user.pk).exists()
def should_show_dashboard(self):
"""Returns True for backwards-compatibility."""
warnings.warn("should_show_dashboard is deprecated and will be "
"removed in a future version.", DeprecationWarning)
return True
class WidgetSettingsManager(SiteRelatedManager):
def _new_entry(self, site, using):
ws = super(WidgetSettingsManager, self)._new_entry(site, using)
try:
site_settings = SiteSettings.objects.get_cached(site, using)
except SiteSettings.DoesNotExist:
pass
else:
if site_settings.logo:
site_settings.logo.open()
ws.icon = site_settings.logo
ws.save()
return ws
class WidgetSettings(Thumbnailable):
"""
A Model which represents the options for controlling the widget creator.
"""
thumbnail_attribute = 'icon'
site = models.OneToOneField(Site)
title = models.CharField(max_length=250, blank=True)
title_editable = models.BooleanField(default=True)
icon = models.ImageField(upload_to=utils.UploadTo('localtv/widgetsettings/icon/%Y/%m/%d/'), blank=True)
icon_editable = models.BooleanField(default=False)
css = models.FileField(upload_to=utils.UploadTo('localtv/widgetsettings/css/%Y/%m/%d/'), blank=True)
css_editable = models.BooleanField(default=False)
bg_color = models.CharField(max_length=20, blank=True)
bg_color_editable = models.BooleanField(default=False)
text_color = models.CharField(max_length=20, blank=True)
text_color_editable = models.BooleanField(default=False)
border_color = models.CharField(max_length=20, blank=True)
border_color_editable = models.BooleanField(default=False)
objects = WidgetSettingsManager()
def get_title_or_reasonable_default(self):
# Is the title worth using? If so, use that.
use_title = True
if self.title.endswith('example.com'):
use_title = False
if not self.title:
use_title = False
# Okay, so either we return the title, or a sensible default
if use_title:
return html_escape(self.title)
return self.generate_reasonable_default_title()
def generate_reasonable_default_title(self):
prefix = 'Watch Videos on %s'
# Now, work on calculating what goes at the end.
site = Site.objects.get_current()
# The default suffix is a self-link. If the site name and
# site domain are plausible, do that.
if ((site.name and site.name.lower() != 'example.com') and
(site.domain and site.domain.lower() != 'example.com')):
suffix = '<a href="http://%s/">%s</a>' % (
site.domain, html_escape(site.name))
# First, we try the site name, if that's a nice string.
elif site.name and site.name.lower() != 'example.com':
suffix = site.name
# Else, we try the site domain, if that's not example.com
elif site.domain.lower() != 'example.com':
suffix = site.domain
else:
suffix = 'our video site'
return prefix % suffix
class Source(Thumbnailable):
"""
An abstract base class to represent things which are sources of multiple
videos. Current subclasses are Feed and SavedSearch.
"""
id = models.AutoField(primary_key=True)
site = models.ForeignKey(Site)
thumbnail = models.ImageField(upload_to=utils.UploadTo('localtv/source/thumbnail/%Y/%m/%d/'),
blank=True)
auto_approve = models.BooleanField(default=False)
auto_update = models.BooleanField(default=True,
help_text=_("If selected, new videos will"
" automatically be imported "
"from this source."))
user = models.ForeignKey('auth.User', null=True, blank=True)
auto_categories = models.ManyToManyField("Category", blank=True)
auto_authors = models.ManyToManyField("auth.User", blank=True,
related_name='auto_%(class)s_set')
class Meta:
abstract = True
def update(self, video_iter, source_import, clear_rejected=False):
"""
        Imports videos from a feed/search. ``video_iter`` is an iterable which
        returns :class:`vidscraper.videos.Video` objects. We use
        :meth:`.Video.from_vidscraper_video` to map the Vidscraper fields to
        Video attributes.
If ``clear_rejected`` is ``True``, rejected versions of videos that are
found in the ``video_iter`` will be deleted and re-imported.
"""
author_pks = list(self.auto_authors.values_list('pk', flat=True))
category_pks = list(self.auto_categories.values_list('pk', flat=True))
import_opts = source_import.__class__._meta
from localtv.tasks import video_from_vidscraper_video, mark_import_pending
total_videos = 0
try:
for vidscraper_video in video_iter:
total_videos += 1
try:
video_from_vidscraper_video.delay(
vidscraper_video.serialize(),
site_pk=self.site_id,
import_app_label=import_opts.app_label,
import_model=import_opts.module_name,
import_pk=source_import.pk,
status=Video.PENDING,
author_pks=author_pks,
category_pks=category_pks,
clear_rejected=clear_rejected)
except Exception:
source_import.handle_error(
'Import task creation failed for %r' % (
vidscraper_video.url,),
is_skip=True,
with_exception=True)
except Exception:
source_import.fail(with_exception=True)
return
source_import.__class__._default_manager.filter(
pk=source_import.pk
).update(
total_videos=total_videos
)
mark_import_pending.delay(import_app_label=import_opts.app_label,
import_model=import_opts.module_name,
import_pk=source_import.pk)
class Feed(Source):
"""
Feed to pull videos in from.
If the same feed is used on two different sites, they will require two
separate entries here.
Fields:
    - feed_url: The location of this feed
- site: which site this feed belongs to
- name: human readable name for this feed
- webpage: webpage that this feed\'s content is associated with
- description: human readable description of this item
- last_updated: last time we ran self.update_items()
- when_submitted: when this feed was first registered on this site
- status: one of Feed.STATUS_CHOICES
- etag: used to see whether or not the feed has changed since our last
update.
- auto_approve: whether or not to set all videos in this feed to approved
during the import process
- user: a user that submitted this feed, if any
- auto_categories: categories that are automatically applied to videos on
import
- auto_authors: authors that are automatically applied to videos on
import
"""
INACTIVE = 0
ACTIVE = 1
STATUS_CHOICES = (
(INACTIVE, _(u'Inactive')),
(ACTIVE, _(u'Active')),
)
feed_url = models.URLField(verify_exists=False)
name = models.CharField(max_length=250)
webpage = models.URLField(verify_exists=False, blank=True)
description = models.TextField(blank=True)
last_updated = models.DateTimeField()
when_submitted = models.DateTimeField(auto_now_add=True)
etag = models.CharField(max_length=250, blank=True)
calculated_source_type = models.CharField(max_length=255, blank=True, default='')
status = models.IntegerField(choices=STATUS_CHOICES, default=INACTIVE)
class Meta:
unique_together = (
('feed_url', 'site'))
get_latest_by = 'last_updated'
def __unicode__(self):
return self.name
@models.permalink
def get_absolute_url(self):
return ('localtv_list_feed', [self.pk])
def update(self, **kwargs):
"""
Fetch and import new videos from this feed.
"""
try:
FeedImport.objects.get(source=self,
status=FeedImport.STARTED)
except FeedImport.DoesNotExist:
pass
else:
logging.info('Skipping import of %s: already in progress' % self)
return
feed_import = FeedImport.objects.create(source=self,
auto_approve=self.auto_approve)
video_iter = vidscraper.auto_feed(
self.feed_url,
max_results=None if self.status == self.INACTIVE else 100,
api_keys=lsettings.API_KEYS,
)
try:
video_iter.load()
except Exception:
feed_import.fail("Data loading failed for {source}",
with_exception=True)
return
self.etag = getattr(video_iter, 'etag', None) or ''
self.last_updated = datetime.datetime.now()
if self.status == self.INACTIVE:
# If these fields have already been changed, don't
# override those changes. Don't unset the name field
# if no further data is available.
if self.name == self.feed_url:
self.name = video_iter.title or self.name
if not self.webpage:
self.webpage = video_iter.webpage or ''
if not self.description:
self.description = video_iter.description or ''
self.save()
super(Feed, self).update(video_iter, source_import=feed_import,
**kwargs)
def source_type(self):
return self.calculated_source_type
def _calculate_source_type(self):
video_service = self.video_service()
if video_service is None:
return u'Feed'
else:
return u'User: %s' % video_service
def video_service(self):
for service, regexp in VIDEO_SERVICE_REGEXES:
if re.search(regexp, self.feed_url, re.I):
return service
def pre_save_set_calculated_source_type(instance, **kwargs):
# Always save the calculated_source_type
instance.calculated_source_type = instance._calculate_source_type()
# Plus, if the name changed, we have to recalculate all the Videos that depend on us.
try:
v = Feed.objects.get(id=instance.id)
except Feed.DoesNotExist:
return instance
if v.name != instance.name:
# recalculate all the sad little videos' calculated_source_type
for vid in instance.video_set.all():
vid.save()
models.signals.pre_save.connect(pre_save_set_calculated_source_type,
sender=Feed)
class Category(MPTTModel):
"""
A category for videos to be contained in.
Categories and tags aren't too different functionally, but categories are
more strict as they can't be defined by visitors. Categories can also be
hierarchical.
Fields:
- site: A link to the django.contrib.sites.models.Site object this object
is bound to
- name: Name of this category
    - slug: a slugified version of the name, used to create more friendly URLs
- logo: An image to associate with this category
- description: human readable description of this item
    - parent: Reference to another Category. Allows you to have hierarchical
      categories.
"""
site = models.ForeignKey(Site)
name = models.CharField(
max_length=80, verbose_name='Category Name',
help_text=_("The name is used to identify the category almost "
"everywhere; for example, under a video or in a "
"category widget."))
slug = models.SlugField(
verbose_name='Category Slug',
help_text=_("The \"slug\" is the URL-friendly version of the name. It "
"is usually lower-case and contains only letters, numbers "
"and hyphens."))
logo = models.ImageField(
upload_to=utils.UploadTo('localtv/category/logo/%Y/%m/%d/'),
blank=True,
verbose_name='Thumbnail/Logo',
help_text=_("Optional. For example: a leaf for 'environment' or the "
"logo of a university department."))
description = models.TextField(
blank=True, verbose_name='Description (HTML)',
help_text=_("Optional. The description is not prominent by default, but"
" some themes may show it."))
parent = models.ForeignKey(
'self', blank=True, null=True,
related_name='child_set',
verbose_name='Category Parent',
help_text=_("Categories, unlike tags, can have a hierarchy."))
class MPTTMeta:
order_insertion_by = ['name']
class Meta:
unique_together = (
('slug', 'site'),
('name', 'site'))
def __unicode__(self):
return self.name
def dashes(self):
"""
Returns a string of em dashes equal to the :class:`Category`\ 's
level. This is used to indent the category name in the admin
templates.
"""
return mark_safe('—' * self.level)
@models.permalink
def get_absolute_url(self):
return ('localtv_category', [self.slug])
def approved_set(self):
"""
Returns active videos for the category and its subcategories, ordered
by decreasing best date.
"""
opts = self._mptt_meta
lookups = {
'status': Video.ACTIVE,
'categories__left__gte': getattr(self, opts.left_attr),
'categories__left__lte': getattr(self, opts.right_attr),
'categories__tree_id': getattr(self, opts.tree_id_attr)
}
lookups = self._tree_manager._translate_lookups(**lookups)
return Video.objects.filter(**lookups).distinct()
approved_set = property(approved_set)
def unique_error_message(self, model_class, unique_check):
return 'Category with this %s already exists.' % (
unique_check[0],)
class SavedSearch(Source):
"""
A set of keywords to regularly pull in new videos from.
    There's an administrative interface for doing "live searches".
Fields:
- site: site this savedsearch applies to
- query_string: a whitespace-separated list of words to search for. Words
starting with a dash will be processed as negative query terms
- when_created: date and time that this search was saved.
"""
query_string = models.TextField()
when_created = models.DateTimeField(auto_now_add=True)
def __unicode__(self):
return self.query_string
def update(self, **kwargs):
"""
Fetch and import new videos from this search.
"""
try:
SearchImport.objects.get(source=self,
status=SearchImport.STARTED)
except SearchImport.DoesNotExist:
pass
else:
logging.info('Skipping import of %s: already in progress' % self)
return
search_import = SearchImport.objects.create(
source=self,
auto_approve=self.auto_approve
)
searches = vidscraper.auto_search(
self.query_string,
max_results=100,
api_keys=lsettings.API_KEYS,
)
video_iters = []
for video_iter in searches:
try:
video_iter.load()
except Exception:
search_import.handle_error(u'Skipping import of search results '
u'from %s' % video_iter.__class__.__name__,
with_exception=True)
continue
video_iters.append(video_iter)
if video_iters:
super(SavedSearch, self).update(itertools.chain(*video_iters),
source_import=search_import,
**kwargs)
else:
# Mark the import as failed if none of the searches could load.
search_import.fail("All searches failed for {source}",
with_exception=False)
def source_type(self):
return u'Search'
class SourceImportIndex(models.Model):
video = models.OneToOneField('Video', unique=True)
index = models.PositiveIntegerField(blank=True, null=True)
class Meta:
abstract = True
class FeedImportIndex(SourceImportIndex):
source_import = models.ForeignKey('FeedImport', related_name='indexes')
class SearchImportIndex(SourceImportIndex):
source_import = models.ForeignKey('SearchImport', related_name='indexes')
class SourceImportError(models.Model):
message = models.TextField()
traceback = models.TextField(blank=True)
is_skip = models.BooleanField(help_text="Whether this error represents a "
"video that was skipped.")
datetime = models.DateTimeField(auto_now_add=True)
class Meta:
abstract = True
class FeedImportError(SourceImportError):
source_import = models.ForeignKey('FeedImport', related_name='errors')
class SearchImportError(SourceImportError):
source_import = models.ForeignKey('SearchImport', related_name='errors')
class SourceImport(models.Model):
STARTED = 'started'
PENDING = 'pending'
COMPLETE = 'complete'
FAILED = 'failed'
STATUS_CHOICES = (
(STARTED, _('Started')),
(PENDING, _('Pending haystack updates')),
(COMPLETE, _('Complete')),
(FAILED, _('Failed'))
)
start = models.DateTimeField(auto_now_add=True)
last_activity = models.DateTimeField(blank=True, null=True)
total_videos = models.PositiveIntegerField(blank=True, null=True)
videos_imported = models.PositiveIntegerField(default=0)
videos_skipped = models.PositiveIntegerField(default=0)
#: Caches the auto_approve of the search on the import, so that the imported
#: videos can be approved en masse at the end of the import based on the
#: settings at the beginning of the import.
auto_approve = models.BooleanField()
status = models.CharField(max_length=10, choices=STATUS_CHOICES,
default=STARTED)
class Meta:
get_latest_by = 'start'
ordering = ['-start']
abstract = True
def is_running(self):
"""
Returns True if the SourceImport is currently running.
"""
return self.status in (self.STARTED, self.PENDING)
def set_video_source(self, video):
"""
Sets the value of the correct field on the ``video`` to mark it as
having the same source as this import. Must be implemented by
subclasses.
"""
raise NotImplementedError
def get_videos(self):
raise NotImplementedError
def handle_error(self, message, is_skip=False, with_exception=False):
"""
Logs the error with the default logger and to the database.
:param message: A human-friendly description of the error that does
not contain sensitive information.
:param is_skip: ``True`` if the error results in a video being skipped.
Default: False.
:param with_exception: ``True`` if exception information should be
recorded. Default: False.
"""
if with_exception:
exc_info = sys.exc_info()
logging.warn(message, exc_info=exc_info)
tb = ''.join(traceback.format_exception(*exc_info))
else:
logging.warn(message)
tb = ''
self.errors.create(message=message,
source_import=self,
traceback=tb,
is_skip=is_skip)
if is_skip:
self.__class__._default_manager.filter(pk=self.pk
).update(videos_skipped=models.F('videos_skipped') + 1)
def get_index_creation_kwargs(self, video, vidscraper_video):
return {
'source_import': self,
'video': video,
'index': vidscraper_video.index
}
def handle_video(self, video, vidscraper_video):
"""
Creates an index instance connecting the video to this import.
:param video: The :class:`Video` instance which was imported.
:param vidscraper_video: The original video from :mod:`vidscraper`.
"""
self.indexes.create(
**self.get_index_creation_kwargs(video, vidscraper_video))
self.__class__._default_manager.filter(pk=self.pk
).update(videos_imported=models.F('videos_imported') + 1)
def fail(self, message="Import failed for {source}", with_exception=False):
"""
Mark an import as failed, along with some post-fail cleanup.
"""
self.status = self.FAILED
self.last_activity = datetime.datetime.now()
self.save()
self.handle_error(message.format(source=self.source),
with_exception=with_exception)
self.get_videos().delete()
class FeedImport(SourceImport):
source = models.ForeignKey(Feed, related_name='imports')
def set_video_source(self, video):
video.feed_id = self.source_id
def get_videos(self):
return Video.objects.filter(feedimportindex__source_import=self)
class SearchImport(SourceImport):
source = models.ForeignKey(SavedSearch, related_name='imports')
def set_video_source(self, video):
video.search_id = self.source_id
def get_videos(self):
return Video.objects.filter(searchimportindex__source_import=self)
class Video(Thumbnailable):
"""
Fields:
- name: Name of this video
- site: Site this video is attached to
- description: Video description
- tags: A list of Tag objects associated with this item
- categories: Similar to Tags
- authors: the person/people responsible for this video
- file_url: The file this object points to (if any) ... if not
provided, at minimum we need the embed_code for the item.
- file_url_length: size of the file, in bytes
- file_url_mimetype: mimetype of the file
- when_submitted: When this item was first entered into the
database
- when_approved: When this item was marked to appear publicly on
the site
- when_published: When this file was published at its original
source (if known)
- last_featured: last time this item was featured.
- status: one of Video.STATUS_CHOICES
- feed: which feed this item came from (if any)
- website_url: The page that this item is associated with.
- embed_code: code used to embed this item.
- flash_enclosure_url: Crappy enclosure link that doesn't
actually point to a url.. the kind crappy flash video sites
give out when they don't actually want their enclosures to
point to video files.
- guid: data used to identify this video
- thumbnail_url: url to the thumbnail, if such a thing exists
- user: if not None, the user who submitted this video
- search: if not None, the SavedSearch from which this video came
- video_service_user: if not blank, the username of the user on the video
service who owns this video. We can figure out the service from the
website_url.
- contact: a free-text field for anonymous users to specify some contact
info
- notes: a free-text field to add notes about the video
"""
UNAPPROVED = 0
ACTIVE = 1
REJECTED = 2
PENDING = 3
STATUS_CHOICES = (
(UNAPPROVED, _(u'Unapproved')),
(ACTIVE, _(u'Active')),
(REJECTED, _(u'Rejected')),
(PENDING, _(u'Waiting on import to finish')),
)
site = models.ForeignKey(Site)
name = models.CharField(verbose_name="Video Name", max_length=250)
description = models.TextField(verbose_name="Video Description (optional)",
blank=True)
thumbnail_url = models.URLField(verbose_name="Thumbnail URL (optional)",
verify_exists=False, blank=True,
max_length=400)
thumbnail = models.ImageField(upload_to=utils.UploadTo('localtv/video/thumbnail/%Y/%m/%d/'),
blank=True)
categories = models.ManyToManyField(Category, blank=True)
authors = models.ManyToManyField('auth.User', blank=True,
related_name='authored_set')
file_url = models.URLField(verify_exists=False, blank=True,
max_length=2048)
file_url_length = models.IntegerField(null=True, blank=True)
file_url_mimetype = models.CharField(max_length=60, blank=True)
when_modified = models.DateTimeField(auto_now=True,
db_index=True,
default=datetime.datetime.now)
when_submitted = models.DateTimeField(auto_now_add=True)
when_approved = models.DateTimeField(null=True, blank=True)
when_published = models.DateTimeField(null=True, blank=True)
last_featured = models.DateTimeField(null=True, blank=True)
status = models.IntegerField(choices=STATUS_CHOICES, default=UNAPPROVED)
feed = models.ForeignKey(Feed, null=True, blank=True)
website_url = models.URLField(
verbose_name='Original Video Page URL (optional)',
max_length=2048,
verify_exists=False,
blank=True)
embed_code = models.TextField(verbose_name="Video <embed> code", blank=True)
flash_enclosure_url = models.URLField(verify_exists=False, max_length=2048,
blank=True)
guid = models.CharField(max_length=250, blank=True)
user = models.ForeignKey('auth.User', null=True, blank=True)
search = models.ForeignKey(SavedSearch, null=True, blank=True)
video_service_user = models.CharField(max_length=250, blank=True)
video_service_url = models.URLField(verify_exists=False, blank=True)
contact = models.CharField(verbose_name='Email (optional)', max_length=250,
blank=True)
notes = models.TextField(verbose_name='Notes (optional)', blank=True)
calculated_source_type = models.CharField(max_length=255, blank=True, default='')
objects = VideoManager()
taggeditem_set = generic.GenericRelation(tagging.models.TaggedItem,
content_type_field='content_type',
object_id_field='object_id')
class Meta:
ordering = ['-when_submitted']
get_latest_by = 'when_modified'
def __unicode__(self):
return self.name
def clean(self):
# clean is always run during ModelForm cleaning. If a model form is in
# play, rejected videos don't matter; the submission of that form
# should be considered valid. During automated imports, rejected
# videos are not excluded.
self._check_for_duplicates(exclude_rejected=True)
def _check_for_duplicates(self, exclude_rejected=True):
if not self.embed_code and not self.file_url:
raise ValidationError("Video has no embed code or file url.")
qs = Video.objects.filter(site=self.site_id)
if exclude_rejected:
qs = qs.exclude(status=Video.REJECTED)
if self.pk is not None:
qs = qs.exclude(pk=self.pk)
if self.guid and qs.filter(guid=self.guid).exists():
raise ValidationError("Another video with the same guid "
"already exists.")
if (self.website_url and
qs.filter(website_url=self.website_url).exists()):
raise ValidationError("Another video with the same website url "
"already exists.")
if self.file_url and qs.filter(file_url=self.file_url).exists():
raise ValidationError("Another video with the same file url "
"already exists.")
def clear_rejected_duplicates(self):
"""
Deletes rejected copies of this video based on the file_url,
website_url, and guid fields.
"""
if not any((self.website_url, self.file_url, self.guid)):
return
q_filter = models.Q()
if self.website_url:
q_filter |= models.Q(website_url=self.website_url)
if self.file_url:
q_filter |= models.Q(file_url=self.file_url)
if self.guid:
q_filter |= models.Q(guid=self.guid)
qs = Video.objects.filter(
site=self.site_id,
status=Video.REJECTED).filter(q_filter)
qs.delete()
@models.permalink
def get_absolute_url(self):
return ('localtv_view_video', (),
{'video_id': self.id,
'slug': slugify(self.name)[:30]})
def save(self, **kwargs):
"""
        Adds support for an ``update_index`` kwarg, defaulting to ``True``.
If this kwarg is ``False``, then no index updates will be run by the
search index.
"""
# This actually relies on logic in
# :meth:`QueuedSearchIndex._enqueue_instance`
self._update_index = kwargs.pop('update_index', True)
super(Video, self).save(**kwargs)
save.alters_data = True
@classmethod
def from_vidscraper_video(cls, video, status=None, commit=True,
source_import=None, site_pk=None, authors=None,
categories=None, update_index=True):
"""
Builds a :class:`Video` instance from a
:class:`vidscraper.videos.Video` instance. If `commit` is False,
the :class:`Video` will not be saved, and the created instance will have
a `save_m2m()` method that must be called after you call `save()`.
"""
video_file = video.get_file()
if video_file and video_file.expires is None:
file_url = video_file.url
else:
file_url = None
if status is None:
status = cls.UNAPPROVED
if site_pk is None:
site_pk = settings.SITE_ID
now = datetime.datetime.now()
instance = cls(
guid=video.guid or '',
name=video.title or '',
description=video.description or '',
website_url=video.link or '',
when_published=video.publish_datetime,
file_url=file_url or '',
file_url_mimetype=getattr(video_file, 'mime_type', '') or '',
file_url_length=getattr(video_file, 'length', None),
when_submitted=now,
when_approved=now if status == cls.ACTIVE else None,
status=status,
thumbnail_url=video.thumbnail_url or '',
embed_code=video.embed_code or '',
flash_enclosure_url=video.flash_enclosure_url or '',
video_service_user=video.user or '',
video_service_url=video.user_url or '',
site_id=site_pk
)
if instance.description:
soup = BeautifulSoup(video.description)
for tag in soup.find_all(
'div', {'class': "miro-community-description"}):
instance.description = unicode(tag)
break
instance.description = sanitize(instance.description,
extra_filters=['img'])
instance._vidscraper_video = video
if source_import is not None:
source_import.set_video_source(instance)
def save_m2m():
if authors:
instance.authors = authors
if video.user:
name = video.user
if ' ' in name:
first, last = name.split(' ', 1)
else:
first, last = name, ''
author, created = User.objects.get_or_create(
username=name[:30],
defaults={'first_name': first[:30],
'last_name': last[:30]})
if created:
author.set_unusable_password()
author.save()
utils.get_profile_model()._default_manager.create(
user=author, website=video.user_url or '')
instance.authors.add(author)
if categories:
instance.categories = categories
if video.tags:
if settings.FORCE_LOWERCASE_TAGS:
fix = lambda t: t.lower().strip()
else:
fix = lambda t: t.strip()
tags = set(fix(tag) for tag in video.tags if tag.strip())
for tag_name in tags:
tag, created = \
tagging.models.Tag._default_manager.get_or_create(name=tag_name)
tagging.models.TaggedItem._default_manager.create(
tag=tag, object=instance)
if source_import is not None:
source_import.handle_video(instance, video)
post_video_from_vidscraper.send(sender=cls, instance=instance,
vidscraper_video=video)
if update_index:
using = connection_router.for_write()
index = connections[using].get_unified_index().get_index(cls)
index._enqueue_update(instance)
if commit:
instance.save(update_index=False)
save_m2m()
else:
instance.save_m2m = save_m2m
return instance
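    # Usage sketch (illustrative, not from the original source; assumes a
    # vidscraper.videos.Video instance named ``vidscraper_video``):
    #     video = Video.from_vidscraper_video(vidscraper_video, commit=False)
    #     video.save()
    #     video.save_m2m()  # must be called after save() when commit=False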
def get_tags(self):
if self.pk is None:
vidscraper_video = getattr(self, '_vidscraper_video', None)
return getattr(vidscraper_video, 'tags', None) or []
if (hasattr(self, '_prefetched_objects_cache') and
'taggeditem_set' in self._prefetched_objects_cache):
return [item.tag for item in
self._prefetched_objects_cache['taggeditem_set']]
return self.tags
def try_to_get_file_url_data(self):
"""
Do a HEAD request on self.file_url to find information about
self.file_url_length and self.file_url_mimetype
Note that while this method fills in those attributes, it does *NOT*
run self.save() ... so be sure to do so after calling this method!
"""
if not self.file_url:
return
request = urllib2.Request(utils.quote_unicode_url(self.file_url))
request.get_method = lambda: 'HEAD'
try:
http_file = urllib2.urlopen(request, timeout=5)
except Exception:
pass
else:
self.file_url_length = http_file.headers.get('content-length')
self.file_url_mimetype = http_file.headers.get('content-type', '')
if self.file_url_mimetype in ('application/octet-stream', ''):
# We got a not-useful MIME type; guess!
guess = mimetypes.guess_type(self.file_url)
if guess[0] is not None:
self.file_url_mimetype = guess[0]
def submitter(self):
"""
Return the user that submitted this video. If necessary, use the
submitter from the originating feed or savedsearch.
"""
if self.user is not None:
return self.user
elif self.feed is not None:
return self.feed.user
elif self.search is not None:
return self.search.user
else:
# XXX warning?
return None
def when(self):
"""
Simple method for getting the when_published date if the video came
from a feed or a search, otherwise the when_approved date.
"""
site_settings = SiteSettings.objects.get_cached(self.site_id,
self._state.db)
if site_settings.use_original_date and self.when_published:
return self.when_published
return self.when_approved or self.when_submitted
def source_type(self):
if self.id and self.search_id:
try:
return u'Search: %s' % self.search
except SavedSearch.DoesNotExist:
return u''
if self.id and self.feed_id:
try:
if self.feed.video_service():
return u'User: %s: %s' % (
self.feed.video_service(),
self.feed.name)
else:
return 'Feed: %s' % self.feed.name
except Feed.DoesNotExist:
return ''
if self.video_service_user:
return u'User: %s: %s' % (self.video_service(),
self.video_service_user)
return ''
def video_service(self):
if not self.website_url:
return
url = self.website_url
for service, regexp in VIDEO_SERVICE_REGEXES:
if re.search(regexp, url, re.I):
return service
def when_prefix(self):
"""
When videos are bulk imported (from a feed or a search), we list the
date as "published", otherwise we show 'posted'.
"""
site_settings = SiteSettings.objects.get_cached(site=self.site_id,
using=self._state.db)
if self.when_published and site_settings.use_original_date:
return 'published'
else:
return 'posted'
@property
def all_categories(self):
"""
Returns a set of all the categories to which this video belongs.
"""
categories = self.categories.all()
if not categories:
return categories
q_list = []
opts = Category._mptt_meta
for category in categories:
l = {
'left__lte': getattr(category, opts.left_attr),
'right__gte': getattr(category, opts.right_attr),
'tree_id': getattr(category, opts.tree_id_attr)
}
l = Category._tree_manager._translate_lookups(**l)
q_list.append(models.Q(**l))
q = reduce(operator.or_, q_list)
return Category.objects.filter(q)
def pre_save_video_set_calculated_source_type(instance, **kwargs):
# Always recalculate the source_type field.
instance.calculated_source_type = instance.source_type()
models.signals.pre_save.connect(pre_save_video_set_calculated_source_type,
sender=Video)
class Watch(models.Model):
"""
Record of a video being watched.
fields:
- video: Video that was watched
- timestamp: when watched
- user: user that watched it, if any
- ip_address: IP address of the user
"""
video = models.ForeignKey(Video)
timestamp = models.DateTimeField(auto_now_add=True, db_index=True)
user = models.ForeignKey('auth.User', blank=True, null=True)
ip_address = models.IPAddressField()
@classmethod
def add(Class, request, video):
"""
Adds a record of a watched video to the database. If the request came
from localhost, check to see if it was forwarded to (hopefully) get the
right IP address.
"""
ignored_bots = getattr(settings, 'LOCALTV_WATCH_IGNORED_USER_AGENTS',
('bot', 'spider', 'crawler'))
user_agent = request.META.get('HTTP_USER_AGENT', '').lower()
if user_agent and ignored_bots:
for bot in ignored_bots:
if bot in user_agent:
return
ip = request.META.get('REMOTE_ADDR', '0.0.0.0')
if not ipv4_re.match(ip):
ip = '0.0.0.0'
if hasattr(request, 'user') and request.user.is_authenticated():
user = request.user
else:
user = None
try:
Class(video=video, user=user, ip_address=ip).save()
except Exception:
pass
class VideoModerator(CommentModerator):
def allow(self, comment, video, request):
site_settings = SiteSettings.objects.get_cached(site=video.site_id,
using=video._state.db)
if site_settings.comments_required_login:
return request.user and request.user.is_authenticated()
else:
return True
def email(self, comment, video, request):
# we do the import in the function because otherwise there's a circular
# dependency
from localtv.utils import send_notice
site_settings = SiteSettings.objects.get_cached(site=video.site_id,
using=video._state.db)
t = loader.get_template('comments/comment_notification_email.txt')
c = Context({'comment': comment,
'content_object': video,
'user_is_admin': True})
subject = '[%s] New comment posted on "%s"' % (video.site.name,
video)
message = t.render(c)
send_notice('admin_new_comment', subject, message,
site_settings=site_settings)
admin_new_comment = notification.NoticeType.objects.get(
label="admin_new_comment")
if video.user and video.user.email:
video_comment = notification.NoticeType.objects.get(
label="video_comment")
if notification.should_send(video.user, video_comment, "1") and \
not notification.should_send(video.user,
admin_new_comment, "1"):
c = Context({'comment': comment,
'content_object': video,
'user_is_admin': False})
message = t.render(c)
EmailMessage(subject, message, settings.DEFAULT_FROM_EMAIL,
[video.user.email]).send(fail_silently=True)
comment_post_comment = notification.NoticeType.objects.get(
label="comment_post_comment")
previous_users = set()
for previous_comment in comment.__class__.objects.filter(
content_type=comment.content_type,
object_pk=video.pk,
is_public=True,
is_removed=False,
submit_date__lte=comment.submit_date,
user__email__isnull=False).exclude(
user__email='').exclude(pk=comment.pk):
if (previous_comment.user not in previous_users and
notification.should_send(previous_comment.user,
comment_post_comment, "1") and
not notification.should_send(previous_comment.user,
admin_new_comment, "1")):
previous_users.add(previous_comment.user)
c = Context({'comment': comment,
'content_object': video,
'user_is_admin': False})
message = t.render(c)
EmailMessage(subject, message, settings.DEFAULT_FROM_EMAIL,
[previous_comment.user.email]).send(fail_silently=True)
def moderate(self, comment, video, request):
site_settings = SiteSettings.objects.get_cached(site=video.site_id,
using=video._state.db)
if site_settings.screen_all_comments:
if not getattr(request, 'user'):
return True
else:
return not site_settings.user_is_admin(request.user)
else:
return False
moderator.register(Video, VideoModerator)
tagging.register(Video)
def finished(sender, **kwargs):
SiteSettings.objects.clear_cache()
request_finished.connect(finished)
def tag_unicode(self):
# hack to make sure that Unicode data gets returned for all tags
if isinstance(self.name, str):
self.name = self.name.decode('utf8')
return self.name
tagging.models.Tag.__unicode__ = tag_unicode
def send_new_video_email(sender, **kwargs):
site_settings = SiteSettings.objects.get_cached(site=sender.site_id,
using=sender._state.db)
if sender.status == Video.ACTIVE:
# don't send the e-mail for videos that are already active
return
t = loader.get_template('localtv/submit_video/new_video_email.txt')
c = Context({'video': sender})
message = t.render(c)
subject = '[%s] New Video in Review Queue: %s' % (sender.site.name,
sender)
utils.send_notice('admin_new_submission',
subject, message,
site_settings=site_settings)
submit_finished.connect(send_new_video_email, weak=False)
def create_email_notices(app, created_models, verbosity, **kwargs):
notification.create_notice_type('video_comment',
'New comment on your video',
'Someone commented on your video',
default=2,
verbosity=verbosity)
notification.create_notice_type('comment_post_comment',
'New comment after your comment',
'Someone commented on a video after you',
default=2,
verbosity=verbosity)
notification.create_notice_type('video_approved',
'Your video was approved',
'An admin approved your video',
default=2,
verbosity=verbosity)
notification.create_notice_type('admin_new_comment',
'New comment',
'A comment was submitted to the site',
default=1,
verbosity=verbosity)
notification.create_notice_type('admin_new_submission',
'New Submission',
'A new video was submitted',
default=1,
verbosity=verbosity)
notification.create_notice_type('admin_queue_weekly',
'Weekly Queue Update',
'A weekly e-mail of the queue status',
default=1,
verbosity=verbosity)
notification.create_notice_type('admin_queue_daily',
'Daily Queue Update',
'A daily e-mail of the queue status',
default=1,
verbosity=verbosity)
notification.create_notice_type('admin_video_updated',
'Video Updated',
'A video from a service was updated',
default=1,
verbosity=verbosity)
notification.create_notice_type('admin_new_playlist',
'Request for Playlist Moderation',
'A new playlist asked to be public',
default=2,
verbosity=verbosity)
models.signals.post_syncdb.connect(create_email_notices)
def delete_comments(sender, instance, **kwargs):
from django.contrib.comments import get_model
get_model().objects.filter(
object_pk=instance.pk,
content_type__app_label='localtv',
content_type__model='video'
).delete()
models.signals.pre_delete.connect(delete_comments,
sender=Video)
| pculture/mirocommunity | localtv/models.py | Python | agpl-3.0 | 55,425 |
import sys
def exitApp():
print "Thank you for using the scorer app"
sys.exit()
| amangupta53/scorer.py | scorer/system.py | Python | gpl-2.0 | 90 |
import hashlib
import urllib2
from test_services import GET
from akara import pipeline
def test_pipeline_missing_stages():
for stages in (None, [], ()):
try:
pipeline.register_pipeline("blah", stages=stages)
except TypeError:
pass
else:
            raise AssertionError("allowed missing stages: %r" % stages)
def test_flatten():
result = list(pipeline._flatten_kwargs_values(dict(a=["1","2","3"])))
assert result == [("a","1"), ("a","2"), ("a","3")], result
result = list(pipeline._flatten_kwargs_values(dict(a=["1","2","3"], b="9")))
result.sort()
assert result == [("a","1"), ("a","2"), ("a","3"), ("b","9")], result
def test_stage_query_args():
stage = pipeline.Stage("http://example.com", [("a", ["1", "2"]), ("b", "9")])
assert stage.query_string == "a=1&a=2&b=9", stage.query_string
def test_stage_kwargs():
stage = pipeline.Stage("http://example.com", a=["1", "2"], b="9")
assert (stage.query_string == "a=1&a=2&b=9" or
stage.query_string == "b=9&a=1&a=2"), stage.query_string
def test_stage_raw_query():
stage = pipeline.Stage("http://example.com", query_string="=j")
assert stage.query_string == "=j"
def test_stage_error_combinations():
# Not allowed to mix inputs
def t1():
pipeline.Stage("http://example.com", [("a", "b")], query_string="=j")
def t2():
pipeline.Stage("http://example.com", [("a", "b")], a=3)
def t3():
pipeline.Stage("http://example.com", query_string="=j", a=3)
    for t in (t1, t2, t3):
try:
t()
except TypeError:
pass
else:
raise AssertionError("expected to fail")
def test_hash_encode():
result = GET("hash_encode", data="This is a test")
expected = hashlib.md5("secretThis is a test").digest().encode("base64")
assert result == expected, (result, expected)
def test_hash_encode_rot13():
result = GET("hash_encode_rot13", data="This is another test")
expected = hashlib.md5("secretThis is another test").digest().encode("base64").encode("rot13")
assert result == expected, (result, expected)
def test_get_hash():
result = GET("get_hash")
expected = hashlib.md5("Andrew").digest().encode("base64")
assert result == expected, (result, expected)
def test_get_hash2():
result = GET("get_hash", dict(text="Sara Marie"))
expected = hashlib.md5("Sara Marie").digest().encode("base64")
assert result == expected, (result, expected)
def test_broken_pipeline1():
try:
result = GET("broken_pipeline1")
raise AssertionError("should not get here")
except urllib2.HTTPError, err:
assert err.code == 500
msg = err.read()
assert "Broken internal pipeline" in msg, msg
def test_broken_pipeline2():
try:
result = GET("broken_pipeline2", data="feed the pipeline")
raise AssertionError("should not get here")
except urllib2.HTTPError, err:
assert err.code == 500, err.code
msg = err.read()
assert "Broken internal pipeline" in msg, msg
def test_registry_size():
result = GET("test_count_registry")
assert int(result) > 30, "What?! Did you remove elements from the registry?"
| uogbuji/akara | test/test_pipeline.py | Python | apache-2.0 | 3,268 |
from django.contrib.auth import views as auth_views
from django.urls import path
from django.views.generic import RedirectView
from . import views
urlpatterns = [
path('upload_view/', views.upload_view, name='upload_view'),
path('get_view/', views.get_view, name='get_view'),
path('post_view/', views.post_view),
path('put_view/', views.put_view),
path('trace_view/', views.trace_view),
path('header_view/', views.view_with_header),
path('raw_post_view/', views.raw_post_view),
path('redirect_view/', views.redirect_view),
path('redirect_view_307/', views.method_saving_307_redirect_view),
path(
'redirect_view_307_query_string/',
views.method_saving_307_redirect_query_string_view,
),
path('redirect_view_308/', views.method_saving_308_redirect_view),
path(
'redirect_view_308_query_string/',
views.method_saving_308_redirect_query_string_view,
),
path('secure_view/', views.view_with_secure),
path('permanent_redirect_view/', RedirectView.as_view(url='/get_view/', permanent=True)),
path('temporary_redirect_view/', RedirectView.as_view(url='/get_view/', permanent=False)),
path('http_redirect_view/', RedirectView.as_view(url='/secure_view/')),
path('https_redirect_view/', RedirectView.as_view(url='https://testserver/secure_view/')),
path('double_redirect_view/', views.double_redirect_view),
path('bad_view/', views.bad_view),
path('form_view/', views.form_view),
path('form_view_with_template/', views.form_view_with_template),
path('formset_view/', views.formset_view),
path('json_view/', views.json_view),
path('login_protected_view/', views.login_protected_view),
path('login_protected_method_view/', views.login_protected_method_view),
path('login_protected_view_custom_redirect/', views.login_protected_view_changed_redirect),
path('permission_protected_view/', views.permission_protected_view),
path('permission_protected_view_exception/', views.permission_protected_view_exception),
path('permission_protected_method_view/', views.permission_protected_method_view),
path('session_view/', views.session_view),
path('broken_view/', views.broken_view),
path('mail_sending_view/', views.mail_sending_view),
path('mass_mail_sending_view/', views.mass_mail_sending_view),
path('nesting_exception_view/', views.nesting_exception_view),
path('django_project_redirect/', views.django_project_redirect),
path('no_trailing_slash_external_redirect/', views.no_trailing_slash_external_redirect),
path('', views.index_view, name='index'), # Target for no_trailing_slash_external_redirect/ with follow=True
path('two_arg_exception/', views.two_arg_exception),
path('accounts/', RedirectView.as_view(url='login/')),
path('accounts/no_trailing_slash', RedirectView.as_view(url='login/')),
path('accounts/login/', auth_views.LoginView.as_view(template_name='login.html')),
path('accounts/logout/', auth_views.LogoutView.as_view()),
# Async views.
path('async_get_view/', views.async_get_view, name='async_get_view'),
]
| atul-bhouraskar/django | tests/test_client/urls.py | Python | bsd-3-clause | 3,134 |
import pygame, sys
from entityclasses import *
from compositeclasses import *
'''
Tamed Monster masterlist format:
'Name': [HP, atk, def, mus, foc, cla, rhy, <- base (1-9999, combined max 50000?)
HP, atk, def, mus, foc, cla, rhy] <- gain modifiers (1-10)
'''
masterlist_tm = {
'Kobold': [1500, 1550, 1350, 1100, 1200, 1250, 1200,
5,4,3,5,5,5,6]
}
'''
Wild Monster masterlist format:
'Name': [HP, atk, def, mus, foc, cla, rhy, <- base, no max
 hits, proration, TP, <- attack stats
[[command1, target1, condition1, conditionNum1, probability1] , [command2, target2, condition2, conditionNum2, probability2], ...]]
^ possible AI actions, list by priority
[!] The only generic command strings are attack, offspell, defspell, and debspell. [!]
[!] All other names straight up call the word as a spell [!]
[!] example AI: [!]
Suicide to damage enemy party if self HP is <30% (prob 100)
At turn 3, attack enemy with lowest hp (prob 100)
Use Healing Howl to heal ally with lowest HP (prob 75)
Use spell on random enemy, no condition (prob 60)
Attack someone from enemy party, no condition (prob 100)
[['suicide', 'enemy', 'selfHP<', '30', 100],
['attack', 'enemy', 'turn=', 3, 100],
['Healing Howl', 'ally', 'lowestHP', None, 60],
['offspell', 'enemy', 'HP>', 500, 60],
['attack', 'enemy', None, None, 100]]
again, an AI entry consists of this 5-tuple:
[ACTION, TARGET, CONDITION, CONDITION COMPARATE (if applicable), PROBABILITY]
'''
masterlist_wm = {
'Kedama': [1300, 450, 700, 300, 300, 300, 300,
4, 0.85, 25,
[['offspell', 'enemy', 'enemyHP>', 500, 60],
['attack', 'enemy', None, None, 100]]],
'Slime': [1400, 500, 750, 420, 400, 420, 400,
4, 0.85, 25,
[['offspell', 'enemy', 'enemyHP>', 500, 60],
['attack', 'enemy', None, None, 100]]],
'Wolf': [2100, 800, 975, 150, 150, 250, 250,
4, 0.85, 75,
[['offspell', 'enemy', 'enemyHP>', 500, 60],
#['Healing Howl', 'ally','lowestHP' ,None, 95],
['attack', 'enemy', None, None, 100]]],
'Husk': [3000, 1250, 1050, 500, 500, 500, 500,
4, 0.85, 250,
[['offspell', 'enemy', 'enemyHP>', 500, 60],
['attack', 'enemy', None, None, 100]]],
'Orthrus': [5000, 5000, 5000, 5000, 5000, 5000, 5000,
4, 0.85, 9001,
[['offspell', 'enemy', 'enemyHP>', 500, 60],
#['Healing Howl', 'self','HP<' ,2500, 60],
#['suicide', 'enemy', 'selfHP<', '20', 100],
['attack', 'enemy', None, None, 100]]]
}
'''
Item price masterlist format:
'Name': [buy, sell]
'''
masterlist_price = {
'Room': [30,0],
'Potion': [50, 25],
'Flute': [600, 300],
'Mouthpiece': [200, 100],
'Gria Auliet': [0, 50000]
}
'''
Item effect masterlist format:
'Name': ['one/aoe/col/row', [['eff1', [arg1]], ['eff2', [arg1, arg2]], ...]]
'''
masterlist_item = {
'Potion': ['one', [['rec_HP', [100]]]]
}
'''
Instrument masterlist format:
'Name':[hits, bhp, batk, bdef, bmus, bfoc, bcla, brhy, bng,
php, patk, pdef, pmus, pfoc, pcla, prhy, png,
type, atk multiplier, crit chance, crit multiplier, proration per hit,
{effects}]
'''
masterlist_instrument = {
'Flute': [8,0,60,0,30,0,0,0,0,
0,0,0,0,0,0,0,0,
'wind', 0.2, 2, 2.5, 0.9,
None],
'Gria Auliet': [9,15,150,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,
'wind', 0.3, 5, 2.5, 0.95,
None]
}
'''
Accessory masterlist format:
'Name':[effect,
bhp, batk, bdef, bmus, bfoc, bcla, brhy, bng,
php, patk, pdef, pmus, pfoc, pcla, prhy, png]
'''
masterlist_accessory = {
'Mouthpiece': [None,0,60,0,30,0,0,0,0,
0,0,0,0,0,0,0,0]
}
'''
Spell masterlist format:
'Name': [cost, type, inst, target, [[eff1, [args]], [eff2, [args]], ...]]
[!] type = off/def/buf/deb [!]
[!] inst = wind/string/percussion
[!] target = one/row/col/aoe
'''
masterlist_spell = {
'Black Aria': [4, 'off', 'wind', 'aoe',
[['dmg_HP', [5, 'mus', 0.14]],
['apply_debuff', ['Poison', 25, 2]]]]
}
'''
Status masterlist format:
'Name': [type, [[eff1, [args]], [eff2, [args]], ...]]
[!] type = off/def/buf/deb [!]
'''
masterlist_status = {
'Poison': ['off', [['dmg_HP', [1, 'eHP', 0.05]]]],
'Paralysis': ['off', [['set_Paralysis', [True]]]]
}
masterlist_conductor = {
'Hanami Otozono': {
'HP': 5, 'atk': 5, 'def': 5,
'mus': 5,'foc': 5,'cla': 5,'rhy': 5,
'string': 8,'wind': 5,'percussion': 2
},
'Gir-Nas': {
'HP': 6, 'atk': 7, 'def': 7,
'mus': 5,'foc': 3,'cla': 3,'rhy': 6,
'string': 5,'wind': 3,'percussion': 8
}
}
def tamedMonster_Init(indexName):
tempdict = {}
tempdict['base'] = {
'HP': masterlist_tm[indexName][0], 'atk': masterlist_tm[indexName][1], 'def': masterlist_tm[indexName][2],
'mus': masterlist_tm[indexName][3], 'foc': masterlist_tm[indexName][4], 'cla': masterlist_tm[indexName][5], 'rhy': masterlist_tm[indexName][6],
'string': 1, 'wind': 1, 'percussion': 1
}
tempdict['curr'] = {
'HP': masterlist_tm[indexName][0], 'bond': 0,
'notegain': 2, 'notes': 4
}
tempdict['bonus'] = {
'bonusHP': 0, 'bonusatk': 0, 'bonusdef': 0,
'bonusmus': 0, 'bonusfoc': 0, 'bonuscla': 0, 'bonusrhy': 0,
'bonusnotegain': 0
}
tempdict['penalty'] = {
'penaltyHP': 0,'penaltyatk': 0,'penaltydef': 0,
'penaltymus': 0,'penaltyfoc': 0,'penaltycla': 0,'penaltyrhy': 0,
'penaltynotegain': 0
}
tempdict['gains'] = {
'HP': masterlist_tm[indexName][7],'atk': masterlist_tm[indexName][8],'def': masterlist_tm[indexName][9],
'mus': masterlist_tm[indexName][10],'foc': masterlist_tm[indexName][11],'cla': masterlist_tm[indexName][12],'rhy': masterlist_tm[indexName][13]
}
tempdict['tp'] = {
'tp': 0,'totaltp': 0,'tpprog': 0, 'nexttp': 100
}
return tempdict
def wildMonster_Init(indexName):
tempdict = {}
tempdict['base'] = {
'HP': masterlist_wm[indexName][0], 'atk': masterlist_wm[indexName][1], 'def': masterlist_wm[indexName][2],
'mus': masterlist_wm[indexName][3], 'foc': masterlist_wm[indexName][4], 'cla': masterlist_wm[indexName][5], 'rhy': masterlist_wm[indexName][6],
}
tempdict['curr'] = {
'HP': masterlist_wm[indexName][0], 'hits': masterlist_wm[indexName][7], 'proration': masterlist_wm[indexName][8], 'TP': masterlist_wm[indexName][9]
}
tempdict['bonus'] = {
'bonusHP': 0, 'bonusatk': 0, 'bonusdef': 0,
'bonusmus': 0, 'bonusfoc': 0, 'bonuscla': 0, 'bonusrhy': 0
}
tempdict['penalty'] = {
'penaltyHP': 0,'penaltyatk': 0,'penaltydef': 0,
'penaltymus': 0,'penaltyfoc': 0,'penaltycla': 0,'penaltyrhy': 0
}
return masterlist_wm[indexName][10], tempdict
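# Illustrative sketch (not part of the original module): how the init helpers
# above are meant to be consumed. wildMonster_Init returns the AI priority
# list separately from the stat dictionary, so callers unpack two values,
# while tamedMonster_Init returns a single nested dictionary.
def _demo_monster_init():
    ai, stats = wildMonster_Init('Kedama')
    assert stats['curr']['HP'] == masterlist_wm['Kedama'][0]
    assert ai[-1][0] == 'attack'  # lowest-priority fallback action
    tamed = tamedMonster_Init('Kobold')
    assert tamed['gains']['HP'] == masterlist_tm['Kobold'][7]
    return ai, stats, tamed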
def itemPrice_Init(indexName):
tempdict = {}
tempdict['buy'] = masterlist_price[indexName][0]
tempdict['sell'] = masterlist_price[indexName][1]
return tempdict
def consumableEffect_Init(indexName):
return masterlist_item[indexName][0], masterlist_item[indexName][1]
def instrument_Init(indexName):
tempdict = {}
tempdict['base'] = {
'hits': masterlist_instrument[indexName][0], 'type': masterlist_instrument[indexName][17],
'atkmult': masterlist_instrument[indexName][18],'critchance': masterlist_instrument[indexName][19], 'critmult': masterlist_instrument[indexName][20],
'proration': masterlist_instrument[indexName][21],'effects': masterlist_instrument[indexName][22]
}
tempdict['bonus'] = {
'bonusHP': masterlist_instrument[indexName][1], 'bonusatk': masterlist_instrument[indexName][2], 'bonusdef': masterlist_instrument[indexName][3],
'bonusmus': masterlist_instrument[indexName][4], 'bonusfoc': masterlist_instrument[indexName][5], 'bonuscla': masterlist_instrument[indexName][6], 'bonusrhy': masterlist_instrument[indexName][7],
'bonusnotegain': masterlist_instrument[indexName][8]
}
tempdict['penalty'] = {
'penaltyHP': masterlist_instrument[indexName][9],'penaltyatk': masterlist_instrument[indexName][10],'penaltydef': masterlist_instrument[indexName][11],
'penaltymus': masterlist_instrument[indexName][12],'penaltyfoc': masterlist_instrument[indexName][13],'penaltycla': masterlist_instrument[indexName][14],'penaltyrhy': masterlist_instrument[indexName][15],
'penaltynotegain': masterlist_instrument[indexName][16]
}
return tempdict
def accessory_Init(indexName):
tempdict = {}
tempdict['effect'] = masterlist_accessory[indexName][0]
tempdict['bonus'] = {
'bonusHP': masterlist_accessory[indexName][1], 'bonusatk': masterlist_accessory[indexName][2], 'bonusdef': masterlist_accessory[indexName][3],
'bonusmus': masterlist_accessory[indexName][4], 'bonusfoc': masterlist_accessory[indexName][5], 'bonuscla': masterlist_accessory[indexName][6], 'bonusrhy': masterlist_accessory[indexName][7],
'bonusnotegain': masterlist_accessory[indexName][8]
}
tempdict['penalty'] = {
'penaltyHP': masterlist_accessory[indexName][9],'penaltyatk': masterlist_accessory[indexName][10],'penaltydef': masterlist_accessory[indexName][11],
'penaltymus': masterlist_accessory[indexName][12],'penaltyfoc': masterlist_accessory[indexName][13],'penaltycla': masterlist_accessory[indexName][14],'penaltyrhy': masterlist_accessory[indexName][15],
'penaltynotegain': masterlist_accessory[indexName][16]
}
return tempdict
def spell_Init(indexName):
return masterlist_spell[indexName][0], masterlist_spell[indexName][1], masterlist_spell[indexName][2], masterlist_spell[indexName][3], masterlist_spell[indexName][4]
def status_Init(indexName):
return masterlist_status[indexName][0], masterlist_status[indexName][1]
def conductor_Init(indexName):
return masterlist_conductor[indexName] | 2Guys1Python/Project-Cacophonum | data/init.py | Python | mit | 9,463 |
# -*- coding: utf-8 -*-
from django.db import models
from django.conf import settings
from django.template import TemplateSyntaxError, RequestContext
from django.template.loader import render_to_string
from django.utils.translation import ugettext_lazy as _
from django.utils.encoding import smart_str, force_unicode
from django.utils.safestring import mark_safe
from content_ext.markup.forms import MarkupContentAdminForm
def restructuredtext(value):
"""
parse restructured text
"""
try:
from docutils.core import publish_parts
from docutils.parsers.rst import directives, Directive
from content_ext.markup.directives.code import CodeHighlight
except ImportError:
if settings.DEBUG:
raise TemplateSyntaxError("Error in content type: The Python "
"docutils library isn't installed.")
return force_unicode(value)
else:
docutils_settings = getattr(settings,
'RESTRUCTUREDTEXT_FILTER_SETTINGS', {},)
if settings.MARKUP_CODE_HIGHTLIGHT:
directives.register_directive('code', CodeHighlight)
parts = publish_parts(source=smart_str(value), writer_name="html4css1",
settings_overrides=docutils_settings,)
return mark_safe(force_unicode(parts["html_body"]))
def markdown(value):
"""
Runs Markdown over a given value, optionally using various
extensions python-markdown supports.
Syntax::
{{ value|markdown2:"extension1_name,extension2_name..." }}
To enable safe mode, which strips raw HTML and only returns HTML
generated by actual Markdown syntax, pass "safe" as the first
extension in the list.
If the version of Markdown in use does not support extensions,
they will be silently ignored.
"""
try:
import markdown
except ImportError:
if settings.DEBUG:
raise TemplateSyntaxError("Error in content type: The "
"Markdown library isn't installed.")
return force_unicode(value)
else:
def parse_extra(extra):
if ':' not in extra:
return (extra, {})
name, values = extra.split(':', 1)
values = dict((str(val.strip()), True) for val in \
values.split('|'))
return (name.strip(), values)
extensions = ['']
if settings.MARKUP_CODE_HIGHTLIGHT:
            extensions = ['codehilite(force_linenos=True)']
#extras = (e.strip() for e in arg.split(','))
#extras = dict(parse_extra(e) for e in extras if e)
#if 'safe' in extras:
# del extras['safe']
# safe_mode = True
#else:
# safe_mode = False
return mark_safe(markdown.markdown(force_unicode(value), extensions))
def textile(value):
"""
parse textile text
"""
try:
import textile
except ImportError:
if settings.DEBUG:
raise TemplateSyntaxError("Error in content type: The "
"Python textile library isn't installed.")
return force_unicode(value)
else:
return mark_safe(force_unicode(textile.textile(smart_str(value), \
encoding='utf-8', output='utf-8')))
class MarkupContent(models.Model):
"""
implenment restructured text
"""
MARKUP_CHOICE = (
('rst', 'restructure text'),
('markdown', 'markdown'),
('textile', 'textile')
)
markup = models.TextField(_("Markup Text"), blank=False)
markup_type = models.CharField(max_length=10, blank=False,
choices=MARKUP_CHOICE)
    markup_html = models.TextField(blank=False)
form = MarkupContentAdminForm
feincms_item_editor_form = MarkupContentAdminForm
#feincms_item_editor_context_processors = (
# lambda x: dict(MARKITUP_JS_URL = settings.MARKITUP_JS_URL),
#)
#feincms_item_editor_includes = {
# 'head': [ 'settings.MARKITUP_CONFIG_URL', ],
#}
class Meta:
abstract = True
verbose_name = _('Markup')
verbose_name_plural = _('Markup')
def save(self, *args, **kwargs):
self.markup_html = self.parser(self.markup, self.markup_type)
return super(MarkupContent, self).save(*args, **kwargs)
    def parser(self, value, type=None):
        if type == 'rst':
            convert = restructuredtext(value)
        elif type == 'markdown':
            convert = markdown(value)
        elif type == 'textile':
            convert = textile(value)
        else:
            # unknown markup type: fall back to the raw value instead of
            # raising UnboundLocalError on return
            convert = value
        return convert
def render(self, **kwargs):
request = kwargs.get('request')
return render_to_string('content_markup/default.html',
{'content': self}, context_instance=RequestContext(request))
| indexofire/gravoicy | gravoicy/apps/content_ext/markup/models.py | Python | bsd-3-clause | 4,746 |
import sys
import csv
from gene_positions import chrom
import argparse
import numpy as np
import matplotlib.pyplot as plt
parser = argparse.ArgumentParser(description='Scatter plot of weights in edgelist.')
parser.add_argument('A', type=argparse.FileType('r'))
parser.add_argument('B', type=argparse.FileType('r'))
#parser.add_argument('--output', type=argparse.FileType('w'), default=sys.stdout)
args = parser.parse_args()
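# Expected input format (inferred from the DictReader usage below): both A
# and B are tab-separated edgelists with a header row, e.g.
#
#   from    to      weight
#   BRCA1   TP53    0.87
#   BRCA1   EGFR    0.42
#
# The gene names above are illustrative placeholders only.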
ra = csv.DictReader(args.A, delimiter="\t", quotechar='"')
A = {}
for e in ra:
A[(e['from'], e['to'])] = e['weight']
rb = csv.DictReader(args.B, delimiter="\t", quotechar='"')
B = {}
for e in rb:
B[(e['from'], e['to'])] = e['weight']
x = []
y = []
colors = []
for k in A.keys():
if k in B:
        x.append(float(A[k]))
        y.append(float(B[k]))
(s,t) = k
# source and target in the same chromosome
if chrom.get(s) == chrom.get(t):
colors.append('red')
else:
colors.append('grey')
#w = csv.writer(args.output, delimiter="\t")
x = np.array(x)
y = np.array(y)
#area = np.pi * (15 * np.random.rand(N))**2 # 0 to 15 point radiuses
area = 50
plt.scatter(x, y, s=area, c=colors, alpha=0.4)
plt.show()
| CSB-IG/circos_rnaseq_tcga_brca | MI_differential/scatter.py | Python | gpl-3.0 | 1,199 |
from django import forms
from edc_consent.forms import BaseSubjectConsentForm
from ..models import MaternalConsent
class MaternalConsentForm(BaseSubjectConsentForm):
def clean(self):
cleaned_data = self.cleaned_data
consent = MaternalConsent.objects.filter(identity=cleaned_data.get('identity'))
if consent:
raise forms.ValidationError("This identity number already exists")
return cleaned_data
class Meta:
model = MaternalConsent
| botswana-harvard/edc-bhp074 | bhp074/apps/eit_maternal/forms/maternal_consent_form.py | Python | gpl-2.0 | 500 |
import tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['Quantization'] , ['MovingMedian'] , ['BestCycle'] , ['SVR'] ); | antoinecarme/pyaf | tests/model_control/detailed/transf_Quantization/model_control_one_enabled_Quantization_MovingMedian_BestCycle_SVR.py | Python | bsd-3-clause | 158 |
"""
observersimple.py
An implementation of the Observer design pattern
http://www.codeskulptor.org/#user41_SDGV3w1Qcj_1.py
"""
class Publisher: # Observable
"""registers subscribers to notify"""
def __init__(self):
self._subscribers = dict()
    def register(self, subscriber, callback=None):
self._subscribers[subscriber] = callback
def unregister(self, subscriber):
del self._subscribers[subscriber]
def notify(self, args):
for subscriber, callback in self._subscribers.items():
if callback is None:
subscriber.update(args)
else:
callback(args)
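# Minimal usage sketch (illustrative, not in the original file): a subscriber
# either exposes an update(args) method or registers a callback; notify()
# dispatches to whichever was provided at registration time.
class DemoSubscriber:
    def update(self, args):
        print("update received: " + str(args))
def demo_callback(args):
    print("callback received: " + str(args))
if __name__ == "__main__":
    pub = Publisher()
    pub.register(DemoSubscriber())               # dispatched via update()
    pub.register("tag", callback=demo_callback)  # dispatched via callback
    pub.notify("state changed")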
| ReblochonMasque/codeskulptor_projects | observer_pattern/observersimple.py | Python | mit | 668 |
"""
WSGI config for uptee project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "uptee.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| upTee/upTee | uptee/wsgi.py | Python | bsd-3-clause | 1,132 |
# Copyright (c) 2015, Forschungszentrum Jülich GmbH
# All rights reserved.
# Contributors: Jonas Bühler, Daniel Pflugfelder, Siegfried Jahnke
# Address: Institute of Bio- and Geosciences, Plant Sciences (IBG-2), Forschungszentrum Jülich GmbH, 52428 Jülich, Germany
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
# OF THE POSSIBILITY OF SUCH DAMAGE.
import numpy as np
from mevis import *
def init():
ctx.field("Bypass.noBypass").setBoolValue(True)
ctx.field("isUpToDate").setBoolValue(False)
return
def autoUpdateChanged():
autoUpdate = ctx.field("autoUpdate").value
isUpToDate = ctx.field("isUpToDate").value
if autoUpdate and not isUpToDate:
update()
return
def inputChanged():
ctx.field("isUpToDate").setBoolValue(False)
autoUpdate = ctx.field("autoUpdate").value
if autoUpdate:
update()
return
def update():
# set kernel of convolution module
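    # NOTE (added explanation, inferred from the code below): the 3x3 kernel
    # stores half the physical distance from a skeleton voxel to each of its
    # eight neighbours (d = half diagonal step, x/y = half horizontal/vertical
    # step), so convolving the skeleton mask and summing over skeleton voxels
    # yields the total skeleton length. The factor 0.9481 is presumably a
    # digital-length calibration constant; that reading is an assumption.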
vs_x = ctx.field("Info.voxelSizeX").value
vs_y = ctx.field("Info.voxelSizeY").value
d = str(0.5 * 0.9481 * np.sqrt(vs_x*vs_x + vs_y*vs_y))
x = str(0.5 * 0.9481 * vs_x)
y = str(0.5 * 0.9481 * vs_y)
kernel = "(*,0,0,0,0,0): " + d + ", " + y + ", " + d + "\n(*,1,0,0,0,0): " + x + ", 0, " + x + "\n(*,2,0,0,0,0): " + d + ", " + y + ", " + d
ctx.field("Convolution.externalKernel").setStringValue(kernel)
# enable bypass
ctx.field("Bypass.noBypass").setBoolValue(False)
MLAB.processEvents()
########################################
# calculate skel length
########################################
conv_ml = ctx.field("Convolution.output0").image()
mask_ml = ctx.field("ImagePropertyConvert.output0").image()
if conv_ml and mask_ml:
conv = conv_ml.getTile((0,0,0,0,0,0), conv_ml.imageExtent())
mask = mask_ml.getTile((0,0,0,0,0,0), mask_ml.imageExtent())
#calc totalLength
totalLength = np.sum(conv[mask==1.0])
ctx.field("totalSkeletonLength").setDoubleValue(totalLength)
# set isUpToDate to True
ctx.field("isUpToDate").setBoolValue(True )
# disable bypass
ctx.field("Bypass.noBypass").setBoolValue(True)
return
| ForschungszentrumJuelich/phenoVein | General/Modules/Macros/FZJSkeletonTotalLength/SkeletonTotalLength.py | Python | bsd-3-clause | 3,535 |
# -*- coding: utf-8 -*-
{
'name': "BestJa: Volunteer (UCW)",
'summary': "Volunteer Profile (UCW)",
'description': """
Volunteer Profile modification for UCW
=========================
Hides a couple of fields, and adds a new "UW status" field.
""",
'author': "Laboratorium EE",
'website': "http://www.laboratorium.ee",
'depends': [
'bestja_volunteer',
'bestja_offers',
],
'data': [
'data/wishes.xml',
'data/uw_status.xml',
'views/volunteer.xml',
'views/offer.xml',
'messages.xml',
'views/application.xml',
'security/ir.model.access.csv',
],
}
| EE/bestja | addons/bestja_volunteer_ucw/__openerp__.py | Python | agpl-3.0 | 650 |
# -*- coding: utf-8 -*-
"""
"""
# Author: Dean Serenevy <[email protected]>
# This software is Copyright (c) 2014 APCI, LLC.
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
# for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import division, absolute_import, print_function, unicode_literals
__all__ = '''
Parameter
CmdParameter OutputBitParameter
BasicParameter BasicQueryParameter EqParameter
SSIParameter
AxisParameter AxisQueryParameter AxisMaskParameter
IndexedParameter VectorParameter NetworkParameter
'''.split()
import struct
import Galil as ExternalGalil
from .galil import Galil
class Parameter(object):
def __init__(self, galil, name, value=None, axes=None, filename=None, lineno=None):
self.galil = galil
self.name = name
self.value = value
self.axes = axes
self.filename = filename
self.lineno = lineno
def __str__(self):
if self.value is not None:
return self.set_cmd(self.value)
else:
raise Exception("Value not defined for parameter {} of type {}".format(self.name, type(self)))
def check(self, value=None):
value = self.value if value is None else value
curr_value = self.get(refresh=False)
return self.cmp(curr_value, value)
def cmp(self, curr_value, value):
# String equality is great!
if str(value) == str(curr_value):
return True
# Else, try to interpret as numbers
try:
return Galil.round(value) == Galil.round(curr_value)
except ValueError:
return False
def get(self, refresh=True):
value = self.galil.command(self.get_cmd())
if refresh:
self.value = value
return value
def set(self, value, refresh=True):
if refresh:
self.value = str(value)
self.galil.command(self.set_cmd(value))
class CmdParameter(Parameter):
def __init__(self, galil, name, cmd=None, **kwargs):
super(CmdParameter,self).__init__(galil, name, **kwargs)
self.cmd = name if cmd is None else cmd
self.value = 1
def get_cmd(self):
return "MG1"
def set_cmd(self, value):
return self.cmd
class BasicParameter(Parameter):
def __init__(self, galil, name, cmd=None, **kwargs):
super(BasicParameter,self).__init__(galil, name, **kwargs)
self.cmd = name if cmd is None else cmd
def get_cmd(self):
return "MG_" + self.cmd
def set_cmd(self, value):
return "{} {}".format(self.cmd, value)
class BasicQueryParameter(BasicParameter):
def get_cmd(self):
return "{} ?".format(self.cmd)
class EqParameter(BasicParameter):
def set_cmd(self, value):
return "{}={}".format(self.cmd, value)
class AxisParameter(EqParameter):
def __init__(self, galil, name, axis, **kwargs):
super(AxisParameter,self).__init__(galil, name, cmd=(name + axis), **kwargs)
class AxisQueryParameter(AxisParameter):
def get_cmd(self):
return "{}=?".format(self.cmd)
class SSIParameter(AxisQueryParameter):
def get(self, refresh=True):
try:
value = self.galil.command(self.get_cmd())
value = [ x.strip() for x in value.split(',') ]
value = "{},{},{},{}<{}>{}".format(*value)
except ExternalGalil.CommandError:
value = 0
if refresh:
self.value = value
return value
class AxisMaskParameter(BasicParameter):
def __init__(self, galil, name, axes, **kwargs):
super(AxisMaskParameter,self).__init__(galil, name, axes=axes, **kwargs)
def get(self, refresh=True):
axes = [ a for a in self.axes if self.galil.commandValue(self.get_cmd(a)) ]
axes = self.join(axes)
if refresh:
self.value = axes
return axes
def get_cmd(self, axis):
return "MG_" + self.cmd + str(axis)
def join(self, items):
return "".join(str(x) for x in items) if items else "N"
class VectorParameter(AxisMaskParameter):
def __init__(self, galil, name, length, **kwargs):
kwargs['axes'] = range(length)
super(VectorParameter,self).__init__(galil, name, **kwargs)
def get(self, refresh=True):
axes = [ self.galil.commandValue(self.get_cmd(a)) for a in self.axes ]
axes = self.join(axes)
if refresh:
self.value = axes
return axes
def cmp(self, curr_value, value):
curr_value = curr_value.split(',')
value = value.split(',')
if len(curr_value) != len(value):
return False
for i in xrange(len(value)):
if not super(VectorParameter,self).cmp(curr_value[i], value[i]):
return False
return True
def join(self, items):
return ",".join(str(x) for x in items)
class NetworkParameter(BasicParameter):
@staticmethod
def int_to_csip(val):
return ",".join([ str(x) for x in reversed(struct.unpack_from(b"BBBB", struct.pack(b"i", val))) ])
def get(self, refresh=True):
value = self.int_to_csip(self.galil.commandValue(self.get_cmd()))
if refresh:
self.value = value
return value
def get_cmd(self):
return "MG_" + self.name + "0"
class IndexedParameter(Parameter):
def __init__(self, galil, name, index, **kwargs):
super(IndexedParameter,self).__init__(galil, name, **kwargs)
self.index = index
def get_cmd(self):
return "MG_" + self.name + str(self.index)
def set_cmd(self, value):
return "{} {},{}".format(self.name, self.index, value)
class OutputBitParameter(Parameter):
def __init__(self, galil, name, index, **kwargs):
kwargs.setdefault("value", (1 if name == 'SB' else 0))
super(OutputBitParameter,self).__init__(galil, name, **kwargs)
self.index = index
def get_cmd(self):
return "MG@OUT[{}]".format(self.index)
def set_cmd(self, value):
return ("SB{}" if int(float(value)) else "CB{}").format(self.index)
| duelafn/python-galil-apci | galil_apci/parameters.py | Python | lgpl-3.0 | 6,619 |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Test configurations for nose
This module contains nose plugin hooks that configures Beam tests which
includes ValidatesRunner test and E2E integration test.
"""
from nose.plugins import Plugin
class BeamTestPlugin(Plugin):
"""A nose plugin for Beam testing that registers command line options
This plugin is registered through setuptools in entry_points.
"""
def options(self, parser, env):
"""Add '--test-pipeline-options' to command line option to avoid
unrecognized option error thrown by nose.
The value of this option will be processed by TestPipeline and used to
build customized pipeline for ValidatesRunner tests.
"""
parser.add_option('--test-pipeline-options',
action='store',
type=str,
help='providing pipeline options to run tests on runner')
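# Illustrative sketch (assumed, not taken from this file): a nose plugin of
# this kind is typically registered in setup.py roughly as follows; the
# module path "test_config:BeamTestPlugin" is an assumption for illustration.
#
#     setup(
#         ...
#         entry_points={
#             'nose.plugins.0.10': [
#                 'beam_test_plugin = test_config:BeamTestPlugin',
#             ],
#         },
#     )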
| amitsela/incubator-beam | sdks/python/test_config.py | Python | apache-2.0 | 1,649 |
# coding=utf-8
import os
import tempfile
import shutil
from django.core.files.uploadedfile import UploadedFile
from django.test import TransactionTestCase
from django.contrib.auth.models import Group
from hs_core.testing import MockIRODSTestCaseMixin
from hs_core import hydroshare
from hs_core.models import BaseResource
from hs_core.hydroshare.utils import resource_file_add_process, resource_post_create_actions
from hs_core.views.utils import create_folder, move_or_rename_file_or_folder
from hs_file_types.models import GenericLogicalFile, GeoRasterLogicalFile
class CompositeResourceTest(MockIRODSTestCaseMixin, TransactionTestCase):
def setUp(self):
super(CompositeResourceTest, self).setUp()
self.group, _ = Group.objects.get_or_create(name='Hydroshare Author')
self.user = hydroshare.create_account(
'[email protected]',
username='user1',
first_name='Creator_FirstName',
last_name='Creator_LastName',
superuser=False,
groups=[self.group]
)
self.temp_dir = tempfile.mkdtemp()
self.raster_file_name = 'small_logan.tif'
self.raster_file = 'hs_composite_resource/tests/data/{}'.format(self.raster_file_name)
self.generic_file_name = 'generic_file.txt'
self.generic_file = 'hs_composite_resource/tests/data/{}'.format(self.generic_file_name)
target_temp_raster_file = os.path.join(self.temp_dir, self.raster_file_name)
shutil.copy(self.raster_file, target_temp_raster_file)
self.raster_file_obj = open(target_temp_raster_file, 'r')
target_temp_generic_file = os.path.join(self.temp_dir, self.generic_file_name)
shutil.copy(self.generic_file, target_temp_generic_file)
self.generic_file_obj = open(target_temp_generic_file, 'r')
def tearDown(self):
super(CompositeResourceTest, self).tearDown()
if os.path.exists(self.temp_dir):
shutil.rmtree(self.temp_dir)
def test_create_composite_resource(self):
# test that we can create a composite resource
# there should not be any resource at this point
self.assertEqual(BaseResource.objects.count(), 0)
self._create_composite_resource()
# there should be one resource at this point
self.assertEqual(BaseResource.objects.count(), 1)
self.assertEqual(self.composite_resource.resource_type, "CompositeResource")
def test_create_composite_resource_with_file_upload(self):
# test that when we create composite resource with an uploaded file, then the uploaded file
# is automatically set to genericlogicalfile type
self.assertEqual(BaseResource.objects.count(), 0)
self.raster_file_obj = open(self.raster_file, 'r')
self.composite_resource = hydroshare.create_resource(
resource_type='CompositeResource',
owner=self.user,
title='Test Raster File Metadata',
files=(self.raster_file_obj,)
)
        # there should not be any GenericLogicalFile object at this point
self.assertEqual(GenericLogicalFile.objects.count(), 0)
# set the logical file
resource_post_create_actions(resource=self.composite_resource, user=self.user,
metadata=self.composite_resource.metadata)
# there should be one resource at this point
self.assertEqual(BaseResource.objects.count(), 1)
self.assertEqual(self.composite_resource.resource_type, "CompositeResource")
self.assertEqual(self.composite_resource.files.all().count(), 1)
res_file = self.composite_resource.files.first()
# check that the resource file is associated with GenericLogicalFile
self.assertEqual(res_file.has_logical_file, True)
self.assertEqual(res_file.logical_file_type_name, "GenericLogicalFile")
# there should be 1 GenericLogicalFile object at this point
self.assertEqual(GenericLogicalFile.objects.count(), 1)
def test_file_add_to_composite_resource(self):
# test that when we add file to an existing composite resource, the added file
# automatically set to genericlogicalfile type
self.assertEqual(BaseResource.objects.count(), 0)
self.raster_file_obj = open(self.raster_file, 'r')
self._create_composite_resource()
# there should not be any GenericLogicalFile object at this point
self.assertEqual(GenericLogicalFile.objects.count(), 0)
# add a file to the resource
resource_file_add_process(resource=self.composite_resource,
files=(self.raster_file_obj,), user=self.user)
# there should be one resource at this point
self.assertEqual(BaseResource.objects.count(), 1)
self.assertEqual(self.composite_resource.resource_type, "CompositeResource")
self.assertEqual(self.composite_resource.files.all().count(), 1)
res_file = self.composite_resource.files.first()
# check that the resource file is associated with GenericLogicalFile
self.assertEqual(res_file.has_logical_file, True)
self.assertEqual(res_file.logical_file_type_name, "GenericLogicalFile")
# there should be 1 GenericLogicalFile object at this point
self.assertEqual(GenericLogicalFile.objects.count(), 1)
def test_core_metadata_CRUD(self):
"""test that all core metadata elements work for this resource type"""
self._create_composite_resource()
# test current metadata status of the composite resource
# there should be title element
self.assertEqual(self.composite_resource.metadata.title.value, "Test Composite Resource")
# there shouldn't be abstract element
self.assertEqual(self.composite_resource.metadata.description, None)
# there shouldn't be any format element
self.assertEqual(self.composite_resource.metadata.formats.count(), 0)
# there should be date element - 2 elements
self.assertEqual(self.composite_resource.metadata.dates.count(), 2)
# there should be 1 creator element
self.assertEqual(self.composite_resource.metadata.creators.count(), 1)
# there should not be any contributor element
self.assertEqual(self.composite_resource.metadata.contributors.count(), 0)
# there should not be any coverage element
self.assertEqual(self.composite_resource.metadata.coverages.count(), 0)
# there should not be any funding agency element
self.assertEqual(self.composite_resource.metadata.funding_agencies.count(), 0)
# there should be 1 identifier element
self.assertEqual(self.composite_resource.metadata.identifiers.count(), 1)
# there should be 1 language element
self.assertNotEqual(self.composite_resource.metadata.language, None)
# there should not be any publisher element
self.assertEqual(self.composite_resource.metadata.publisher, None)
# there should not be any format element
self.assertEqual(self.composite_resource.metadata.formats.count(), 0)
# there should not be any relation element
self.assertEqual(self.composite_resource.metadata.relations.count(), 0)
# there should be 1 rights element
self.assertNotEqual(self.composite_resource.metadata.rights, None)
# there shouldn't be any source element
self.assertEqual(self.composite_resource.metadata.sources.count(), 0)
# there should not be any subject elements
self.assertEqual(self.composite_resource.metadata.subjects.count(), 0)
# there should be 1 type element
self.assertNotEqual(self.composite_resource.metadata.type, None)
# there should not be any key/value metadata
self.assertEqual(self.composite_resource.extra_metadata, {})
# test create metadata
# create abstract
metadata = self.composite_resource.metadata
metadata.create_element('description', abstract='new abstract for the resource')
# there should be abstract element
self.assertNotEqual(self.composite_resource.metadata.description, None)
# add a file to the resource to auto create format element
self.raster_file_obj = open(self.raster_file, 'r')
resource_file_add_process(resource=self.composite_resource,
files=(self.raster_file_obj,), user=self.user)
self.assertEqual(self.composite_resource.files.all().count(), 1)
# now there should be 1 format element
self.assertEqual(self.composite_resource.metadata.formats.count(), 1)
# add another creator
metadata.create_element('creator', name='John Smith')
# there should be 2 creators now
self.assertEqual(self.composite_resource.metadata.creators.count(), 2)
# add a contributor
metadata.create_element('contributor', name='Lisa Smith')
# there should be 1 contributor now
self.assertEqual(self.composite_resource.metadata.contributors.count(), 1)
# add a period type coverage
value_dict = {'name': 'Name for period coverage', 'start': '1/1/2000', 'end': '12/12/2012'}
metadata.create_element('coverage', type='period', value=value_dict)
# add a point type coverage
value_dict = {'east': '56.45678', 'north': '12.6789', 'units': 'decimal deg'}
metadata.create_element('coverage', type='point', value=value_dict)
# there should be 2 coverage elements now
self.assertEqual(self.composite_resource.metadata.coverages.count(), 2)
cov_pt = self.composite_resource.metadata.coverages.all().filter(type='point').first()
self.assertNotEqual(cov_pt, None)
cov_period = self.composite_resource.metadata.coverages.all().filter(type='period').first()
self.assertNotEqual(cov_period, None)
# add a funding agency element with only the required name value type
metadata.create_element('fundingagency', agency_name='NSF')
# there should be 1 funding agency element now
self.assertEqual(self.composite_resource.metadata.funding_agencies.count(), 1)
# add another identifier
metadata.create_element('identifier', name='someIdentifier', url="http://some.org/001")
# there should be 2 identifier elements
self.assertEqual(self.composite_resource.metadata.identifiers.count(), 2)
# add publisher element
publisher_CUAHSI = "Consortium of Universities for the Advancement of " \
"Hydrologic Science, Inc. (CUAHSI)"
url_CUAHSI = 'https://www.cuahsi.org'
# publisher element can be added when the resource is published
self.composite_resource.raccess.published = True
self.composite_resource.raccess.save()
        # user can't set CUAHSI as the publisher - when the resource has no content file
# first delete the content file
res_file = self.composite_resource.files.first()
hydroshare.delete_resource_file(self.composite_resource.short_id,
res_file.id,
self.user)
with self.assertRaises(Exception):
metadata.create_element('publisher', name=publisher_CUAHSI, url=url_CUAHSI)
metadata.create_element('publisher', name='USGS', url="http://usgs.gov")
# there should a publisher element now
self.assertNotEqual(self.composite_resource.metadata.publisher, None)
# add a relation element of uri type
metadata.create_element('relation', type='isPartOf',
value='http://hydroshare.org/resource/001')
# there should be 1 relation element
self.assertEqual(self.composite_resource.metadata.relations.count(), 1)
# add a source element of uri type
metadata.create_element('source', derived_from='http://hydroshare.org/resource/0001')
# there should be 1 source element
self.assertEqual(self.composite_resource.metadata.sources.count(), 1)
# add 2 subject elements
metadata.create_element('subject', value='sub-1')
metadata.create_element('subject', value='sub-2')
# there should be 2 subject elements
self.assertEqual(self.composite_resource.metadata.subjects.count(), 2)
# add key/value metadata
self.composite_resource.extra_metadata = {'key-1': 'value-1', 'key-2': 'value-2'}
self.composite_resource.save()
self.assertEqual(self.composite_resource.extra_metadata,
{'key-1': 'value-1', 'key-2': 'value-2'})
# test update metadata
# test update title
metadata.update_element('title', self.composite_resource.metadata.title.id,
value="New Title")
self.assertEqual(self.composite_resource.metadata.title.value, 'New Title')
# test update abstract
metadata.update_element('description',
self.composite_resource.metadata.description.id,
abstract='Updated composite resource')
self.assertEqual(self.composite_resource.metadata.description.abstract,
'Updated composite resource')
# test updating funding agency
agency_element = self.composite_resource.metadata.funding_agencies.all().filter(
agency_name='NSF').first()
metadata.update_element('fundingagency', agency_element.id,
award_title="Cyber Infrastructure",
award_number="NSF-101-20-6789",
agency_url="http://www.nsf.gov")
agency_element = self.composite_resource.metadata.funding_agencies.all().filter(
agency_name='NSF').first()
self.assertEquals(agency_element.agency_name, 'NSF')
self.assertEquals(agency_element.award_title, 'Cyber Infrastructure')
self.assertEquals(agency_element.award_number, 'NSF-101-20-6789')
self.assertEquals(agency_element.agency_url, 'http://www.nsf.gov')
some_idf = self.composite_resource.metadata.identifiers.all().filter(
name='someIdentifier').first()
metadata.update_element('identifier', some_idf.id, name='someOtherIdentifier')
some_idf = self.composite_resource.metadata.identifiers.all().filter(
name='someOtherIdentifier').first()
self.assertNotEqual(some_idf, None)
# update language
self.assertEqual(self.composite_resource.metadata.language.code, 'eng')
metadata.update_element('language',
self.composite_resource.metadata.language.id, code='fre')
self.assertEqual(self.composite_resource.metadata.language.code, 'fre')
# test that updating publisher element raises exception
with self.assertRaises(Exception):
metadata.update_element('publisher',
self.composite_resource.metadata.publisher.id,
name='USU', url="http://usu.edu")
# test update relation type
rel_to_update = self.composite_resource.metadata.relations.all().filter(
type='isPartOf').first()
metadata.update_element('relation', rel_to_update.id,
type='isVersionOf', value="dummy value 2")
rel_to_update = self.composite_resource.metadata.relations.all().filter(
type='isVersionOf').first()
self.assertEqual(rel_to_update.value, "dummy value 2")
src_1 = self.composite_resource.metadata.sources.all().filter(
derived_from='http://hydroshare.org/resource/0001').first()
metadata.update_element('source', src_1.id,
derived_from='http://hydroshare.org/resource/0002')
src_1 = self.composite_resource.metadata.sources.first()
self.assertEqual(src_1.derived_from, 'http://hydroshare.org/resource/0002')
# change the point coverage to type box
# since we deleted the content file, there should not be any coverage element
self.assertEqual(self.composite_resource.metadata.coverages.count(), 0)
# add a point type coverage
value_dict = {'east': '56.45678', 'north': '12.6789', 'units': 'decimal deg'}
metadata.create_element('coverage', type='point', value=value_dict)
value_dict = {'northlimit': '56.45678', 'eastlimit': '120.6789', 'southlimit': '16.45678',
'westlimit': '16.6789',
'units': 'decimal deg'}
cov_pt = self.composite_resource.metadata.coverages.all().filter(type='point').first()
metadata.update_element('coverage', cov_pt.id, type='box', value=value_dict)
cov_pt = self.composite_resource.metadata.coverages.all().filter(type='point').first()
self.assertEqual(cov_pt, None)
cov_box = self.composite_resource.metadata.coverages.all().filter(type='box').first()
self.assertNotEqual(cov_box, None)
# update creator
creator = self.composite_resource.metadata.creators.all().filter(name='John Smith').first()
self.assertEqual(creator.email, None)
metadata.update_element('creator', creator.id, email='[email protected]')
creator = self.composite_resource.metadata.creators.all().filter(name='John Smith').first()
self.assertEqual(creator.email, '[email protected]')
# update contributor
contributor = self.composite_resource.metadata.contributors.first()
self.assertEqual(contributor.email, None)
metadata.update_element('contributor', contributor.id, email='[email protected]')
contributor = self.composite_resource.metadata.contributors.first()
self.assertEqual(contributor.email, '[email protected]')
def test_metadata_xml(self):
"""test that the call to resource.get_metadata_xml() doesn't raise exception
        for the composite resource type, get_metadata_xml() includes both resource
        level metadata and file type metadata for each logical file object within the resource
"""
# 1. create core metadata elements
# 2. create genericlogicalfile type metadata
# 3. create georasterlogicalfile type metadata
self._create_composite_resource()
# add a file to the resource to auto create format element
# as well as be able to add generic file type metadata
self.generic_file_obj = open(self.generic_file, 'r')
resource_file_add_process(resource=self.composite_resource,
files=(self.generic_file_obj,), user=self.user)
# add a raster file to the resource to auto create format element
# as well as be able to add raster file type metadata
self.raster_file_obj = open(self.raster_file, 'r')
resource_file_add_process(resource=self.composite_resource,
files=(self.raster_file_obj,), user=self.user)
self.assertEqual(self.composite_resource.files.all().count(), 2)
# add some core metadata
# create abstract
metadata = self.composite_resource.metadata
metadata.create_element('description', abstract='new abstract for the resource')
# add a contributor
metadata.create_element('contributor', name='Lisa Smith')
# add a funding agency element with only the required name value type
metadata.create_element('fundingagency', agency_name='NSF')
# add a relation element of uri type
metadata.create_element('relation', type='isPartOf',
value='http://hydroshare.org/resource/001')
# add a source element of uri type
metadata.create_element('source', derived_from='http://hydroshare.org/resource/0001')
# add 2 subject elements
metadata.create_element('subject', value='sub-1')
metadata.create_element('subject', value='sub-2')
# add key/value metadata
self.composite_resource.extra_metadata = {'key-1': 'value-1', 'key-2': 'value-2'}
self.composite_resource.save()
# add a publisher element
self.composite_resource.raccess.published = True
self.composite_resource.raccess.save()
publisher_CUAHSI = "Consortium of Universities for the Advancement of " \
"Hydrologic Science, Inc. (CUAHSI)"
url_CUAHSI = 'https://www.cuahsi.org'
metadata.create_element('publisher', name=publisher_CUAHSI, url=url_CUAHSI)
# add generic logical file type metadata
res_file = [f for f in self.composite_resource.files.all()
if f.logical_file_type_name == "GenericLogicalFile"][0]
gen_logical_file = res_file.logical_file
# add dataset name
self.assertEqual(gen_logical_file.dataset_name, None)
gen_logical_file.dataset_name = "This is a generic dataset"
gen_logical_file.save()
# add key/value metadata
gen_logical_file.metadata.extra_metadata = {'key1': 'value 1', 'key2': 'value 2'}
gen_logical_file.metadata.save()
# add temporal coverage
value_dict = {'name': 'Name for period coverage', 'start': '1/1/2000', 'end': '12/12/2012'}
gen_logical_file.metadata.create_element('coverage', type='period', value=value_dict)
# add spatial coverage
value_dict = {'east': '56.45678', 'north': '12.6789', 'units': 'decimal deg'}
gen_logical_file.metadata.create_element('coverage', type='point', value=value_dict)
tif_res_file = [f for f in self.composite_resource.files.all()
if f.extension == ".tif"][0]
GeoRasterLogicalFile.set_file_type(self.composite_resource, tif_res_file.id, self.user)
        # add raster logical file type metadata
res_file = [f for f in self.composite_resource.files.all()
if f.logical_file_type_name == "GeoRasterLogicalFile"][0]
raster_logical_file = res_file.logical_file
# check we have dataset name
self.assertEqual(raster_logical_file.dataset_name, "small_logan")
# add key/value metadata
raster_logical_file.metadata.extra_metadata = {'keyA': 'value A', 'keyB': 'value B'}
raster_logical_file.metadata.save()
# add temporal coverage
value_dict = {'name': 'Name for period coverage', 'start': '1/1/2010', 'end': '12/12/2016'}
raster_logical_file.metadata.create_element('coverage', type='period', value=value_dict)
# test no exception raised when generating the metadata xml for this resource type
try:
self.composite_resource.get_metadata_xml()
except Exception as ex:
self.fail("Failed to generate metadata in xml format. Error:{}".format(ex.message))
def test_resource_coverage_auto_update(self):
# this is to test that the spatial coverage and temporal coverage
# for composite resource get updated by the system based on the
        # coverage metadata that all logical file objects of the resource have at any time
# 1. test that resource coverages get updated on LFO level metadata creation
# 2. test that resource coverages get updated on LFO level metadata update
# 3. test that resource coverages get updated on content file delete
# create a composite resource with no content file
self._create_composite_resource()
# at this point the there should not be any resource level coverage metadata
self.assertEqual(self.composite_resource.metadata.coverages.count(), 0)
# now add the raster tif file to the resource - which should put this file as
# part of a GenericLogicalFile object
self.raster_file_obj = open(self.raster_file, 'r')
resource_file_add_process(resource=self.composite_resource,
files=(self.raster_file_obj,), user=self.user)
res_file = self.composite_resource.files.all().first()
GeoRasterLogicalFile.set_file_type(self.composite_resource, res_file.id, self.user)
# raster logical file should have a coverage element of type box
res_file = [f for f in self.composite_resource.files.all()
if f.logical_file_type_name == "GeoRasterLogicalFile"][0]
raster_logical_file = res_file.logical_file
self.assertEqual(raster_logical_file.metadata.coverages.count(), 1)
self.assertEqual(raster_logical_file.metadata.coverages.all().filter(
type='box').count(), 1)
# now the resource should have a coverage metadata element of type box
self.assertEqual(self.composite_resource.metadata.coverages.count(), 1)
self.assertEqual(self.composite_resource.metadata.coverages.all().filter(
type='box').count(), 1)
# the spatial coverage at the file type level should be exactly the same as the
# resource level - due to auto update feature in composite resource
res_coverage = self.composite_resource.metadata.coverages.all().filter(type='box').first()
raster_lfo_coverage = raster_logical_file.metadata.coverages.all().filter(
type='box').first()
self.assertEqual(res_coverage.value['projection'], raster_lfo_coverage.value['projection'])
self.assertEqual(res_coverage.value['units'], raster_lfo_coverage.value['units'])
self.assertEqual(res_coverage.value['northlimit'], raster_lfo_coverage.value['northlimit'])
self.assertEqual(res_coverage.value['southlimit'], raster_lfo_coverage.value['southlimit'])
self.assertEqual(res_coverage.value['eastlimit'], raster_lfo_coverage.value['eastlimit'])
self.assertEqual(res_coverage.value['westlimit'], raster_lfo_coverage.value['westlimit'])
# At this point there is not temporal coverage either at the file type level or resource
# level
self.assertEqual(self.composite_resource.metadata.coverages.all().filter(
type='period').count(), 0)
self.assertEqual(raster_logical_file.metadata.coverages.all().filter(
type='period').count(), 0)
        # adding temporal coverage to the logical file should add the temporal coverage to the
# resource
value_dict = {'start': '1/1/2010', 'end': '12/12/2015'}
raster_logical_file.metadata.create_element('coverage', type='period', value=value_dict)
self.assertEqual(self.composite_resource.metadata.coverages.all().filter(
type='period').count(), 1)
self.assertEqual(raster_logical_file.metadata.coverages.all().filter(
type='period').count(), 1)
res_coverage = self.composite_resource.metadata.coverages.all().filter(
type='period').first()
raster_lfo_coverage = raster_logical_file.metadata.coverages.all().filter(
type='period').first()
self.assertEqual(res_coverage.value['start'], raster_lfo_coverage.value['start'])
self.assertEqual(res_coverage.value['end'], raster_lfo_coverage.value['end'])
self.assertEqual(res_coverage.value['start'], '1/1/2010')
self.assertEqual(res_coverage.value['end'], '12/12/2015')
# test updating the temporal coverage for file type should update the temporal coverage
# for the resource
value_dict = {'start': '12/1/2010', 'end': '12/1/2015'}
raster_logical_file.metadata.update_element('coverage', raster_lfo_coverage.id,
type='period', value=value_dict)
res_coverage = self.composite_resource.metadata.coverages.all().filter(
type='period').first()
raster_lfo_coverage = raster_logical_file.metadata.coverages.all().filter(
type='period').first()
self.assertEqual(res_coverage.value['start'], raster_lfo_coverage.value['start'])
self.assertEqual(res_coverage.value['end'], raster_lfo_coverage.value['end'])
self.assertEqual(res_coverage.value['start'], '12/1/2010')
self.assertEqual(res_coverage.value['end'], '12/1/2015')
# test that the resource coverage is superset of file type coverages
self.generic_file_obj = open(self.generic_file, 'r')
resource_file_add_process(resource=self.composite_resource,
files=(self.generic_file_obj,), user=self.user)
res_file = [f for f in self.composite_resource.files.all()
if f.logical_file_type_name == "GenericLogicalFile"][0]
generic_logical_file = res_file.logical_file
# there should not be any coverage for the generic LFO at this point
self.assertEqual(generic_logical_file.metadata.coverages.count(), 0)
# create temporal coverage for generic LFO
value_dict = {'start': '1/1/2009', 'end': '1/1/2015'}
generic_logical_file.metadata.create_element('coverage', type='period', value=value_dict)
self.assertEqual(generic_logical_file.metadata.coverages.count(), 1)
res_coverage = self.composite_resource.metadata.coverages.all().filter(
type='period').first()
# resource temporal coverage is now super set of the 2 temporal coverages
# in 2 LFOs
self.assertEqual(res_coverage.value['start'], '1/1/2009')
self.assertEqual(res_coverage.value['end'], '12/1/2015')
# test resource superset spatial coverage
res_coverage = self.composite_resource.metadata.coverages.all().filter(
type='box').first()
self.assertEqual(res_coverage.value['projection'], 'WGS 84 EPSG:4326')
self.assertEqual(res_coverage.value['units'], 'Decimal degrees')
self.assertEqual(res_coverage.value['northlimit'], 42.049364058252266)
self.assertEqual(res_coverage.value['eastlimit'], -111.57773718106195)
self.assertEqual(res_coverage.value['southlimit'], 41.987884327209976)
self.assertEqual(res_coverage.value['westlimit'], -111.69756293084055)
value_dict = {'east': '-110.88845678', 'north': '43.6789', 'units': 'Decimal deg'}
generic_logical_file.metadata.create_element('coverage', type='point', value=value_dict)
res_coverage = self.composite_resource.metadata.coverages.all().filter(
type='box').first()
self.assertEqual(res_coverage.value['projection'], 'WGS 84 EPSG:4326')
self.assertEqual(res_coverage.value['units'], 'Decimal degrees')
self.assertEqual(res_coverage.value['northlimit'], 43.6789)
self.assertEqual(res_coverage.value['eastlimit'], -110.88845678)
self.assertEqual(res_coverage.value['southlimit'], 41.987884327209976)
self.assertEqual(res_coverage.value['westlimit'], -111.69756293084055)
# update the LFO coverage to box type
value_dict = {'eastlimit': '-110.88845678', 'northlimit': '43.6789',
'westlimit': '-112.78967', 'southlimit': '40.12345',
'units': 'Decimal deg'}
lfo_spatial_coverage = generic_logical_file.metadata.spatial_coverage
generic_logical_file.metadata.update_element('coverage', lfo_spatial_coverage.id,
type='box', value=value_dict)
res_coverage = self.composite_resource.metadata.coverages.all().filter(
type='box').first()
self.assertEqual(res_coverage.value['projection'], 'WGS 84 EPSG:4326')
self.assertEqual(res_coverage.value['units'], 'Decimal degrees')
self.assertEqual(res_coverage.value['northlimit'], 43.6789)
self.assertEqual(res_coverage.value['eastlimit'], -110.88845678)
self.assertEqual(res_coverage.value['southlimit'], 40.12345)
self.assertEqual(res_coverage.value['westlimit'], -112.78967)
# deleting the generic file should reset the coverage of the resource to that of the
# raster LFO
res_file = [f for f in self.composite_resource.files.all()
if f.logical_file_type_name == "GenericLogicalFile"][0]
hydroshare.delete_resource_file(self.composite_resource.short_id, res_file.id, self.user)
res_coverage = self.composite_resource.metadata.coverages.all().filter(
type='box').first()
raster_lfo_coverage = raster_logical_file.metadata.coverages.all().filter(
type='box').first()
self.assertEqual(res_coverage.value['projection'], raster_lfo_coverage.value['projection'])
self.assertEqual(res_coverage.value['units'], raster_lfo_coverage.value['units'])
self.assertEqual(res_coverage.value['northlimit'], raster_lfo_coverage.value['northlimit'])
self.assertEqual(res_coverage.value['southlimit'], raster_lfo_coverage.value['southlimit'])
self.assertEqual(res_coverage.value['eastlimit'], raster_lfo_coverage.value['eastlimit'])
self.assertEqual(res_coverage.value['westlimit'], raster_lfo_coverage.value['westlimit'])
res_coverage = self.composite_resource.metadata.coverages.all().filter(
type='period').first()
raster_lfo_coverage = raster_logical_file.metadata.coverages.all().filter(
type='period').first()
self.assertEqual(res_coverage.value['start'], raster_lfo_coverage.value['start'])
self.assertEqual(res_coverage.value['end'], raster_lfo_coverage.value['end'])
self.assertEqual(res_coverage.value['start'], '12/1/2010')
self.assertEqual(res_coverage.value['end'], '12/1/2015')
# deleting the remaining content file from resource should leave the resource
# with no coverage element
res_file = [f for f in self.composite_resource.files.all()
if f.logical_file_type_name == "GeoRasterLogicalFile"][0]
hydroshare.delete_resource_file(self.composite_resource.short_id, res_file.id, self.user)
self.assertEqual(self.composite_resource.files.count(), 0)
self.assertEqual(self.composite_resource.metadata.coverages.count(), 0)
def test_can_be_public_or_discoverable(self):
self._create_composite_resource()
        # at this point the resource can't be public or discoverable as some core metadata is missing
self.assertEqual(self.composite_resource.can_be_public_or_discoverable, False)
# add a text file
self.generic_file_obj = open(self.generic_file, 'r')
resource_file_add_process(resource=self.composite_resource,
files=(self.generic_file_obj,), user=self.user)
        # at this point the resource still can't be public or discoverable - as some core metadata
# is missing
self.assertEqual(self.composite_resource.can_be_public_or_discoverable, False)
# add a raster file to the resource to auto create format element
self.raster_file_obj = open(self.raster_file, 'r')
resource_file_add_process(resource=self.composite_resource,
files=(self.raster_file_obj,), user=self.user)
        # at this point the resource still can't be public or discoverable - as some core metadata
# is missing
self.assertEqual(self.composite_resource.can_be_public_or_discoverable, False)
        # there should be 2 required core metadata elements missing at this point
missing_elements = self.composite_resource.metadata.get_required_missing_elements()
self.assertEqual(len(missing_elements), 2)
self.assertIn('Abstract', missing_elements)
self.assertIn('Keywords', missing_elements)
# add the above missing elements
# create abstract
metadata = self.composite_resource.metadata
# add Abstract (element name is description)
metadata.create_element('description', abstract='new abstract for the resource')
# add keywords (element name is subject)
metadata.create_element('subject', value='sub-1')
# at this point resource can be public or discoverable
self.assertEqual(self.composite_resource.can_be_public_or_discoverable, True)
def test_supports_folder_creation(self):
"""Here we are testing the function supports_folder_creation()
"""
self._create_composite_resource()
# add a file to the resource which will be part of a GenericLogicalFile object
self._add_generic_file_to_resource()
self.assertEqual(self.composite_resource.files.count(), 1)
# we should be able to create this new folder
new_folder_path = "data/contents/my-new-folder"
self.assertEqual(self.composite_resource.supports_folder_creation(new_folder_path), True)
# create the folder
create_folder(self.composite_resource.short_id, new_folder_path)
# now move the file to this new folder
move_or_rename_file_or_folder(self.user, self.composite_resource.short_id,
'data/contents/' + self.generic_file_name,
new_folder_path + "/" + self.generic_file_name)
# test that we should be able to create a folder inside the folder that contains
# a resource file that is part of a Generic Logical file
new_folder_path = "data/contents/my-new-folder/another-folder"
self.assertEqual(self.composite_resource.supports_folder_creation(new_folder_path), True)
# add a raster tif file to the resource which will be part of
        # a GeoRasterLogicalFile object
self._add_raster_file_to_resource()
self.assertEqual(self.composite_resource.files.count(), 2)
# make the tif as part of the GeoRasterLogicalFile
tif_res_file = hydroshare.utils.get_resource_files_by_extension(
self.composite_resource, '.tif')[0]
GeoRasterLogicalFile.set_file_type(self.composite_resource, tif_res_file.id, self.user)
tif_res_file = hydroshare.utils.get_resource_files_by_extension(
self.composite_resource, '.tif')[0]
self.assertTrue(tif_res_file.resource_file.name.endswith(
"/data/contents/small_logan/small_logan.tif"))
# test that creating a folder at "/data/contents/small_logan/" is not supported
# as that folder contains a resource file that's part of GeoRaster logical file
new_folder_path = "{}/data/contents/small_logan/my-new-folder"
new_folder_path = new_folder_path.format(self.composite_resource.short_id)
self.assertEqual(self.composite_resource.supports_folder_creation(new_folder_path), False)
def test_supports_move_or_rename_file_or_folder(self):
"""here we are testing the function supports_move_or_rename_file_or_folder() of the
composite resource class"""
self._create_composite_resource()
# add a file to the resource which will be part of a GenericLogicalFile object
self._add_generic_file_to_resource()
self.assertEqual(self.composite_resource.files.count(), 1)
# test that we can rename the resource file that's part of the GenericLogical File
gen_res_file = self.composite_resource.files.first()
gen_res_file_basename = hydroshare.utils.get_resource_file_name_and_extension(
gen_res_file)[1]
self.assertEqual(self.generic_file_name, gen_res_file_basename)
        src_full_path = self.composite_resource.short_id + '/data/contents/' + \
            self.generic_file_name
        tgt_full_path = self.composite_resource.short_id + '/data/contents/renamed_file.txt'
# this is the function we are testing
self.assertEqual(self.composite_resource.supports_rename_path(
src_full_path, tgt_full_path), True)
# create a new folder so that we can test if the generic file can be moved there
# or not
new_folder_path = "data/contents/my-new-folder"
self.assertEqual(self.composite_resource.supports_folder_creation(new_folder_path), True)
# create the folder
create_folder(self.composite_resource.short_id, new_folder_path)
# now move the file to this new folder
tgt_full_path = self.composite_resource.short_id + '/data/contents/my-new-folder/' + \
self.generic_file_name
# this is the function we are testing
self.assertEqual(self.composite_resource.supports_rename_path(
src_full_path, tgt_full_path), True)
# test that if a folder contains a resource file that's part of a GenericLogicalFile
# that folder can be renamed
# now move the file to this new folder
move_or_rename_file_or_folder(self.user, self.composite_resource.short_id,
'data/contents/' + self.generic_file_name,
new_folder_path + "/" + self.generic_file_name)
# test rename folder
src_full_path = self.composite_resource.short_id + '/data/contents/my-new-folder/'
tgt_full_path = self.composite_resource.short_id + '/data/contents/my-new-folder-1/'
# this is the function we are testing
self.assertEqual(self.composite_resource.supports_rename_path(
src_full_path, tgt_full_path), True)
# add a raster tif file to the resource which will be part of
        # a GeoRasterLogicalFile object
self._add_raster_file_to_resource()
self.assertEqual(self.composite_resource.files.count(), 2)
# make the tif as part of the GeoRasterLogicalFile
tif_res_file = hydroshare.utils.get_resource_files_by_extension(
self.composite_resource, '.tif')[0]
GeoRasterLogicalFile.set_file_type(self.composite_resource, tif_res_file.id, self.user)
tif_res_file = hydroshare.utils.get_resource_files_by_extension(
self.composite_resource, '.tif')[0]
self.assertTrue(tif_res_file.resource_file.name.endswith(
"/data/contents/small_logan/small_logan.tif"))
# test renaming of any files that are part of GeoRasterLogicalFile is not allowed
src_full_path = self.composite_resource.short_id + '/data/contents/small_logan/' + \
self.raster_file_name
tgt_full_path = self.composite_resource.short_id + \
'/data/contents/small_logan/small_logan_1.tif'
# this is the function we are testing
self.assertEqual(self.composite_resource.supports_rename_path(
src_full_path, tgt_full_path), False)
# test rename folder that contains resource files that are part of the GeoRasterLogicalFile
# is allowed
src_full_path = self.composite_resource.short_id + '/data/contents/small_logan'
tgt_full_path = self.composite_resource.short_id + '/data/contents/small_logan_1'
# this is the function we are testing
self.assertEqual(self.composite_resource.supports_rename_path(
src_full_path, tgt_full_path), True)
# test that we can't move a file to a folder that contains resource files that are part
# of GeoRasterLogicalFile object
src_full_path = self.composite_resource.short_id + '/data/contents/my-new-folder/' + \
self.generic_file_name
tgt_full_path = self.composite_resource.short_id + '/data/contents/small_logan/' + \
self.generic_file_name
# this is the function we are testing
self.assertEqual(self.composite_resource.supports_rename_path(
src_full_path, tgt_full_path), False)
def test_supports_zip(self):
"""Here we are testing the function supports_zip()"""
self._create_composite_resource()
# test that a folder containing a resource file that's part of the GenericLogicalFile
# can be zipped
# add a file to the resource which will be part of a GenericLogicalFile object
self._add_generic_file_to_resource()
self.assertEqual(self.composite_resource.files.count(), 1)
new_folder_path = "data/contents/my-new-folder"
# create the folder
create_folder(self.composite_resource.short_id, new_folder_path)
# now move the file to this new folder
move_or_rename_file_or_folder(self.user, self.composite_resource.short_id,
'data/contents/' + self.generic_file_name,
new_folder_path + "/" + self.generic_file_name)
folder_to_zip = self.composite_resource.short_id + '/data/contents/my-new-folder'
        # test that we can zip the folder my-new-folder
self.assertEqual(self.composite_resource.supports_zip(folder_to_zip), True)
        # test that a folder containing resource files that are part of the GeoRasterLogicalFile
# can be zipped
self._add_raster_file_to_resource()
self.assertEqual(self.composite_resource.files.count(), 2)
# make the tif as part of the GeoRasterLogicalFile
tif_res_file = hydroshare.utils.get_resource_files_by_extension(
self.composite_resource, '.tif')[0]
GeoRasterLogicalFile.set_file_type(self.composite_resource, tif_res_file.id, self.user)
tif_res_file = hydroshare.utils.get_resource_files_by_extension(
self.composite_resource, '.tif')[0]
# resource file exists in a new folder 'small_logan'
self.assertTrue(tif_res_file.resource_file.name.endswith(
"/data/contents/small_logan/small_logan.tif"))
folder_to_zip = self.composite_resource.short_id + '/data/contents/small_logan'
# test that we can zip the folder small_logan
self.assertEqual(self.composite_resource.supports_zip(folder_to_zip), True)
def test_supports_delete_original_folder_on_zip(self):
"""Here we are testing the function supports_delete_original_folder_on_zip() of the
composite resource class"""
self._create_composite_resource()
# test that a folder containing a resource file that's part of the GenericLogicalFile
# can be deleted after that folder gets zipped
# add a file to the resource which will be part of a GenericLogicalFile object
self._add_generic_file_to_resource()
self.assertEqual(self.composite_resource.files.count(), 1)
new_folder_path = "data/contents/my-new-folder"
# create the folder
create_folder(self.composite_resource.short_id, new_folder_path)
# now move the file to this new folder
move_or_rename_file_or_folder(self.user, self.composite_resource.short_id,
'data/contents/' + self.generic_file_name,
new_folder_path + "/" + self.generic_file_name)
folder_to_zip = self.composite_resource.short_id + '/data/contents/my-new-folder'
        # test that we can zip the folder my-new-folder
self.assertEqual(self.composite_resource.supports_zip(folder_to_zip), True)
# this is the function we are testing - my-new-folder can be deleted
self.assertEqual(self.composite_resource.supports_delete_folder_on_zip(
folder_to_zip), True)
# test that a folder containing a resource file that's part of the GeoRasterLogicalFile
# can't be deleted after that folder gets zipped
# add a file to the resource which will be part of a GeoRasterLogicalFile object
self._add_raster_file_to_resource()
self.assertEqual(self.composite_resource.files.count(), 2)
# make the tif as part of the GeoRasterLogicalFile
tif_res_file = hydroshare.utils.get_resource_files_by_extension(
self.composite_resource, '.tif')[0]
GeoRasterLogicalFile.set_file_type(self.composite_resource, tif_res_file.id, self.user)
tif_res_file = hydroshare.utils.get_resource_files_by_extension(
self.composite_resource, '.tif')[0]
# resource file exists in a new folder 'small_logan'
self.assertTrue(tif_res_file.resource_file.name.endswith(
"/data/contents/small_logan/small_logan.tif"))
folder_to_zip = self.composite_resource.short_id + '/data/contents/small_logan'
        # test that we can zip the folder small_logan
self.assertEqual(self.composite_resource.supports_zip(folder_to_zip), True)
# this is the function we are testing - small_logan folder can't be deleted
self.assertEqual(self.composite_resource.supports_delete_folder_on_zip(
folder_to_zip), False)
def _create_composite_resource(self):
self.composite_resource = hydroshare.create_resource(
resource_type='CompositeResource',
owner=self.user,
title='Test Composite Resource'
)
def _add_generic_file_to_resource(self):
self.generic_file_obj = UploadedFile(file=open(self.generic_file, 'rb'),
name=os.path.basename(self.generic_file))
resource_file_add_process(resource=self.composite_resource,
files=(self.generic_file_obj,), user=self.user)
def _add_raster_file_to_resource(self):
self.raster_file_obj = UploadedFile(file=open(self.raster_file, 'rb'),
name=os.path.basename(self.raster_file))
resource_file_add_process(resource=self.composite_resource,
files=(self.raster_file_obj,), user=self.user)
| FescueFungiShare/hydroshare | hs_composite_resource/tests/test_composite_resource.py | Python | bsd-3-clause | 49,698 |
from setuptools import setup
setup(name='joinmarket_core',
version='0.1',
description='Joinmarket library for Bitcoin coinjoins',
url='http://github.com/Joinmarket-Org/joinmarket',
#author='Flying Circus',
#author_email='[email protected]',
license='GPL',
packages=['joinmarket_core'],
install_requires=['libnacl',],
      zip_safe=False)
| AdamISZ/joinmarket_core | setup.py | Python | gpl-3.0 | 394 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017-2018, Antony Alekseyev <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: zabbix_map
author:
- "Antony Alekseyev (@Akint)"
short_description: Create/update/delete Zabbix maps
description:
- "This module allows you to create, modify and delete Zabbix map entries,
using Graphviz binaries and text description written in DOT language.
Nodes of the graph will become map elements and edges will become links between map elements.
See U(https://en.wikipedia.org/wiki/DOT_(graph_description_language)) and U(https://www.graphviz.org/) for details.
Inspired by U(http://blog.zabbix.com/maps-for-the-lazy/)."
- "The following extra node attributes are supported:
C(zbx_host) contains name of the host in Zabbix. Use this if desired type of map element is C(host).
C(zbx_group) contains name of the host group in Zabbix. Use this if desired type of map element is C(host group).
C(zbx_map) contains name of the map in Zabbix. Use this if desired type of map element is C(map).
C(zbx_label) contains label of map element.
C(zbx_image) contains name of the image used to display the element in default state.
C(zbx_image_disabled) contains name of the image used to display disabled map element.
C(zbx_image_maintenance) contains name of the image used to display map element in maintenance.
C(zbx_image_problem) contains name of the image used to display map element with problems.
C(zbx_url) contains map element URL in C(name:url) format.
More than one URL could be specified by adding a postfix (e.g., C(zbx_url1), C(zbx_url2))."
- "The following extra link attributes are supported:
C(zbx_draw_style) contains link line draw style. Possible values: C(line), C(bold), C(dotted), C(dashed).
C(zbx_trigger) contains name of the trigger used as a link indicator in C(host_name:trigger_name) format.
More than one trigger could be specified by adding a postfix (e.g., C(zbx_trigger1), C(zbx_trigger2)).
C(zbx_trigger_color) contains indicator color specified either as CSS3 name or as a hexadecimal code starting with C(#).
C(zbx_trigger_draw_style) contains indicator draw style. Possible values are the same as for C(zbx_draw_style)."
requirements:
- "python >= 2.6"
- "zabbix-api >= 0.5.4"
- pydotplus
- webcolors
- Pillow
- Graphviz
version_added: "2.8"
options:
name:
description:
- Name of the map.
required: true
aliases: [ "map_name" ]
data:
description:
- Graph written in DOT language.
required: false
aliases: [ "dot_data" ]
state:
description:
- State of the map.
- On C(present), it will create if map does not exist or update the map if the associated data is different.
- On C(absent) will remove the map if it exists.
required: false
choices: ['present', 'absent']
default: "present"
width:
description:
- Width of the map.
required: false
default: 800
height:
description:
- Height of the map.
required: false
default: 600
margin:
description:
- Size of white space between map's borders and its elements.
required: false
default: 40
expand_problem:
description:
- Whether the problem trigger will be displayed for elements with a single problem.
required: false
type: bool
default: true
highlight:
description:
- Whether icon highlighting is enabled.
required: false
type: bool
default: true
label_type:
description:
- Map element label type.
required: false
choices: ['label', 'ip', 'name', 'status', 'nothing', 'custom']
default: "name"
default_image:
description:
- Name of the Zabbix image used to display the element if this element doesn't have the C(zbx_image) attribute defined.
required: false
aliases: [ "image" ]
extends_documentation_fragment:
- zabbix
'''
RETURN = ''' # '''
EXAMPLES = '''
###
### Example inventory:
# [web]
# web[01:03].example.com ansible_host=127.0.0.1
# [db]
# db.example.com ansible_host=127.0.0.1
# [backup]
# backup.example.com ansible_host=127.0.0.1
###
### Each inventory host is present in Zabbix with a matching name.
###
### Contents of 'map.j2':
# digraph G {
# graph [layout=dot splines=false overlap=scale]
# INTERNET [zbx_url="Google:https://google.com" zbx_image="Cloud_(96)"]
# {% for web_host in groups.web %}
# {% set web_loop = loop %}
# web{{ '%03d' % web_loop.index }} [zbx_host="{{ web_host }}"]
# INTERNET -> web{{ '%03d' % web_loop.index }} [zbx_trigger="{{ web_host }}:Zabbix agent on {HOST.NAME} is unreachable for 5 minutes"]
# {% for db_host in groups.db %}
# {% set db_loop = loop %}
# web{{ '%03d' % web_loop.index }} -> db{{ '%03d' % db_loop.index }}
# {% endfor %}
# {% endfor %}
# { rank=same
# {% for db_host in groups.db %}
# {% set db_loop = loop %}
# db{{ '%03d' % db_loop.index }} [zbx_host="{{ db_host }}"]
# {% for backup_host in groups.backup %}
# {% set backup_loop = loop %}
# db{{ '%03d' % db_loop.index }} -> backup{{ '%03d' % backup_loop.index }} [color="blue"]
# {% endfor %}
# {% endfor %}
# {% for backup_host in groups.backup %}
# {% set backup_loop = loop %}
# backup{{ '%03d' % backup_loop.index }} [zbx_host="{{ backup_host }}"]
# {% endfor %}
# }
# }
###
### Create Zabbix map "Demo Map" made of template 'map.j2'
- name: Create Zabbix map
zabbix_map:
server_url: http://zabbix.example.com
login_user: username
login_password: password
name: Demo map
state: present
data: "{{ lookup('template', 'map.j2') }}"
default_image: Server_(64)
expand_problem: no
highlight: no
label_type: label
delegate_to: localhost
run_once: yes
'''
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'supported_by': 'community',
'status': ['preview']
}
import atexit
import base64
import traceback
from io import BytesIO
from operator import itemgetter
from distutils.version import StrictVersion
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
try:
import pydotplus
HAS_PYDOTPLUS = True
except ImportError:
PYDOT_IMP_ERR = traceback.format_exc()
HAS_PYDOTPLUS = False
try:
import webcolors
HAS_WEBCOLORS = True
except ImportError:
WEBCOLORS_IMP_ERR = traceback.format_exc()
HAS_WEBCOLORS = False
try:
from zabbix_api import ZabbixAPI
HAS_ZABBIX_API = True
except ImportError:
ZBX_IMP_ERR = traceback.format_exc()
HAS_ZABBIX_API = False
try:
from PIL import Image
HAS_PIL = True
except ImportError:
PIL_IMP_ERR = traceback.format_exc()
HAS_PIL = False
class Map():
def __init__(self, module, zbx):
self._module = module
self._zapi = zbx
self.map_name = module.params['name']
self.dot_data = module.params['data']
self.width = module.params['width']
self.height = module.params['height']
self.state = module.params['state']
self.default_image = module.params['default_image']
self.map_id = self._get_sysmap_id(self.map_name)
self.margin = module.params['margin']
self.expand_problem = module.params['expand_problem']
self.highlight = module.params['highlight']
self.label_type = module.params['label_type']
self.api_version = self._zapi.api_version()
self.selements_sort_keys = self._get_selements_sort_keys()
def _build_graph(self):
try:
graph_without_positions = pydotplus.graph_from_dot_data(self.dot_data)
dot_data_with_positions = graph_without_positions.create_dot()
graph_with_positions = pydotplus.graph_from_dot_data(dot_data_with_positions)
if graph_with_positions:
return graph_with_positions
except Exception as e:
self._module.fail_json(msg="Failed to build graph from DOT data: %s" % e)
def get_map_config(self):
if not self.dot_data:
self._module.fail_json(msg="'data' is mandatory with state 'present'")
graph = self._build_graph()
nodes = self._get_graph_nodes(graph)
edges = self._get_graph_edges(graph)
icon_ids = self._get_icon_ids()
map_config = {
'name': self.map_name,
'label_type': self._get_label_type_id(self.label_type),
'expandproblem': int(self.expand_problem),
'highlight': int(self.highlight),
'width': self.width,
'height': self.height,
'selements': self._get_selements(graph, nodes, icon_ids),
'links': self._get_links(nodes, edges),
}
return map_config
def _get_label_type_id(self, label_type):
label_type_ids = {
'label': 0,
'ip': 1,
'name': 2,
'status': 3,
'nothing': 4,
'custom': 5,
}
try:
label_type_id = label_type_ids[label_type]
except Exception as e:
self._module.fail_json(msg="Failed to find id for label type '%s': %s" % (label_type, e))
return label_type_id
def _get_images_info(self, data, icon_ids):
images = [
{
'dot_tag': 'zbx_image',
'zbx_property': 'iconid_off',
'mandatory': True
},
{
'dot_tag': 'zbx_image_disabled',
'zbx_property': 'iconid_disabled',
'mandatory': False
},
{
'dot_tag': 'zbx_image_maintenance',
'zbx_property': 'iconid_maintenance',
'mandatory': False
},
{
'dot_tag': 'zbx_image_problem',
'zbx_property': 'iconid_on',
'mandatory': False
}
]
images_info = {}
default_image = self.default_image if self.default_image else sorted(icon_ids.items())[0][0]
for image in images:
image_name = data.get(image['dot_tag'], None)
if not image_name:
if image['mandatory']:
image_name = default_image
else:
continue
image_name = remove_quotes(image_name)
if image_name in icon_ids:
images_info[image['zbx_property']] = icon_ids[image_name]
if not image['mandatory']:
images_info['use_iconmap'] = 0
else:
self._module.fail_json(msg="Failed to find id for image '%s'" % image_name)
return images_info
def _get_element_type(self, data):
types = {
'host': 0,
'sysmap': 1,
'trigger': 2,
'group': 3,
'image': 4
}
element_type = {
'elementtype': types['image'],
}
if StrictVersion(self.api_version) < StrictVersion('3.4'):
element_type.update({
'elementid': "0",
})
for type_name, type_id in sorted(types.items()):
field_name = 'zbx_' + type_name
if field_name in data:
method_name = '_get_' + type_name + '_id'
element_name = remove_quotes(data[field_name])
get_element_id = getattr(self, method_name, None)
if get_element_id:
elementid = get_element_id(element_name)
if elementid and int(elementid) > 0:
element_type.update({
'elementtype': type_id,
'label': element_name
})
if StrictVersion(self.api_version) < StrictVersion('3.4'):
element_type.update({
'elementid': elementid,
})
else:
element_type.update({
'elements': [{
type_name + 'id': elementid,
}],
})
break
else:
self._module.fail_json(msg="Failed to find id for %s '%s'" % (type_name, element_name))
return element_type
# get list of map elements (nodes)
def _get_selements(self, graph, nodes, icon_ids):
selements = []
icon_sizes = {}
scales = self._get_scales(graph)
for selementid, (node, data) in enumerate(nodes.items(), start=1):
selement = {
'selementid': selementid
}
data['selementid'] = selementid
images_info = self._get_images_info(data, icon_ids)
selement.update(images_info)
image_id = images_info['iconid_off']
if image_id not in icon_sizes:
icon_sizes[image_id] = self._get_icon_size(image_id)
pos = self._convert_coordinates(data['pos'], scales, icon_sizes[image_id])
selement.update(pos)
selement['label'] = remove_quotes(node)
element_type = self._get_element_type(data)
selement.update(element_type)
label = self._get_label(data)
if label:
selement['label'] = label
urls = self._get_urls(data)
if urls:
selement['urls'] = urls
selements.append(selement)
return selements
def _get_links(self, nodes, edges):
links = {}
for edge in edges:
link_id = tuple(sorted(edge.obj_dict['points']))
node1, node2 = link_id
data = edge.obj_dict['attributes']
if "style" in data and data['style'] == "invis":
continue
if link_id not in links:
links[link_id] = {
'selementid1': min(nodes[node1]['selementid'], nodes[node2]['selementid']),
'selementid2': max(nodes[node1]['selementid'], nodes[node2]['selementid']),
}
link = links[link_id]
if "color" not in link:
link['color'] = self._get_color_hex(remove_quotes(data.get('color', 'green')))
if "zbx_draw_style" not in link:
link['drawtype'] = self._get_link_draw_style_id(remove_quotes(data.get('zbx_draw_style', 'line')))
label = self._get_label(data)
if label and "label" not in link:
link['label'] = label
triggers = self._get_triggers(data)
if triggers:
if "linktriggers" not in link:
link['linktriggers'] = []
link['linktriggers'] += triggers
return list(links.values())
def _get_urls(self, data):
urls = []
for url_raw in [remove_quotes(value) for key, value in data.items() if key.startswith("zbx_url")]:
try:
name, url = url_raw.split(':', 1)
except Exception as e:
self._module.fail_json(msg="Failed to parse zbx_url='%s': %s" % (url_raw, e))
urls.append({
'name': name,
'url': url,
})
return urls
def _get_triggers(self, data):
triggers = []
for trigger_definition in [remove_quotes(value) for key, value in data.items() if key.startswith("zbx_trigger")]:
triggerid = self._get_trigger_id(trigger_definition)
if triggerid:
triggers.append({
'triggerid': triggerid,
'color': self._get_color_hex(remove_quotes(data.get('zbx_trigger_color', 'red'))),
'drawtype': self._get_link_draw_style_id(remove_quotes(data.get('zbx_trigger_draw_style', 'bold'))),
})
else:
self._module.fail_json(msg="Failed to find trigger '%s'" % (trigger_definition))
return triggers
@staticmethod
def _get_label(data, default=None):
if "zbx_label" in data:
label = remove_quotes(data['zbx_label']).replace('\\n', '\n')
elif "label" in data:
label = remove_quotes(data['label'])
else:
label = default
return label
def _get_sysmap_id(self, map_name):
exist_map = self._zapi.map.get({'filter': {'name': map_name}})
if exist_map:
return exist_map[0]['sysmapid']
return None
def _get_group_id(self, group_name):
exist_group = self._zapi.hostgroup.get({'filter': {'name': group_name}})
if exist_group:
return exist_group[0]['groupid']
return None
def map_exists(self):
return bool(self.map_id)
def create_map(self, map_config):
try:
if self._module.check_mode:
self._module.exit_json(changed=True)
result = self._zapi.map.create(map_config)
if result:
return result
except Exception as e:
self._module.fail_json(msg="Failed to create map: %s" % e)
def update_map(self, map_config):
if not self.map_id:
self._module.fail_json(msg="Failed to update map: map_id is unknown. Try to create_map instead.")
try:
if self._module.check_mode:
self._module.exit_json(changed=True)
map_config['sysmapid'] = self.map_id
result = self._zapi.map.update(map_config)
if result:
return result
except Exception as e:
self._module.fail_json(msg="Failed to update map: %s" % e)
def delete_map(self):
if not self.map_id:
self._module.fail_json(msg="Failed to delete map: map_id is unknown.")
try:
if self._module.check_mode:
self._module.exit_json(changed=True)
self._zapi.map.delete([self.map_id])
except Exception as e:
self._module.fail_json(msg="Failed to delete map, Exception: %s" % e)
def is_exist_map_correct(self, generated_map_config):
exist_map_configs = self._zapi.map.get({
'sysmapids': self.map_id,
'selectLinks': 'extend',
'selectSelements': 'extend'
})
exist_map_config = exist_map_configs[0]
if not self._is_dicts_equal(generated_map_config, exist_map_config):
return False
if not self._is_selements_equal(generated_map_config['selements'], exist_map_config['selements']):
return False
self._update_ids(generated_map_config, exist_map_config)
if not self._is_links_equal(generated_map_config['links'], exist_map_config['links']):
return False
return True
def _get_selements_sort_keys(self):
keys_to_sort = ['label']
if StrictVersion(self.api_version) < StrictVersion('3.4'):
keys_to_sort.insert(0, 'elementid')
return keys_to_sort
def _is_selements_equal(self, generated_selements, exist_selements):
if len(generated_selements) != len(exist_selements):
return False
generated_selements_sorted = sorted(generated_selements, key=itemgetter(*self.selements_sort_keys))
exist_selements_sorted = sorted(exist_selements, key=itemgetter(*self.selements_sort_keys))
for (generated_selement, exist_selement) in zip(generated_selements_sorted, exist_selements_sorted):
if StrictVersion(self.api_version) >= StrictVersion("3.4"):
if not self._is_elements_equal(generated_selement.get('elements', []), exist_selement.get('elements', [])):
return False
if not self._is_dicts_equal(generated_selement, exist_selement, ['selementid']):
return False
if not self._is_urls_equal(generated_selement.get('urls', []), exist_selement.get('urls', [])):
return False
return True
def _is_urls_equal(self, generated_urls, exist_urls):
if len(generated_urls) != len(exist_urls):
return False
generated_urls_sorted = sorted(generated_urls, key=itemgetter('name', 'url'))
exist_urls_sorted = sorted(exist_urls, key=itemgetter('name', 'url'))
for (generated_url, exist_url) in zip(generated_urls_sorted, exist_urls_sorted):
if not self._is_dicts_equal(generated_url, exist_url, ['selementid']):
return False
return True
def _is_elements_equal(self, generated_elements, exist_elements):
if len(generated_elements) != len(exist_elements):
return False
        # list(...) keeps the key function working on both Python 2 and 3
        generated_elements_sorted = sorted(generated_elements, key=lambda k: list(k.values())[0])
        exist_elements_sorted = sorted(exist_elements, key=lambda k: list(k.values())[0])
for (generated_element, exist_element) in zip(generated_elements_sorted, exist_elements_sorted):
if not self._is_dicts_equal(generated_element, exist_element, ['selementid']):
return False
return True
# since generated IDs differ from real Zabbix ones, make real IDs match generated ones
def _update_ids(self, generated_map_config, exist_map_config):
generated_selements_sorted = sorted(generated_map_config['selements'], key=itemgetter(*self.selements_sort_keys))
exist_selements_sorted = sorted(exist_map_config['selements'], key=itemgetter(*self.selements_sort_keys))
id_mapping = {}
for (generated_selement, exist_selement) in zip(generated_selements_sorted, exist_selements_sorted):
id_mapping[exist_selement['selementid']] = generated_selement['selementid']
for link in exist_map_config['links']:
link['selementid1'] = id_mapping[link['selementid1']]
link['selementid2'] = id_mapping[link['selementid2']]
if link['selementid2'] < link['selementid1']:
link['selementid1'], link['selementid2'] = link['selementid2'], link['selementid1']
def _is_links_equal(self, generated_links, exist_links):
if len(generated_links) != len(exist_links):
return False
generated_links_sorted = sorted(generated_links, key=itemgetter('selementid1', 'selementid2', 'color', 'drawtype'))
exist_links_sorted = sorted(exist_links, key=itemgetter('selementid1', 'selementid2', 'color', 'drawtype'))
for (generated_link, exist_link) in zip(generated_links_sorted, exist_links_sorted):
if not self._is_dicts_equal(generated_link, exist_link, ['selementid1', 'selementid2']):
return False
if not self._is_triggers_equal(generated_link.get('linktriggers', []), exist_link.get('linktriggers', [])):
return False
return True
def _is_triggers_equal(self, generated_triggers, exist_triggers):
if len(generated_triggers) != len(exist_triggers):
return False
generated_triggers_sorted = sorted(generated_triggers, key=itemgetter('triggerid'))
exist_triggers_sorted = sorted(exist_triggers, key=itemgetter('triggerid'))
for (generated_trigger, exist_trigger) in zip(generated_triggers_sorted, exist_triggers_sorted):
if not self._is_dicts_equal(generated_trigger, exist_trigger):
return False
return True
@staticmethod
def _is_dicts_equal(d1, d2, exclude_keys=None):
if exclude_keys is None:
exclude_keys = []
for key in d1.keys():
if isinstance(d1[key], dict) or isinstance(d1[key], list):
continue
if key in exclude_keys:
continue
# compare as strings since Zabbix API returns everything as strings
if key not in d2 or str(d2[key]) != str(d1[key]):
return False
return True
def _get_host_id(self, hostname):
hostid = self._zapi.host.get({'filter': {'host': hostname}})
if hostid:
return str(hostid[0]['hostid'])
def _get_trigger_id(self, trigger_definition):
try:
host, trigger = trigger_definition.split(':', 1)
except Exception as e:
self._module.fail_json(msg="Failed to parse zbx_trigger='%s': %s" % (trigger_definition, e))
triggerid = self._zapi.trigger.get({
'host': host,
'filter': {
'description': trigger
}
})
if triggerid:
return str(triggerid[0]['triggerid'])
def _get_icon_ids(self):
icons_list = self._zapi.image.get({})
icon_ids = {}
for icon in icons_list:
icon_ids[icon['name']] = icon['imageid']
return icon_ids
def _get_icon_size(self, icon_id):
icons_list = self._zapi.image.get({
'imageids': [
icon_id
],
'select_image': True
})
if len(icons_list) > 0:
icon_base64 = icons_list[0]['image']
else:
self._module.fail_json(msg="Failed to find image with id %s" % icon_id)
image = Image.open(BytesIO(base64.b64decode(icon_base64)))
icon_width, icon_height = image.size
return icon_width, icon_height
@staticmethod
def _get_node_attributes(node):
attr = {}
if "attributes" in node.obj_dict:
attr.update(node.obj_dict['attributes'])
pos = node.get_pos()
if pos is not None:
pos = remove_quotes(pos)
xx, yy = pos.split(",")
attr['pos'] = (float(xx), float(yy))
return attr
def _get_graph_nodes(self, parent):
nodes = {}
for node in parent.get_nodes():
node_name = node.get_name()
if node_name in ('node', 'graph', 'edge'):
continue
nodes[node_name] = self._get_node_attributes(node)
for subgraph in parent.get_subgraphs():
nodes.update(self._get_graph_nodes(subgraph))
return nodes
def _get_graph_edges(self, parent):
edges = []
for edge in parent.get_edges():
edges.append(edge)
for subgraph in parent.get_subgraphs():
edges += self._get_graph_edges(subgraph)
return edges
def _get_scales(self, graph):
bb = remove_quotes(graph.get_bb())
min_x, min_y, max_x, max_y = bb.split(",")
scale_x = (self.width - self.margin * 2) / (float(max_x) - float(min_x)) if float(max_x) != float(min_x) else 0
scale_y = (self.height - self.margin * 2) / (float(max_y) - float(min_y)) if float(max_y) != float(min_y) else 0
return {
'min_x': float(min_x),
'min_y': float(min_y),
'max_x': float(max_x),
'max_y': float(max_y),
'scale_x': float(scale_x),
'scale_y': float(scale_y),
}
# transform Graphviz coordinates to Zabbix's ones
def _convert_coordinates(self, pos, scales, icon_size):
return {
'x': int((pos[0] - scales['min_x']) * scales['scale_x'] - icon_size[0] / 2 + self.margin),
'y': int((scales['max_y'] - pos[1] + scales['min_y']) * scales['scale_y'] - icon_size[1] / 2 + self.margin),
}
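    # Worked example (assumed values): with margin=40, scale_x=scale_y=1.0,
    # a 48x48 icon and a bounding box of (0, 0, 800, 600), a Graphviz point
    # at (100, 100) maps to x = 100 - 24 + 40 = 116 and
    # y = (600 - 100 + 0) - 24 + 40 = 516; y is flipped because Zabbix counts
    # from the top of the map while Graphviz counts from the bottom.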
def _get_color_hex(self, color_name):
if color_name.startswith('#'):
color_hex = color_name
else:
try:
color_hex = webcolors.name_to_hex(color_name)
except Exception as e:
self._module.fail_json(msg="Failed to get RGB hex for color '%s': %s" % (color_name, e))
color_hex = color_hex.strip('#').upper()
return color_hex
def _get_link_draw_style_id(self, draw_style):
draw_style_ids = {
'line': 0,
'bold': 2,
'dotted': 3,
'dashed': 4
}
try:
draw_style_id = draw_style_ids[draw_style]
except Exception as e:
self._module.fail_json(msg="Failed to find id for draw type '%s': %s" % (draw_style, e))
return draw_style_id
# If a string has single or double quotes around it, remove them.
def remove_quotes(s):
    if len(s) > 1 and (s[0] == s[-1]) and s.startswith(("'", '"')):
s = s[1:-1]
return s
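# e.g. remove_quotes('"red"') -> 'red' and remove_quotes("'#FF0000'") -> '#FF0000';
# strings without a matching surrounding quote pair are returned unchanged.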
def main():
module = AnsibleModule(
argument_spec=dict(
server_url=dict(type='str', required=True, aliases=['url']),
login_user=dict(type='str', required=True),
login_password=dict(type='str', required=True, no_log=True),
http_login_user=dict(type='str', required=False, default=None),
http_login_password=dict(type='str', required=False, default=None, no_log=True),
timeout=dict(type='int', default=10),
validate_certs=dict(type='bool', required=False, default=True),
name=dict(type='str', required=True, aliases=['map_name']),
data=dict(type='str', required=False, aliases=['dot_data']),
width=dict(type='int', default=800),
height=dict(type='int', default=600),
state=dict(default="present", choices=['present', 'absent']),
default_image=dict(type='str', required=False, aliases=['image']),
margin=dict(type='int', default=40),
expand_problem=dict(type='bool', default=True),
highlight=dict(type='bool', default=True),
label_type=dict(type='str', default='name', choices=['label', 'ip', 'name', 'status', 'nothing', 'custom']),
),
supports_check_mode=True
)
if not HAS_ZABBIX_API:
module.fail_json(msg=missing_required_lib('zabbix-api', url='https://pypi.org/project/zabbix-api/'), exception=ZBX_IMP_ERR)
if not HAS_PYDOTPLUS:
module.fail_json(msg=missing_required_lib('pydotplus', url='https://pypi.org/project/pydotplus/'), exception=PYDOT_IMP_ERR)
if not HAS_WEBCOLORS:
module.fail_json(msg=missing_required_lib('webcolors', url='https://pypi.org/project/webcolors/'), exception=WEBCOLORS_IMP_ERR)
if not HAS_PIL:
module.fail_json(msg=missing_required_lib('Pillow', url='https://pypi.org/project/Pillow/'), exception=PIL_IMP_ERR)
server_url = module.params['server_url']
login_user = module.params['login_user']
login_password = module.params['login_password']
http_login_user = module.params['http_login_user']
http_login_password = module.params['http_login_password']
timeout = module.params['timeout']
validate_certs = module.params['validate_certs']
zbx = None
# login to zabbix
try:
zbx = ZabbixAPI(server_url, timeout=timeout, user=http_login_user, passwd=http_login_password,
validate_certs=validate_certs)
zbx.login(login_user, login_password)
atexit.register(zbx.logout)
except Exception as e:
module.fail_json(msg="Failed to connect to Zabbix server: %s" % e)
sysmap = Map(module, zbx)
if sysmap.state == "absent":
if sysmap.map_exists():
sysmap.delete_map()
module.exit_json(changed=True, result="Successfully deleted map: %s" % sysmap.map_name)
else:
module.exit_json(changed=False)
else:
map_config = sysmap.get_map_config()
if sysmap.map_exists():
if sysmap.is_exist_map_correct(map_config):
module.exit_json(changed=False)
else:
sysmap.update_map(map_config)
module.exit_json(changed=True, result="Successfully updated map: %s" % sysmap.map_name)
else:
sysmap.create_map(map_config)
module.exit_json(changed=True, result="Successfully created map: %s" % sysmap.map_name)
if __name__ == '__main__':
main()
| Lujeni/ansible | lib/ansible/modules/monitoring/zabbix/zabbix_map.py | Python | gpl-3.0 | 32,525 |
#from gluon.contrib.populate import populate
#if not db(db.auth_user).count():
# populate(db.auth_user,10)
| szimszon/Temporary-storage | models/db_wizard_populate.py | Python | gpl-3.0 | 111 |
#!/usr/bin/env python
## \file
## \ingroup tutorial_tmva_pytorch
## \notebook -nodraw
## This tutorial shows how to do multiclass classification in TMVA with neural
## networks trained with PyTorch.
##
## \macro_code
##
## \date 2020
## \author Anirudh Dagar <[email protected]> - IIT, Roorkee
from ROOT import TMVA, TFile, TTree, TCut, gROOT
from os.path import isfile
import torch
from torch import nn
# Setup TMVA
TMVA.Tools.Instance()
TMVA.PyMethodBase.PyInitialize()
output = TFile.Open('TMVA.root', 'RECREATE')
factory = TMVA.Factory('TMVAClassification', output,
'!V:!Silent:Color:DrawProgressBar:Transformations=D,G:AnalysisType=multiclass')
# Load data
if not isfile('tmva_example_multiple_background.root'):
createDataMacro = str(gROOT.GetTutorialDir()) + '/tmva/createData.C'
print(createDataMacro)
gROOT.ProcessLine('.L {}'.format(createDataMacro))
gROOT.ProcessLine('create_MultipleBackground(4000)')
data = TFile.Open('tmva_example_multiple_background.root')
signal = data.Get('TreeS')
background0 = data.Get('TreeB0')
background1 = data.Get('TreeB1')
background2 = data.Get('TreeB2')
dataloader = TMVA.DataLoader('dataset')
for branch in signal.GetListOfBranches():
dataloader.AddVariable(branch.GetName())
dataloader.AddTree(signal, 'Signal')
dataloader.AddTree(background0, 'Background_0')
dataloader.AddTree(background1, 'Background_1')
dataloader.AddTree(background2, 'Background_2')
dataloader.PrepareTrainingAndTestTree(TCut(''),
'SplitMode=Random:NormMode=NumEvents:!V')
# Generate model
# Define model
model = nn.Sequential()
model.add_module('linear_1', nn.Linear(in_features=4, out_features=32))
model.add_module('relu', nn.ReLU())
model.add_module('linear_2', nn.Linear(in_features=32, out_features=4))
model.add_module('softmax', nn.Softmax(dim=1))
# Set loss and optimizer
loss = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD
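# Note: the optimizer is handed over as a class, not an instance; train()
# below instantiates it with the model parameters and a fixed lr of 0.01.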
# Define train function
def train(model, train_loader, val_loader, num_epochs, batch_size, optimizer, criterion, save_best, scheduler):
trainer = optimizer(model.parameters(), lr=0.01)
schedule, schedulerSteps = scheduler
best_val = None
for epoch in range(num_epochs):
# Training Loop
# Set to train mode
model.train()
running_train_loss = 0.0
running_val_loss = 0.0
for i, (X, y) in enumerate(train_loader):
trainer.zero_grad()
output = model(X)
target = torch.max(y, 1)[1]
train_loss = criterion(output, target)
train_loss.backward()
trainer.step()
# print train statistics
running_train_loss += train_loss.item()
if i % 32 == 31: # print every 32 mini-batches
print("[{}, {}] train loss: {:.3f}".format(epoch+1, i+1, running_train_loss / 32))
running_train_loss = 0.0
if schedule:
schedule(optimizer, epoch, schedulerSteps)
# Validation Loop
# Set to eval mode
model.eval()
with torch.no_grad():
for i, (X, y) in enumerate(val_loader):
output = model(X)
target = torch.max(y, 1)[1]
val_loss = criterion(output, target)
running_val_loss += val_loss.item()
curr_val = running_val_loss / len(val_loader)
if save_best:
            if best_val is None:
best_val = curr_val
best_val = save_best(model, curr_val, best_val)
# print val statistics per epoch
print("[{}] val loss: {:.3f}".format(epoch+1, curr_val))
running_val_loss = 0.0
print("Finished Training on {} Epochs!".format(epoch+1))
return model
# Define predict function
def predict(model, test_X, batch_size=32):
# Set to eval mode
model.eval()
test_dataset = torch.utils.data.TensorDataset(torch.Tensor(test_X))
test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=batch_size, shuffle=False)
predictions = []
with torch.no_grad():
for i, data in enumerate(test_loader):
X = data[0]
outputs = model(X)
predictions.append(outputs)
preds = torch.cat(predictions)
return preds.numpy()
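# These hooks are what TMVA's PyTorch method uses to drive training and
# inference on the saved model; the dictionary keys below are the names it
# looks up.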
load_model_custom_objects = {"optimizer": optimizer, "criterion": loss, "train_func": train, "predict_func": predict}
# Store model to file
# Convert the model to torchscript before saving
m = torch.jit.script(model)
torch.jit.save(m, "model.pt")
print(m)
# Book methods
factory.BookMethod(dataloader, TMVA.Types.kFisher, 'Fisher',
'!H:!V:Fisher:VarTransform=D,G')
factory.BookMethod(dataloader, TMVA.Types.kPyTorch, "PyTorch",
'H:!V:VarTransform=D,G:FilenameModel=model.pt:NumEpochs=20:BatchSize=32')
# Run TMVA
factory.TrainAllMethods()
factory.TestAllMethods()
factory.EvaluateAllMethods()
# Plot ROC Curves
roc = factory.GetROCCurve(dataloader)
roc.SaveAs('ROC_MulticlassPyTorch.png')
| root-mirror/root | tutorials/tmva/pytorch/MulticlassPyTorch.py | Python | lgpl-2.1 | 5,018 |
import os
import argparse
import traceback
import logging
import urlparse
from oxdpython import Client
def test_openid_commands(config_file):
"""function that runs the commands in a interactive manner
:param config_file: config file location
"""
c = Client(config_file)
print "\n=> Registering client using register_site()"
oxd_id = c.register_site()
logging.info("Received: %s", oxd_id)
print "\n=> Update site registration"
updated = c.update_site()
c.config.set("client", "scope", "openid,profile")
logging.info("Received: %s", updated)
print "\n=> Getting auth URL"
auth_url = c.get_authorization_url()
print "Visit this URL in your browser: ", auth_url
logging.info("Received: %s", auth_url)
print "\n=> Getting tokens by code"
callback_url = raw_input("Enter redirected URL to parse tokens: ")
parsed = urlparse.urlparse(callback_url)
params = urlparse.parse_qs(parsed.query)
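    # e.g. a redirect of https://client.example.org/cb?code=SplxlO&state=af0ifj
    # (a hypothetical URL) yields params['code'][0] == 'SplxlO' and
    # params['state'][0] == 'af0ifj'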
tokens = c.get_tokens_by_code(params['code'][0], params['state'][0])
logging.info("Received: %s", tokens)
print "\n=> Getting user info"
claims = c.get_user_info(tokens['access_token'])
logging.info("Received: %s", claims)
print "\n=> Getting new access token using refresh token"
new_token = c.get_access_token_by_refresh_token(tokens["refresh_token"])
logging.info("Received: %s", new_token)
print "\n=> Getting Logout URI"
logout_uri = c.get_logout_uri()
logging.info("Received: %s", logout_uri)
print "Visit this URL to logout: ", logout_uri
print "\n=> Remove Site"
oxd_id = c.remove_site()
logging.info("Received: %s", oxd_id)
def test_setup_client(config_file):
c = Client(config_file)
print "\n=> Setup Client"
response = c.setup_client()
logging.info("Received: %s", response)
print "\n=> Get Client Token"
# Set auto_update to False to prevent launching of new thread. auto_update
# is helpful for long running apps, but unnecessary for this test script
token = c.get_client_token(auto_update=False)
logging.info("Received: %s", token)
print "\n=> Introspect Access Token"
introspection = c.introspect_access_token(token['access_token'])
logging.info("Received: %s", introspection)
print "\n=> Remove Site"
oxd_id = c.remove_site()
logging.info("Received: %s", oxd_id)
def execute_test(test):
config = os.path.join(this_dir, 'openid_socket.cfg')
test_config = os.path.join(this_dir, 'test.cfg')
with open(config) as f:
with open(test_config, "w") as of:
of.write(f.read())
try:
test(test_config)
    except Exception:
print traceback.format_exc()
os.remove(test_config)
if __name__ == '__main__':
this_dir = os.path.dirname(os.path.realpath(__file__))
parser = argparse.ArgumentParser()
parser.add_argument("-v", "--verbose", help="increase output verbosity",
action="store_true")
args = parser.parse_args()
if args.verbose:
logging.basicConfig(level=logging.DEBUG)
else:
logging.basicConfig(level=logging.ERROR)
tests = [test_openid_commands, test_setup_client]
for test in tests:
execute_test(test)
print "All tests complete."
| GluuFederation/oxd-python | examples/e2e_tests/openid_socket.py | Python | mit | 3,274 |
from textwrap import dedent
def dedent_and_trim(string: str) -> str:
    return dedent(string.lstrip('\r\n').rstrip(' '))
| garcia/simfile | simfile/_private/dedent.py | Python | mit | 123 |
#
# gPrime - a web-based genealogy program
#
# Copyright (c) 2017 gPrime Development Team
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
import tornado.web
from .handlers import BaseHandler
from ..forms import URLForm
class URLHandler(BaseHandler):
@tornado.web.authenticated
def get(self, prefix="", suffix=""):
"""
prefix = 'person/b2cfa6ca14d1f274465'
suffix = '1', or '1/delete' add, or edit
"""
_ = self.app.get_translate_func(self.current_user)
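        # e.g. suffix "2/delete" parses to row "2" and action "delete"; a
        # bare suffix such as "2" defaults the action to "view"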
if "/" in suffix:
row, action = suffix.split("/")
else:
row, action = suffix, "view"
instance = self.app.get_object_from_url(prefix)
path = "urls.%s" % (int(row) - 1)
if prefix.count("/") > 2:
parts = prefix.split("/") # "/person/handle/somethings/", ["", "person", "handle", "sometings"]
path = ".".join(parts[3:]) + path
url = "/" + prefix + "/urls/" + row
subitem = instance.get_field(path)
form = URLForm(self, instance, subitem, path, url)
## FIXME: Handle add and delete
self.render("url.html",
**self.get_template_dict(tview=_("url"),
action=action,
search="",
page="",
form=form))
return
@tornado.web.authenticated
def post(self, prefix="", suffix=""):
"""
prefix = 'person/b2cfa6ca14d1f274465'
suffix = '1'
"""
_ = self.app.get_translate_func(self.current_user)
if "/" in suffix:
row, action = suffix.split("/", 1)
else:
row, action = suffix, "view"
instance = self.app.get_object_from_url(prefix)
path = "urls.%s" % (int(row) - 1)
if prefix.count("/") > 2:
parts = prefix.split("/") # "/person/handle/somethings/", ["", "person", "handle", "sometings"]
path = ".".join(parts[3:]) + path
url = "/" + prefix + "/urls/" + row
subitem = instance.get_field(path)
form = URLForm(self, instance, subitem, path, url)
form.save()
self.redirect(self.app.make_url(url))
| sam-m888/gprime | gprime/app/handlers/urlhandler.py | Python | gpl-2.0 | 2,945 |
from state import called
def setup():
called.append('test_pak1.test_mod.setup')
def teardown():
called.append('test_pak1.test_mod.teardown')
def test_one_mod_one():
called.append('test_pak1.test_mod.test_one_mod_one')
pass
| davidyezsetz/kuma | vendor/packages/nose/functional_tests/support/ltfn/test_pak1/test_mod.py | Python | mpl-2.0 | 243 |
from .gen.project_statuses import _ProjectStatuses
class ProjectStatuses(_ProjectStatuses):
"""Project Statuses resource"""
def create(self, project, params={}, **options):
        return self.create_in_project(project, params, **options)
def create_in_project(self, project, params={}, **options):
"""Creates a new status update on the project.
Returns the full record of the newly created project status update.
Parameters
----------
project : {Gid} The project on which to create a status update.
[data] : {Object} Data for the request
- text : {String} The text of the project status update.
- color : {String} The color to associate with the status update. Must be one of `"red"`, `"yellow"`, or `"green"`.
"""
path = "/projects/%s/project_statuses" % (project)
return self.client.post(path, params, **options)
def find_by_project(self, project, params={}, **options):
"""Returns the compact project status update records for all updates on the project.
Parameters
----------
project : {Gid} The project to find status updates for.
[params] : {Object} Parameters for the request
"""
path = "/projects/%s/project_statuses" % (project)
return self.client.get_collection(path, params, **options)
def find_by_id(self, project_status, params={}, **options):
"""Returns the complete record for a single status update.
Parameters
----------
project-status : {Gid} The project status update to get.
[params] : {Object} Parameters for the request
"""
path = "/project_statuses/%s" % (project_status)
return self.client.get(path, params, **options)
def delete(self, project_status, params={}, **options):
"""Deletes a specific, existing project status update.
Returns an empty data record.
Parameters
----------
project-status : {Gid} The project status update to delete.
"""
path = "/project_statuses/%s" % (project_status)
return self.client.delete(path, params, **options)
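# --- Usage sketch (illustrative; not part of the library) ---
# Assumes a personal access token and a project gid, both placeholders here;
# Client.access_token() and the `project_statuses` accessor come from this
# package. Nothing in the library calls this helper.
def _example_usage():
    import asana
    client = asana.Client.access_token('0/placeholder-token')
    # create a status update on a project, then list all updates for it
    client.project_statuses.create_in_project(
        '12345', {'text': 'On track for launch.', 'color': 'green'})
    for update in client.project_statuses.find_by_project('12345'):
        print(update)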
| Asana/python-asana | asana/resources/project_statuses.py | Python | mit | 2,184 |
#!/usr/bin/env python
import Command
import recalboxFiles
from generators.Generator import Generator
import os.path
import glob
class ViceGenerator(Generator):
# Main entry of the module
# Return command
def generate(self, system, rom, playersControllers):
# Settings recalbox default config file if no user defined one
if not system.config['configfile']:
# Using recalbox config file
#system.config['configfile'] = recalboxFiles.mupenCustom
pass
# Find rom path
romPath = os.path.dirname(rom)
romName = os.path.splitext(os.path.basename(rom))[0]
commandArray = [recalboxFiles.recalboxBins[system.config['emulator']],
"-config", recalboxFiles.viceConfig,
"-autostart", rom]
if 'args' in system.config and system.config['args'] is not None:
commandArray.extend(system.config['args'])
return Command.Command(videomode='default', array=commandArray, env={"SDL_VIDEO_GL_DRIVER": "/usr/lib/libGLESv2.so"})
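# Illustrative example (paths are assumptions, not actual recalbox values):
# for rom '/recalbox/share/roms/c64/pitfall.d64' the generated command is
# roughly
#   x64 -config <recalboxFiles.viceConfig> -autostart /recalbox/share/roms/c64/pitfall.d64
# with the binary looked up in recalboxFiles.recalboxBins for the configured
# emulator, plus any extra 'args' from the system config.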
| recalbox/recalbox-configgen | configgen/generators/vice/viceGenerator.py | Python | mit | 1,079 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import gevent.monkey; gevent.monkey.patch_all()
import gevent
import csv
import requests
import sys
def post_species(scientific, common, labels):
resp = requests.post('https://birdseye.space/v1/species', json={
'names': {
'scientific': scientific.lower(),
'common': common.lower()
},
'labels': [l.lower() for l in labels]
})
assert resp.status_code in (200, 201), 'Unexpected code: {}.'.format(
resp.status_code)
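# Illustrative request body (hypothetical species) for
# post_species('Falco peregrinus', 'Peregrine Falcon', ['Birds', 'Fauna']):
#   {"names": {"scientific": "falco peregrinus", "common": "peregrine falcon"},
#    "labels": ["birds", "fauna"]}
# i.e. names and labels are lower-cased before being POSTed.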
def main(filename):
print('Importing {}'.format(filename))
group_singular = {
'conifers': [
'conifer', 'plant', 'land plant', 'botany'],
'reptiles': [
'reptile', 'animal', 'cold blood', 'cold bloded', 'vertebrate',
'fauna'],
'turtles (non-marine)': [
            'turtle', 'animal', 'non-marine', 'cold blood', 'cold blooded',
'vertebrate', 'fauna'],
'butterflies': [
'butterfly', 'animal', 'insect', 'moths and butterflies', 'fauna',
'invertebrate'],
'dragonflies': [
            'dragonfly', 'animal', 'insect', 'dragonflies and damselflies',
'invertebrate', 'fauna'],
'mammals': [
'mammal', 'animal', 'warm blood', 'warm blooded', 'vertebrate',
'fauna'],
'birds': [
'bird', 'animal', 'warm blood', 'warm blooded', 'vertebrate',
'fauna'],
'amphibians': [
            'amphibian', 'animal', 'vertebrate', 'fauna'],
'sphingid moths': [
'sphingid moth', 'moth', 'animal', 'insect', 'invertebrate',
'fauna', 'moths and butterflies'],
'bumblebees': [
'bumblebee', 'bee', 'bees', 'animal', 'insect', 'invertebrate'],
}
with open(filename, newline='') as f:
count = 0
# "Scientific Name","Common Name","Family","Taxonomic Group"
for row in csv.reader(f, delimiter=',', quotechar='"'):
count += 1
common = row[1]
if common == 'null':
common = row[0]
gevent.spawn(
post_species, row[0], common,
[row[2], row[3]] + group_singular[row[3].lower()])
if count >= 100:
gevent.wait()
count = 0
gevent.wait()
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv[1]))
| MihaiBalint/birdseye-server | test-data/species-import.py | Python | mit | 2,428 |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import _, models
class Users(models.Model):
_inherit = 'res.users'
def action_open_my_account_settings(self):
action = {
"name": _("Account Security"),
"type": "ir.actions.act_window",
"res_model": "res.users",
"views": [[self.env.ref('auth_totp_mail.res_users_view_form').id, "form"]],
"res_id": self.id,
}
return action
def get_totp_invite_url(self):
return '/web#action=auth_totp_mail.action_activate_two_factor_authentication'
def action_totp_invite(self):
invite_template = self.env.ref('auth_totp_mail.mail_template_totp_invite')
users_to_invite = self.sudo().filtered(lambda user: not user.totp_secret)
for user in users_to_invite:
email_values = {
'email_from': self.env.user.email_formatted,
'author_id': self.env.user.partner_id.id,
}
invite_template.send_mail(user.id, force_send=True, email_values=email_values,
email_layout_xmlid='mail.mail_notification_light')
# Display a confirmation toaster
return {
'type': 'ir.actions.client',
'tag': 'display_notification',
'params': {
'type': 'info',
'sticky': False,
'message': _("Invitation to use two-factor authentication sent for the following user(s): %s",
', '.join(users_to_invite.mapped('name'))),
}
}
| jeremiahyan/odoo | addons/auth_totp_mail/models/res_users.py | Python | gpl-3.0 | 1,668 |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Project.has_static_page'
db.add_column(u'projects_project', 'has_static_page',
self.gf('django.db.models.fields.BooleanField')(default=False),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Project.has_static_page'
db.delete_column(u'projects_project', 'has_static_page')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'projects.event': {
'Meta': {'object_name': 'Event'},
'comment': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'contact': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'organizers': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'events'", 'blank': 'True', 'to': u"orm['projects.Organisation']"}),
'strategy': ('django.db.models.fields.TextField', [], {'blank': 'True'})
},
u'projects.member': {
'Meta': {'object_name': 'Member'},
'availability': ('django.db.models.fields.CharField', [], {'default': "'reader'", 'max_length': '20'}),
'available_after': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'comment': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'contact_frequency': ('django.db.models.fields.CharField', [], {'default': "'d'", 'max_length': '2'}),
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'facebook': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'intro': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'is_available': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_paid_only': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_contacted_at': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'latest_answer': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'offered_help': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'projects_active': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'active_members'", 'blank': 'True', 'to': u"orm['projects.Project']"}),
'projects_interests': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'interested_members'", 'blank': 'True', 'to': u"orm['projects.Project']"}),
'skills': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'members'", 'blank': 'True', 'to': u"orm['projects.Skill']"}),
'types': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'members'", 'blank': 'True', 'to': u"orm['projects.MemberType']"}),
'update_from_user': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'member'", 'null': 'True', 'to': u"orm['projects.User']"}),
'working_on': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'working_members'", 'null': 'True', 'to': u"orm['projects.Project']"})
},
u'projects.membertype': {
'Meta': {'object_name': 'MemberType'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
u'projects.organisation': {
'Meta': {'object_name': 'Organisation'},
'comment': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'contact': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'found_via': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'middlemen': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'middleman_organisations'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['projects.Member']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'partnered_project': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'partners'", 'null': 'True', 'to': u"orm['projects.Project']"}),
'provided_help': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'representatives': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'strategy': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'types': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'organisations'", 'blank': 'True', 'to': u"orm['projects.OrganisationType']"}),
'working_with': ('django.db.models.fields.TextField', [], {'blank': 'True'})
},
u'projects.organisationtype': {
'Meta': {'object_name': 'OrganisationType'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
u'projects.project': {
'Meta': {'ordering': "['order']", 'object_name': 'Project'},
'complimenting_color': ('django.db.models.fields.CharField', [], {'max_length': '7', 'blank': 'True'}),
'cover_image': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'facebook_group': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'github_repo': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'has_static_page': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_featured': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_forced_active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'logo': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'blank': 'True'}),
'logo_styled': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'blank': 'True'}),
'logo_thumb': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {}),
'pm_url': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'short_description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255', 'blank': 'True'}),
'strategy': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'url': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'})
},
u'projects.projectactivity': {
'Meta': {'object_name': 'ProjectActivity'},
'can_accomodate': ('django.db.models.fields.IntegerField', [], {'default': '1', 'max_length': '3'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_organisational': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_template': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '140'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'activities'", 'null': 'True', 'to': u"orm['projects.Project']"}),
'users': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'project_activities'", 'symmetrical': 'False', 'through': u"orm['projects.UserActivity']", 'to': u"orm['projects.User']"})
},
u'projects.projectmilestone': {
'Meta': {'object_name': 'ProjectMilestone'},
'date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_done': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_technical': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {}),
'percent': ('django.db.models.fields.PositiveIntegerField', [], {'max_length': '3'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'milestones'", 'to': u"orm['projects.Project']"}),
'target_date': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'})
},
u'projects.projectmotive': {
'Meta': {'object_name': 'ProjectMotive'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'motives'", 'to': u"orm['projects.Project']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '30'})
},
u'projects.projectusageexamplestep': {
'Meta': {'object_name': 'ProjectUsageExampleStep'},
'example_number': ('django.db.models.fields.PositiveIntegerField', [], {}),
'icon': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'example_steps'", 'to': u"orm['projects.Project']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '140'})
},
u'projects.skill': {
'Meta': {'ordering': "['name']", 'object_name': 'Skill'},
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'skills'", 'blank': 'True', 'to': u"orm['projects.SkillGroup']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
u'projects.skillgroup': {
'Meta': {'object_name': 'SkillGroup'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
u'projects.task': {
'Meta': {'object_name': 'Task'},
'activity': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'tasks'", 'to': u"orm['projects.ProjectActivity']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_complete': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {}),
'points': ('django.db.models.fields.PositiveIntegerField', [], {'default': '5', 'max_length': '4'})
},
u'projects.update': {
'Meta': {'object_name': 'Update'},
'change': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
'date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {})
},
u'projects.user': {
'Meta': {'object_name': 'User'},
'available_after': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'avatar': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
'bio': ('django.db.models.fields.TextField', [], {}),
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
'has_confirmed_data': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_available': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'profession': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'projects_interests': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'interested_users'", 'blank': 'True', 'to': u"orm['projects.Project']"}),
'skills': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'users'", 'blank': 'True', 'to': u"orm['projects.Skill']"}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'projects.useractivity': {
'Meta': {'unique_together': "(('person', 'project_activity'),)", 'object_name': 'UserActivity'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'last_stopped_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'needs_replacement': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'person': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'activities'", 'to': u"orm['projects.User']"}),
'progress': ('django.db.models.fields.IntegerField', [], {'max_length': '3'}),
'project_activity': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'user_activities'", 'to': u"orm['projects.ProjectActivity']"})
},
u'projects.userpointspending': {
'Meta': {'object_name': 'UserPointSpending'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'person': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'spendings'", 'to': u"orm['projects.User']"}),
'points': ('django.db.models.fields.PositiveIntegerField', [], {'max_length': '10'}),
'product': ('django.db.models.fields.CharField', [], {'max_length': '30'})
},
u'projects.userprojectpause': {
'Meta': {'unique_together': "(('project', 'person'),)", 'object_name': 'UserProjectPause'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'person': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'pauses'", 'to': u"orm['projects.User']"}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'pauses'", 'to': u"orm['projects.Project']"})
}
}
    complete_apps = ['projects']
| tochev/obshtestvo.bg | projects/migrations/0017_auto__add_field_project_has_static_page.py | Python | unlicense | 19,837
"""Common utilities for local and remote clients."""
import re
import os
import stat
class BaseClient(object):
@staticmethod
def set_path_readonly(path):
current = os.stat(path).st_mode
if os.path.isdir(path):
            # Directories also need the execute bits so they stay traversable
            right = (stat.S_IXUSR | stat.S_IRGRP | stat.S_IXGRP | stat.S_IRUSR)
            if current & ~right == 0:
                # Already read-only
                return
            os.chmod(path, right)
        else:
            # Plain files only keep the read bits
            right = (stat.S_IRGRP | stat.S_IRUSR)
            if current & ~right == 0:
                # Already read-only
                return
            os.chmod(path, right)
@staticmethod
def unset_path_readonly(path):
current = os.stat(path).st_mode
if os.path.isdir(path):
right = (stat.S_IXUSR | stat.S_IRGRP | stat.S_IXGRP |
stat.S_IRUSR | stat.S_IWGRP | stat.S_IWUSR)
if current & right == right:
return
os.chmod(path, right)
else:
right = (stat.S_IRGRP | stat.S_IRUSR |
stat.S_IWGRP | stat.S_IWUSR)
if current & right == right:
return
os.chmod(path, right)
def unlock_path(self, path, unlock_parent=True):
result = 0
if unlock_parent:
parent_path = os.path.dirname(path)
if (os.path.exists(parent_path) and
not os.access(parent_path, os.W_OK)):
self.unset_path_readonly(parent_path)
result |= 2
if os.path.exists(path) and not os.access(path, os.W_OK):
self.unset_path_readonly(path)
result |= 1
return result
def lock_path(self, path, locker):
if locker == 0:
return
if locker & 1 == 1:
self.set_path_readonly(path)
if locker & 2 == 2:
parent = os.path.dirname(path)
self.set_path_readonly(parent)
class NotFound(Exception):
pass
DEFAULT_REPOSITORY_NAME = 'default'
DEFAULT_IGNORED_PREFIXES = [
'.', # hidden Unix files
'~$', # Windows lock files
'Thumbs.db', # Thumbnails files
'Icon\r', # Mac Icon
'desktop.ini', # Icon for windows
]
DEFAULT_IGNORED_SUFFIXES = [
'~', # editor buffers
'.swp', # vim swap files
'.lock', # some process use file locks
'.LOCK', # other locks
'.part', '.crdownload', '.partial', # partially downloaded files by browsers
]
# Default buffer size for file upload / download and digest computation
FILE_BUFFER_SIZE = 1024 ** 2
# Name of the folder holding the files locally edited from Nuxeo
LOCALLY_EDITED_FOLDER_NAME = 'Locally Edited'
COLLECTION_SYNC_ROOT_FACTORY_NAME = 'collectionSyncRootFolderItemFactory'
UNACCESSIBLE_HASH = "TO_COMPUTE"
def safe_filename(name, replacement=u'-'):
"""Replace invalid character in candidate filename"""
return re.sub(ur'(/|\\|\*|:|\||"|<|>|\?)', replacement, name)
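# Minimal self-check sketch (hedged; not part of the library API): exercises
# the read-only lock/unlock round trip on a throwaway temporary file.
if __name__ == '__main__':
    import tempfile
    _tmp = tempfile.NamedTemporaryFile(delete=False, suffix='.txt')
    _tmp.close()
    _client = BaseClient()
    _client.set_path_readonly(_tmp.name)
    _locker = _client.unlock_path(_tmp.name)
    assert _locker & 1 == 1, "file should have been made writable again"
    _client.lock_path(_tmp.name, _locker)
    _client.unset_path_readonly(_tmp.name)
    os.remove(_tmp.name)
    print safe_filename(u'a/b:c*d?e')  # -> a-b-c-d-e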
| arameshkumar/base-nuxeo-drive | nuxeo-drive-client/nxdrive/client/common.py | Python | lgpl-2.1 | 2,976 |
#!/usr/bin/env python2
import sys, time, datetime
from Nonin import *
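# Usage sketch (hedged): pass the WristOx serial device node on the command
# line, e.g.
#     python get-data.py /dev/ttyUSB0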
finished = False
while not finished:
try:
device = sys.argv[1]
nonin = Nonin3150(device)
time.sleep(2)
print 'Attempting to read session data on the device...'
sessions = nonin.read_sessions()
dt = datetime.datetime.now()
dt_str = dt.strftime('%Y-%m-%dT%H:%M:%S')
csv_filename = '/tmp/wristox-sessions.%d.%s.csv' % (982146, dt_str)
print 'Exporting data...'
exporter = Exporter(sessions)
exporter.export(format='csv', filename=csv_filename)
finished = True
except Exception as e:
print e
# if nonin:
# nonin.device.close()
time.sleep(2)
# import pdb
# pdb.set_trace()
# for session in sessions[0]: print session
# print "Finished!"
| Hammit/wais-pop | bin/Nonin/get-data.py | Python | mit | 852 |
# -*- encoding: utf-8 -*-
"""Implements Packages UI"""
from robottelo.ui.base import Base, UIError
from robottelo.ui.locators import common_locators, locators, tab_locators
from robottelo.ui.navigator import Navigator
class Package(Base):
"""Manipulates Packages from UI"""
is_katello = True
def navigate_to_entity(self):
"""Navigate to Package entity page"""
Navigator(self.browser).go_to_packages()
def _search_locator(self):
"""Specify locator for Package entity search procedure"""
return locators['package.rpm_name']
def check_package_details(self, name, parameter_list=None):
"""Check whether package detail section contains expected values or
raise exception otherwise.
All values should be passed in absolute correspondence to UI. For
example, we have 'Description' or 'Checksum Type' fields, so next
parameter list should be passed::
[
['Description', 'Expected description'],
['Checksum Type', 'sha256'],
]
"""
self.click(self.search(name))
for parameter_name, parameter_value in parameter_list:
actual_text = self.wait_until_element(
locators['package.field_value'] % parameter_name
).text
if actual_text != parameter_value:
raise UIError(
'Actual text for "{0}" parameter is "{1}", but it is'
' expected to have "{2}"'.format(
parameter_name, actual_text, parameter_value)
)
def check_file_list(self, name, file_list):
"""Check whether necessary file(s) are present in the package"""
self.click(self.search(name))
self.click(tab_locators['package.tab_files'])
for package_file in file_list:
self.wait_until_element(
locators['package.content_file'] % package_file)
def search_in_repo(self, name, repo_name, expecting_results=True):
"""Search for package and filter results by repository"""
self.search(name)
self.select_repo(repo_name)
        # In case we expect the search to find no matching entity
if expecting_results is False:
return self.wait_until_element(
common_locators['kt_search_no_results'])
return self.wait_until_element(self._search_locator() % name)
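    # Hedged usage sketch: given a Selenium `browser` already logged into the
    # Katello UI, a typical flow looks roughly like
    #     package = Package(browser)
    #     package.check_package_details('bear', [['Description', 'A dummy package']])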
| ares/robottelo | robottelo/ui/packages.py | Python | gpl-3.0 | 2,451 |
import cv2
import time
cap=cv2.VideoCapture('max2.mp4')
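# Note (hedged): 'haarcascade_frontalface_alt.xml' is expected next to this
# script, and detect() below reloads it on every frame for simplicity; with
# OpenCV 2.x, the cv2.cv.CV_HAAR_SCALE_IMAGE flag plus the (200, 200) minimum
# size restricts detection to large, near-camera faces.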
def detect(frame):
cascade = cv2.CascadeClassifier("haarcascade_frontalface_alt.xml")
rects = cascade.detectMultiScale(frame, 1.3, 4, cv2.cv.CV_HAAR_SCALE_IMAGE, (200,200))
if len(rects) == 0:
return [], frame
rects[:, 2:] += rects[:, :2]
return rects, frame
def box(rects, frame):
for x1, y1, x2, y2 in rects:
cropped=frame[y1:y2,x1:x2]
cv2.imshow("det",cropped)
name = 'images/'+str(int(time.time()))+'.jpg'
blkImg=cv2.cvtColor(cropped,cv2.COLOR_BGR2GRAY)
blkImg1= cv2.resize(blkImg,(100,100))
cv2.imwrite(name,blkImg1)
cv2.rectangle(frame, (x1, y1), (x2, y2), (127, 255, 0), 2)
cv2.namedWindow('detected',cv2.WINDOW_NORMAL)
cv2.imshow('detected', frame);
if cv2.waitKey(1) & 0xFF == ord('q'):
cap.release()
cv2.destroyAllWindows()
while(cap.isOpened()):
    ret, frame = cap.read()
    if not ret:
        # End of stream or failed read: stop instead of passing None to detect()
        break
    rects, img = detect(frame)
    box(rects, img)
    #time.sleep(0.5)
| mwaruwa/jkuat-projects | Final-Project/FaceEngine/Trainer/Trainer.py | Python | apache-2.0 | 1,044 |
"""
Test lldb process launch flags.
"""
from __future__ import print_function
import copy
import os
import re
import time
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
import six
class ProcessLaunchTestCase(TestBase):
mydir = TestBase.compute_mydir(__file__)
NO_DEBUG_INFO_TESTCASE = True
def setUp(self):
# Call super's setUp().
TestBase.setUp(self)
self.runCmd("settings set auto-confirm true")
def tearDown(self):
self.runCmd("settings clear auto-confirm")
TestBase.tearDown(self)
@not_remote_testsuite_ready
def test_io(self):
"""Test that process launch I/O redirection flags work properly."""
self.build()
exe = self.getBuildArtifact("a.out")
self.expect("file " + exe,
patterns=["Current executable set to .*a.out"])
in_file = os.path.join(self.getSourceDir(), "input-file.txt")
out_file = lldbutil.append_to_process_working_directory(self, "output-test.out")
err_file = lldbutil.append_to_process_working_directory(self, "output-test.err")
# Make sure the output files do not exist before launching the process
try:
os.remove(out_file)
except OSError:
pass
try:
os.remove(err_file)
except OSError:
pass
launch_command = "process launch -i '{0}' -o '{1}' -e '{2}' -w '{3}'".format(
in_file, out_file, err_file, self.get_process_working_directory())
if lldb.remote_platform:
self.runCmd('platform put-file "{local}" "{remote}"'.format(
local=in_file, remote=in_file))
self.expect(launch_command,
patterns=["Process .* launched: .*a.out"])
success = True
err_msg = ""
out = lldbutil.read_file_on_target(self, out_file)
if out != "This should go to stdout.\n":
success = False
err_msg = err_msg + " ERROR: stdout file does not contain correct output.\n"
err = lldbutil.read_file_on_target(self, err_file)
if err != "This should go to stderr.\n":
success = False
err_msg = err_msg + " ERROR: stderr file does not contain correct output.\n"
if not success:
self.fail(err_msg)
# rdar://problem/9056462
# The process launch flag '-w' for setting the current working directory
# not working?
@not_remote_testsuite_ready
@expectedFailureAll(oslist=["linux"], bugnumber="llvm.org/pr20265")
def test_set_working_dir_nonexisting(self):
"""Test that '-w dir' fails to set the working dir when running the inferior with a dir which doesn't exist."""
d = {'CXX_SOURCES': 'print_cwd.cpp'}
self.build(dictionary=d)
self.setTearDownCleanup(d)
exe = self.getBuildArtifact("a.out")
self.runCmd("file " + exe)
mywd = 'my_working_dir'
out_file_name = "my_working_dir_test.out"
err_file_name = "my_working_dir_test.err"
my_working_dir_path = self.getBuildArtifact(mywd)
out_file_path = os.path.join(my_working_dir_path, out_file_name)
err_file_path = os.path.join(my_working_dir_path, err_file_name)
# Check that we get an error when we have a nonexisting path
invalid_dir_path = mywd + 'z'
launch_command = "process launch -w %s -o %s -e %s" % (
invalid_dir_path, out_file_path, err_file_path)
self.expect(
launch_command, error=True, patterns=[
"error:.* No such file or directory: %s" %
invalid_dir_path])
@not_remote_testsuite_ready
def test_set_working_dir_existing(self):
"""Test that '-w dir' sets the working dir when running the inferior."""
d = {'CXX_SOURCES': 'print_cwd.cpp'}
self.build(dictionary=d)
self.setTearDownCleanup(d)
exe = self.getBuildArtifact("a.out")
self.runCmd("file " + exe)
mywd = 'my_working_dir'
out_file_name = "my_working_dir_test.out"
err_file_name = "my_working_dir_test.err"
my_working_dir_path = self.getBuildArtifact(mywd)
lldbutil.mkdir_p(my_working_dir_path)
out_file_path = os.path.join(my_working_dir_path, out_file_name)
err_file_path = os.path.join(my_working_dir_path, err_file_name)
# Make sure the output files do not exist before launching the process
try:
os.remove(out_file_path)
os.remove(err_file_path)
except OSError:
pass
launch_command = "process launch -w %s -o %s -e %s" % (
my_working_dir_path, out_file_path, err_file_path)
self.expect(launch_command,
patterns=["Process .* launched: .*a.out"])
success = True
err_msg = ""
# Check to see if the 'stdout' file was created
try:
out_f = open(out_file_path)
except IOError:
success = False
err_msg = err_msg + "ERROR: stdout file was not created.\n"
else:
# Check to see if the 'stdout' file contains the right output
line = out_f.readline()
if self.TraceOn():
print("line:", line)
if not re.search(mywd, line):
success = False
err_msg = err_msg + "The current working directory was not set correctly.\n"
out_f.close()
# Try to delete the 'stdout' and 'stderr' files
try:
os.remove(out_file_path)
os.remove(err_file_path)
except OSError:
pass
if not success:
self.fail(err_msg)
def test_environment_with_special_char(self):
"""Test that environment variables containing '*' and '}' are handled correctly by the inferior."""
source = 'print_env.cpp'
d = {'CXX_SOURCES': source}
self.build(dictionary=d)
self.setTearDownCleanup(d)
exe = self.getBuildArtifact("a.out")
evil_var = 'INIT*MIDDLE}TAIL'
target = self.dbg.CreateTarget(exe)
main_source_spec = lldb.SBFileSpec(source)
breakpoint = target.BreakpointCreateBySourceRegex(
'// Set breakpoint here.', main_source_spec)
process = target.LaunchSimple(None,
['EVIL=' + evil_var],
self.get_process_working_directory())
self.assertEqual(
process.GetState(),
lldb.eStateStopped,
PROCESS_STOPPED)
threads = lldbutil.get_threads_stopped_at_breakpoint(
process, breakpoint)
self.assertEqual(len(threads), 1)
frame = threads[0].GetFrameAtIndex(0)
sbvalue = frame.EvaluateExpression("evil")
value = sbvalue.GetSummary().strip('"')
self.assertEqual(value, evil_var)
process.Continue()
self.assertEqual(process.GetState(), lldb.eStateExited, PROCESS_EXITED)
| youtube/cobalt | third_party/llvm-project/lldb/packages/Python/lldbsuite/test/functionalities/process_launch/TestProcessLaunch.py | Python | bsd-3-clause | 7,171 |
#!/usr/bin/env python
# Copyright(c)2012-2013 Internet Archive. Software license AGPL version 3.
#
# This file is part of the `surt` python package.
#
# surt is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# surt is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with surt. If not, see <http://www.gnu.org/licenses/>.
#
# The surt source is hosted at https://github.com/internetarchive/surt
"""This is a python port of DefaultIAURLCanonicalizer.java:
http://archive-access.svn.sourceforge.net/viewvc/archive-access/trunk/archive-access/projects/archive-commons/src/main/java/org/archive/url/DefaultIAURLCanonicalizer.java?view=markup
The doctests are copied from DefaultIAURLCanonicalizerTest.java:
http://archive-access.svn.sourceforge.net/viewvc/archive-access/trunk/archive-access/projects/archive-commons/src/test/java/org/archive/url/DefaultIAURLCanonicalizerTest.java?view=markup
"""
import GoogleURLCanonicalizer
import IAURLCanonicalizer
# canonicalize()
#_______________________________________________________________________________
def canonicalize(url, **options):
"""The input url is a handyurl instance
These doctests are from DefaultIAURLCanonicalizerTest.java:
>>> from handyurl import handyurl
>>> canonicalize(handyurl.parse("http://www.alexa.com/")).getURLString()
'http://alexa.com/'
>>> canonicalize(handyurl.parse("http://archive.org/index.html")).getURLString()
'http://archive.org/index.html'
>>> canonicalize(handyurl.parse("http://archive.org/index.html?")).getURLString()
'http://archive.org/index.html'
>>> canonicalize(handyurl.parse("http://archive.org/index.html?a=b")).getURLString()
'http://archive.org/index.html?a=b'
>>> canonicalize(handyurl.parse("http://archive.org/index.html?b=b&a=b")).getURLString()
'http://archive.org/index.html?a=b&b=b'
>>> canonicalize(handyurl.parse("http://archive.org/index.html?b=a&b=b&a=b")).getURLString()
'http://archive.org/index.html?a=b&b=a&b=b'
>>> canonicalize(handyurl.parse("http://www34.archive.org/index.html?b=a&b=b&a=b")).getURLString()
'http://archive.org/index.html?a=b&b=a&b=b'
"""
url = GoogleURLCanonicalizer.canonicalize(url, **options)
url = IAURLCanonicalizer.canonicalize(url, **options)
return url
# main()
#_______________________________________________________________________________
if __name__ == "__main__":
import doctest
doctest.testmod()
| pombredanne/surt | surt/DefaultIAURLCanonicalizer.py | Python | agpl-3.0 | 2,949 |
# Copyright 2019 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Utilities for Runge Kutta solvers."""
import collections
import numpy as np
import tensorflow.compat.v2 as tf
from tensorflow_probability.python.internal import dtype_util
ButcherTableau = collections.namedtuple('ButcherTableau',
['a', 'b', 'c_sol', 'c_mid', 'c_error'])
# A mnemonic device that organizes coefficients of Runge-Kutta schemes.
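# Hedged reading of the fields: `a` holds the fractional times of the
# intermediate k-points, `b` the per-point state-combination weights, `c_sol`
# the solution weights, `c_mid` the mid-point interpolation weights, and
# `c_error` the weights of the embedded error estimate.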
def _possibly_nonzero(value):
"""Returns Python boolean indicating whether `value` can be non-zero.
Args:
value: `Tensor` or numpy array that is tested on being certainly zero.
Tensors are considered non-zero.
Returns:
possibly_nonzero: `False` if `value` is deterministically zero, `True`
otherwise.
"""
static_value = tf.get_static_value(value)
if static_value is None:
return True
else:
return np.all(static_value != 0)
def abs_square(x):
"""Returns the squared value of `tf.abs(x)` for real and complex dtypes."""
if dtype_util.is_complex(x.dtype):
return tf.math.square(tf.math.real(x)) + tf.math.square(tf.math.imag(x))
else:
return tf.math.square(x)
def weighted_sum(weights, list_of_states):
"""Computes a weighted sum of `list_of_states`.
Args:
weights: List of scalar tensors.
list_of_states: List of states. Every element is assumed to be of the same
structure of Tensors. Must be of the same length as `weights`.
Returns:
weighted_sum: A weighted sum of states in `list_of_states`. Has the same
structure as elements of `list_of_states`.
Raises:
ValueError: If `list_of_states` is empty or length doesn't match `weights`.
"""
with tf.name_scope('weighted_sum'):
if not weights:
raise ValueError('`list_of_states` and `weights` must be non-empty')
if len(weights) != len(list_of_states):
raise ValueError('`weights` and `list_of_states` must have same length')
for state in list_of_states:
tf.nest.assert_same_structure(state, list_of_states[-1])
weights_and_states = zip(weights, list_of_states)
weighted_states = [
[tf.cast(w, s_comp.dtype) * s_comp for s_comp in tf.nest.flatten(s)]
for w, s in weights_and_states if _possibly_nonzero(w)
]
list_of_components = zip(*weighted_states) # Put same components together.
flat_final_state = [tf.add_n(component) for component in list_of_components]
if not flat_final_state:
return nest_constant(list_of_states[0], 0.0)
return tf.nest.pack_sequence_as(list_of_states[0], flat_final_state)
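# Hedged example: weighted_sum([0.5, 0.5], [y_a, y_b]) averages two states of
# identical nested structure; Python-float weights that are exactly zero are
# pruned statically by _possibly_nonzero, so they emit no graph ops at all.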
def nest_constant(structure, value=1.0, dtype=None):
"""Constructs a nested structure similar to `structure` with constant values.
Args:
structure: A reference nested structure that is used for constant.
value: Floating scalar setting the value of each entry of the structure.
Default value: `1.0`.
dtype: Optional dtype that specifies the dtype of the constant structure
being produced. If `None`, then dtype is inferred from components of the
structure.
Default value: `None`.
Returns:
nest: Possibly nested structure of `Tensor`s with all entries equal to
`value`. Has the same structure as `structure`.
"""
flat_structure = tf.nest.flatten(structure)
if dtype is None:
dtypes = [c.dtype for c in flat_structure]
else:
dtypes = [dtype] * len(flat_structure)
flat_vals = [
value * tf.ones_like(c, dtype=dtype)
for c, dtype in zip(flat_structure, dtypes)
]
return tf.nest.pack_sequence_as(structure, flat_vals)
def nest_rms_norm(nest):
"""Computes root mean squared norm of nested structure of `Tensor`s.
Args:
nest: Possibly nested structure of `Tensor`s of which RMS norm is computed.
Returns:
    norm: Scalar floating tensor equal to the RMS norm of `nest`.
"""
sizes = tf.nest.map_structure(tf.size, nest)
num_elements = tf.add_n(tf.nest.flatten(sizes))
def averaged_sum_squares(input_tensor):
num_elements_cast = tf.cast(
num_elements, dtype=dtype_util.real_dtype(input_tensor.dtype))
return tf.reduce_sum(abs_square(input_tensor)) / num_elements_cast
squared_sums = tf.nest.map_structure(averaged_sum_squares, nest)
norm = tf.math.sqrt(tf.add_n(tf.nest.flatten(squared_sums)))
return norm
def nest_where(accept_step, new_values, old_values):
"""Returns `new_values` if `accept_step` is True `old_values` otherwise.
Uses `tf.where` on individual elements to select `new_values` or `old_values`.
Args:
accept_step: Scalar boolean `Tensor` indicating whether to return
`new_values`.
    new_values: Possibly nested structure of `Tensor`s.
    old_values: Possibly nested structure of `Tensor`s. Must have the same
structure as `new_values`.
Returns:
values: `new_values` if `accept_step` is True and `old_values` otherwise.
"""
tf.nest.assert_same_structure(new_values, old_values)
select_new_or_old = lambda x, y: tf.where(accept_step, x, y)
values = tf.nest.map_structure(select_new_or_old, new_values, old_values)
return values
def _fourth_order_interpolation_coefficients(y0, y1, y_mid, f0, f1, dt):
"""Fits coefficients for 4th order polynomial interpolation.
Args:
y0: state value at the start of the interval.
y1: state value at the end of the interval.
y_mid: state value at the mid-point of the interval.
f0: state derivative value at the start of the interval.
f1: state derivative value at the end of the interval.
dt: width of the interval.
Returns:
coefficients: List of coefficients `[a, b, c, d, e]` for interpolating with
the polynomial `p = a * x ** 4 + b * x ** 3 + c * x ** 2 + d * x + e` for
values of `x` between 0 (start of interval) and 1 (end of interval).
"""
# Formulas for interpolation coefficients were computed as follows:
# ```None
# a, b, c, d, e = sympy.symbols('a b c d e')
# x, dt, y0, y1, y_mid, f0, f1 = sympy.symbols('x dt y0 y1 y_mid f0 f1')
# p = a * x ** 4 + b * x ** 3 + c * x ** 2 + d * x + e
# sympy.solve([p.subs(x, 0) - y0,
# p.subs(x, 1 / 2) - y_mid,
# p.subs(x, 1) - y1,
# (p.diff(x) / dt).subs(x, 0) - f0,
# (p.diff(x) / dt).subs(x, 1) - f1],
# [a, b, c, d, e])
# {a: -2.0*dt*f0 + 2.0*dt*f1 - 8.0*y0 - 8.0*y1 + 16.0*y_mid,
# b: 5.0*dt*f0 - 3.0*dt*f1 + 18.0*y0 + 14.0*y1 - 32.0*y_mid,
# c: -4.0*dt*f0 + dt*f1 - 11.0*y0 - 5.0*y1 + 16.0*y_mid,
# d: dt*f0,
# e: y0}
# ```
with tf.name_scope('interpolation_coefficients'):
a = weighted_sum([-2 * dt, 2 * dt, -8, -8, 16], [f0, f1, y0, y1, y_mid])
b = weighted_sum([5 * dt, -3 * dt, 18, 14, -32], [f0, f1, y0, y1, y_mid])
c = weighted_sum([-4 * dt, dt, -11, -5, 16], [f0, f1, y0, y1, y_mid])
d = weighted_sum([dt], [f0])
e = y0
return [a, b, c, d, e]
def rk_fourth_order_interpolation_coefficients(y0, y1, k, dt, tableau):
"""Fit an interpolating polynomial to the results of a Runge-Kutta step.
Performs 4th order interpolation based on state and state derivative values
determined in the Runge-Kutta state.
Args:
y0: State value at the start of the interval.
y1: State value at the end of the interval.
k: List of state values at RK k-points.
dt: Width of the interval.
tableau: `ButcherTableau` describing a Runge-Kutta scheme.
Returns:
coefficients: List of coefficients that interpolate the solution.
"""
with tf.name_scope('interp_fit_rk'):
y_mid = weighted_sum([1.0, dt], [y0, weighted_sum(tableau.c_mid, k)])
f0 = k[0]
f1 = k[-1]
return _fourth_order_interpolation_coefficients(y0, y1, y_mid, f0, f1, dt)
def evaluate_interpolation(coefficients, t0, t1, t, validate_args=False):
"""Evaluates the value of polynomial interpolation at the given time point.
Args:
coefficients: List of `Tensor`s that hold polynomial coefficients. Must have
length greater or equal to 2.
t0: Scalar floating `Tensor` giving the start of the interval.
t1: Scalar floating `Tensor` giving the end of the interval.
t: Scalar floating `Tensor` giving the desired interpolation point.
validate_args: Python `bool` indicating whether to validate inputs.
Default value: False.
Returns:
interpolated_value: Polynomial interpolation at time `t`.
Raises:
ValueError: If `coefficients` has less than 2 elements.
"""
if len(coefficients) < 2:
raise ValueError('`coefficients` must have at least 2 elements.')
with tf.name_scope('interp_evaluate'):
dtype = dtype_util.common_dtype(coefficients)
t0 = tf.convert_to_tensor(t0)
t1 = tf.convert_to_tensor(t1)
t = tf.convert_to_tensor(t)
assert_ops = []
if validate_args:
assert_ops.append(tf.Assert(
(t0 <= t) & (t <= t1),
['invalid interpolation, fails `t0 <= t <= t1`:', t0, t, t1]))
with tf.control_dependencies(assert_ops):
x = tf.cast((t - t0) / (t1 - t0), dtype)
xs = [tf.constant(1, dtype), x]
for _ in range(2, len(coefficients)):
xs.append(xs[-1] * x)
return weighted_sum(list(reversed(xs)), coefficients)
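# Hedged example: with coefficients [a, b, c, d, e] from the 4th-order fit
# above, evaluate_interpolation(coeffs, t0, t1, t) rescales t to
# x = (t - t0) / (t1 - t0) in [0, 1] and returns
# a * x**4 + b * x**3 + c * x**2 + d * x + e.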
def runge_kutta_step(ode_fn,
y0,
f0,
t0,
dt,
tableau,
name='runge_kutta_step'):
"""Take an arbitrary Runge-Kutta step and estimate error.
Args:
    ode_fn: Callable(t, y) -> dy_dt that evaluates the time derivative of `y`.
y0: `Tensor` initial value for the state.
f0: `Tensor` initial value for the derivative of `y0` = `ode_fn(t0, y0)`.
t0: `Tensor` value for the initial time.
dt: `Tensor` value for the desired time step.
tableau: `ButcherTableau` describing how to take the Runge-Kutta step.
name: optional name for the operation.
Returns:
rk_state_tuple: Tuple `(y1, f1, y1_error, k)` giving the estimated function
value after the Runge-Kutta step at `t1 = t0 + dt`, the derivative of the
state at `t1`, estimated error at `t1`, and a list of Runge-Kutta
coefficients `k` used for calculating these terms.
"""
with tf.name_scope(name):
y0 = tf.nest.map_structure(tf.convert_to_tensor, y0)
f0 = tf.nest.map_structure(tf.convert_to_tensor, f0)
t0 = tf.convert_to_tensor(t0, name='t0')
dt = tf.convert_to_tensor(dt, name='dt')
k = [f0]
for a_i, b_i in zip(tableau.a, tableau.b):
ti = t0 + a_i * dt
yi = weighted_sum([1.0, dt], [y0, weighted_sum(b_i, k)])
k.append(ode_fn(ti, yi))
if not (tableau.c_sol[-1] == 0 and tableau.c_sol[:-1] == tableau.b[-1]):
# This property (true for Dormand-Prince) lets us save a few FLOPs.
yi = weighted_sum([1.0, dt], [y0, weighted_sum(tableau.c_sol, k)])
y1 = yi
f1 = k[-1]
y1_error = weighted_sum([dt], [weighted_sum(tableau.c_error, k)])
return y1, f1, y1_error, k
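# Minimal self-check sketch (hedged; not part of the library API). Assumes
# eager execution (the TF2 default) and an illustrative explicit-midpoint
# tableau; one step of dy/dt = -y should land close to exp(-0.1) ~= 0.9048.
if __name__ == '__main__':
  _midpoint = ButcherTableau(
      a=[0.5],
      b=[[0.5]],
      c_sol=[0.0, 1.0],
      c_mid=[0.0, 0.5],
      c_error=[0.0, 0.0])  # plain RK2 has no embedded error estimate
  _ode_fn = lambda t, y: -y
  _y0 = tf.constant(1.0)
  _y1, _f1, _y1_error, _k = runge_kutta_step(
      _ode_fn, _y0, _ode_fn(tf.constant(0.0), _y0),
      t0=tf.constant(0.0), dt=tf.constant(0.1), tableau=_midpoint)
  print(_y1.numpy())  # ~0.9050 for explicit midpoint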
| tensorflow/probability | tensorflow_probability/python/math/ode/runge_kutta_util.py | Python | apache-2.0 | 11,551 |
################################################################################
#
# Copyright 2015-2020 Félix Brezo and Yaiza Rubio
#
# This program is part of OSRFramework. You can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
################################################################################
__author__ = "Felix Brezo, Yaiza Rubio <[email protected]>"
__version__ = "2.0"
from osrframework.utils.platforms import Platform
class Gravatar(Platform):
"""A <Platform> object for Gravatar"""
def __init__(self):
self.platformName = "Gravatar"
self.tags = ["identity"]
########################
# Defining valid modes #
########################
self.isValidMode = {}
self.isValidMode["phonefy"] = False
self.isValidMode["usufy"] = True
self.isValidMode["searchfy"] = False
######################################
# Search URL for the different modes #
######################################
# Strings with the URL for each and every mode
self.url = {}
#self.url["phonefy"] = "http://anyurl.com//phone/" + "<phonefy>"
self.url["usufy"] = "http://es.gravatar.com/" + "<usufy>" + ".json"
#self.url["searchfy"] = "http://anyurl.com/search/" + "<searchfy>"
######################################
# Whether the user needs credentials #
######################################
self.needsCredentials = {}
#self.needsCredentials["phonefy"] = False
self.needsCredentials["usufy"] = False
#self.needsCredentials["searchfy"] = False
#################
# Valid queries #
#################
        # Regular expressions that a query must match to be valid for this platform
self.validQuery = {}
# The regular expression '.+' will match any query
#self.validQuery["phonefy"] = ".*"
self.validQuery["usufy"] = ".+"
#self.validQuery["searchfy"] = ".*"
###################
# Not_found clues #
###################
        # Strings that will imply that the queried user does not exist
self.notFoundText = {}
#self.notFoundText["phonefy"] = []
self.notFoundText["usufy"] = ['"User not found"']
#self.notFoundText["searchfy"] = []
#########################
# Fields to be searched #
#########################
self.fieldsRegExp = {}
# Definition of regular expressions to be searched in phonefy mode
#self.fieldsRegExp["phonefy"] = {}
# Example of fields:
#self.fieldsRegExp["phonefy"]["i3visio.location"] = ""
# Definition of regular expressions to be searched in usufy mode
self.fieldsRegExp["usufy"] = {}
# Example of fields:
self.fieldsRegExp["usufy"]["i3visio.fullname"] = '<div class="user-name fn">([^<]+)</div>'
# Definition of regular expressions to be searched in searchfy mode
#self.fieldsRegExp["searchfy"] = {}
# Example of fields:
#self.fieldsRegExp["searchfy"]["i3visio.location"] = ""
################
# Fields found #
################
        # This attribute will be filled in at runtime.
self.foundFields = {}
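        # Hedged usage sketch: this wrapper is normally driven by
        # OSRFramework's usufy engine; a manual check looks roughly like
        #     platform = Gravatar()
        #     url = platform.url["usufy"].replace("<usufy>", "johndoe")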
| i3visio/osrframework | osrframework/wrappers/gravatar.py | Python | agpl-3.0 | 3,920 |
from .sloealbum import SloeAlbum
from .sloebaseplugin import SloeBasePlugIn
from .sloeconfig import SloeConfig
from .sloeconfigspec import SloeConfigSpec
from .sloeerror import SloeError
from .sloeexecutil import SloeExecUtil
from .sloegenspec import SloeGenSpec
from .sloeitem import SloeItem
from .sloelocalexec import SloeLocalExec
from .sloeorder import SloeOrder
from .sloeoutputspec import SloeOutputSpec
from .sloeoutpututil import SloeOutputUtil
from .sloeplaylist import SloePlaylist
from .sloepluginmanager import SloePlugInManager
from .sloeremoteitem import SloeRemoteItem
from .sloerenderjob import SloeRenderJob
from .sloesandbox import SloeSandbox
from .sloetransferjob import SloeTransferJob
from .sloetransferspec import SloeTransferSpec
from .sloetree import SloeTree
from .sloetreenode import SloeTreeNode
from .sloetrees import SloeTrees
from .sloetreeutil import SloeTreeUtil
from .sloeutil import SloeUtil
from .sloevarutil import SloeVarUtil
from .sloevideoutil import SloeVideoUtil
from .sloeworkmanager import SloeWorkManager
__all__ = ["sloealbum", "sloebaseplugin", "sloeconfig", "sloeconfigspec", "sloeerror", "sloeexecutil", "sloegenspec", "sloeitem",
"sloelocalexec", "sloeorder", "sloeoutputspec", "sloeoutpututil", "sloeplaylist", "sloepluginmanager",
"sloeremoteitem", "sloerenderjob", "sloesandbox", "sloetransferjob", "sloetransferspec",
"sloetree", "sloetreenode", "sloetrees", "sloetreeutil", "sloeutil",
"sloevarutil", "sloevideoutil", "sloeworkmanager"]
__version__ = "0.0.1"
| sloe/chan | sloelib/__init__.py | Python | apache-2.0 | 1,562 |