code | repo_name | path | language | license | size
---|---|---|---|---|---|
from rest_framework import status
from rest_framework.test import APITestCase
from sigma_core.models.user import User
from sigma_core.models.group import Group
from sigma_core.models.group_field import GroupField
from sigma_core.models.group_member import GroupMember
from sigma_core.models.group_field_value import GroupFieldValue
from sigma_core.serializers.group_field_value import GroupFieldValueSerializer
#
class GroupFieldValueTests(APITestCase):
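    # Permission matrix exercised by the tests below: plain group members may only
    # create, update or delete their own values of non-protected fields, while group
    # administrators may manage any value, including values of protected fields.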
    @classmethod
    def setUpTestData(self):
        super(APITestCase, self).setUpTestData()
        self.group_member_value_url = '/group-field-value/'
        self.nomember = User.objects.create(email='[email protected]', lastname='Nomembre', firstname='Bemmonre')
        self.memberA = User.objects.create(email='[email protected]', lastname='MembreB', firstname='Bremmeb')
        self.memberB = User.objects.create(email='[email protected]', lastname='MembreA', firstname='Remameb')
        self.admin = User.objects.create(email='[email protected]', lastname='Admin', firstname='Nimad')
        self.group = Group.objects.create(name="Groupe de test", description="")
        self.membershipA = GroupMember.objects.create(user=self.memberA, group=self.group)
        self.membershipB = GroupMember.objects.create(user=self.memberB, group=self.group)
        self.adminship = GroupMember.objects.create(user=self.admin, group=self.group, is_administrator=True)
        # Protected/unprotected fields used to test creation
        self.protectedFieldCr = GroupField.objects.create(group=self.group, name="Champ protégé", type=GroupField.TYPE_STRING, protected=True)
        self.notProtectedFieldCr = GroupField.objects.create(group=self.group, name="Champ non protégé", type=GroupField.TYPE_STRING)
        # Protected/unprotected fields, and their corresponding values, used to test update and deletion
        self.protectedField = GroupField.objects.create(group=self.group, name="Champ protégé", type=GroupField.TYPE_STRING, protected=True)
        self.notProtectedField = GroupField.objects.create(group=self.group, name="Champ non protégé", type=GroupField.TYPE_STRING)
        self.protectedValueA = GroupFieldValue.objects.create(membership=self.membershipA, field=self.protectedField, value='')
        self.protectedValueB = GroupFieldValue.objects.create(membership=self.membershipB, field=self.protectedField, value='')
        self.protectedValueAdmin = GroupFieldValue.objects.create(membership=self.adminship, field=self.protectedField, value='')
        self.notProtectedValueA = GroupFieldValue.objects.create(membership=self.membershipA, field=self.notProtectedField, value='')
        self.notProtectedValueB = GroupFieldValue.objects.create(membership=self.membershipB, field=self.notProtectedField, value='')
        self.notProtectedValueAdmin = GroupFieldValue.objects.create(membership=self.adminship, field=self.notProtectedField, value='')
    ###############################################################################################
    ## CREATION TESTS ##
    ###############################################################################################
    def try_create(self, u, m, f, s):
        self.client.force_authenticate(user=u)
        r = self.client.post(self.group_member_value_url, {'membership': m.id, 'field': f.id, 'value': ''}, format='json')
        self.assertEqual(r.status_code, s)
    def test_create_memberA_in_membershipA_notprotected(self):
        self.try_create(self.memberA, self.membershipA, self.notProtectedFieldCr, status.HTTP_201_CREATED)
    def test_create_memberA_in_membershipB_notprotected(self):
        self.try_create(self.memberA, self.membershipB, self.notProtectedFieldCr, status.HTTP_403_FORBIDDEN)
    def test_create_admin_in_membershipA_notprotected(self):
        self.try_create(self.admin, self.membershipA, self.notProtectedFieldCr, status.HTTP_201_CREATED)
    def test_create_admin_in_adminship_notprotected(self):
        self.try_create(self.admin, self.adminship, self.notProtectedFieldCr, status.HTTP_201_CREATED)
    def test_create_memberA_in_membershipA_protected(self):
        self.try_create(self.memberA, self.membershipA, self.protectedFieldCr, status.HTTP_403_FORBIDDEN)
    def test_create_memberA_in_membershipB_protected(self):
        self.try_create(self.memberA, self.membershipB, self.protectedFieldCr, status.HTTP_403_FORBIDDEN)
    def test_create_admin_in_membershipA_protected(self):
        self.try_create(self.admin, self.membershipA, self.protectedFieldCr, status.HTTP_201_CREATED)
    def test_create_admin_in_adminship_protected(self):
        self.try_create(self.admin, self.adminship, self.protectedFieldCr, status.HTTP_201_CREATED)
    def test_create_already_existing(self):
        self.try_create(self.admin, self.adminship, self.protectedField, status.HTTP_400_BAD_REQUEST)
    ###############################################################################################
    ## UPDATE TESTS ##
    ###############################################################################################
    def try_update(self, u, v, s):
        uv = GroupFieldValueSerializer(v).data
        uv['value'] = "Test"
        self.client.force_authenticate(user=u)
        r = self.client.put(self.group_member_value_url + str(v.id) + '/', uv, format='json')
        self.assertEqual(r.status_code, s)
        if s == status.HTTP_200_OK:
            self.assertEqual(GroupFieldValueSerializer(GroupFieldValue.objects.all().get(id=v.id)).data, uv)
    def test_update_memberA_in_valueA_notprotected(self):
        self.try_update(self.memberA, self.notProtectedValueA, status.HTTP_200_OK)
    def test_update_memberA_in_valueB_notprotected(self):
        self.try_update(self.memberA, self.notProtectedValueB, status.HTTP_403_FORBIDDEN)
    def test_update_admin_in_valueA_notprotected(self):
        self.try_update(self.admin, self.notProtectedValueA, status.HTTP_200_OK)
    def test_update_admin_in_valueAdmin_notprotected(self):
        self.try_update(self.admin, self.notProtectedValueAdmin, status.HTTP_200_OK)
    def test_update_memberA_in_valueA_protected(self):
        self.try_update(self.memberA, self.protectedValueA, status.HTTP_403_FORBIDDEN)
    def test_update_memberA_in_valueB_protected(self):
        self.try_update(self.memberA, self.protectedValueB, status.HTTP_403_FORBIDDEN)
    def test_update_admin_in_valueA_protected(self):
        self.try_update(self.admin, self.protectedValueA, status.HTTP_200_OK)
    def test_update_admin_in_valueAdmin_protected(self):
        self.try_update(self.admin, self.protectedValueAdmin, status.HTTP_200_OK)
    ###############################################################################################
    ## DESTROY TESTS ##
    ###############################################################################################
    def try_destroy(self, u, v, s):
        self.client.force_authenticate(user=u)
        r = self.client.delete(self.group_member_value_url + str(v.id) + '/', format='json')
        self.assertEqual(r.status_code, s)
    def test_destroy_memberA_in_valueA_notprotected(self):
        self.try_destroy(self.memberA, self.notProtectedValueA, status.HTTP_204_NO_CONTENT)
    def test_destroy_memberA_in_valueB_notprotected(self):
        self.try_destroy(self.memberA, self.notProtectedValueB, status.HTTP_403_FORBIDDEN)
    def test_destroy_admin_in_valueA_notprotected(self):
        self.try_destroy(self.admin, self.notProtectedValueA, status.HTTP_204_NO_CONTENT)
    def test_destroy_admin_in_valueAdmin_notprotected(self):
        self.try_destroy(self.admin, self.notProtectedValueAdmin, status.HTTP_204_NO_CONTENT)
    def test_destroy_memberA_in_valueA_protected(self):
        self.try_destroy(self.memberA, self.protectedValueA, status.HTTP_403_FORBIDDEN)
    def test_destroy_memberA_in_valueB_protected(self):
        self.try_destroy(self.memberA, self.protectedValueB, status.HTTP_403_FORBIDDEN)
    def test_destroy_admin_in_valueA_protected(self):
        self.try_destroy(self.admin, self.protectedValueA, status.HTTP_204_NO_CONTENT)
    def test_destroy_admin_in_valueAdmin_protected(self):
        self.try_destroy(self.admin, self.protectedValueAdmin, status.HTTP_204_NO_CONTENT)
    ###############################################################################################
    ## LIST TESTS ##
    ###############################################################################################
    # def try_delete(self, u, f, s):
    #     self.client.force_authenticate(user=u)
    #     r = self.client.post(self.group_field_url + str(f.id) + '/destroy', format='json')
    #     self.assertEqual(r.status_code, s)
    # def test_delete_nomember_in_secretgr(self):
    #     self.try_delete(self.nomember, self.secretGroupField, status.HTTP_403_FORBIDDEN)
    # def test_delete_nomember_in_normalgr(self):
    #     self.try_delete(self.nomember, self.normalGroupField, status.HTTP_403_FORBIDDEN)
    # def test_delete_nomember_in_publicgr(self):
    #     self.try_delete(self.nomember, self.publicGroupField, status.HTTP_403_FORBIDDEN)
    # def test_delete_member_in_secretgr(self):
    #     self.try_delete(self.member, self.secretGroupField, status.HTTP_403_FORBIDDEN)
    # def test_delete_member_in_normalgr(self):
    #     self.try_delete(self.member, self.normalGroupField, status.HTTP_403_FORBIDDEN)
    # def test_delete_member_in_publicgr(self):
    #     self.try_delete(self.member, self.publicGroupField, status.HTTP_403_FORBIDDEN)
    # def test_delete_admin_in_secretgr(self):
    #     self.try_delete(self.admin, self.secretGroupField, status.HTTP_204_NO_CONTENT)
    # def test_delete_admin_in_normalgr(self):
    #     self.try_delete(self.admin, self.normalGroupField, status.HTTP_204_NO_CONTENT)
    # def test_delete_admin_in_publicgr(self):
    #     self.try_delete(self.admin, self.publicGroupField, status.HTTP_204_NO_CONTENT)
    ###############################################################################################
    ## RETRIEVE TESTS ##
    ###############################################################################################
    # No need for retrieve tests as retrieving a group field value has the same permission as retrieving the equivalent group field.
    ###############################################################################################
    ## VALIDATION TESTS ##
    ###############################################################################################
    def try_validation(self, t, a, v, p):
        self.client.force_authenticate(user=self.admin)
        f = GroupField.objects.create(group=self.group, name="Champ de test", type=t, accept=a)
        r = self.client.post(self.group_member_value_url, {'membership': self.adminship.id, 'field': f.id, 'value': v}, format='json')
        if p:
            self.assertEqual(r.status_code, status.HTTP_201_CREATED)
        else:
            self.assertEqual(r.status_code, status.HTTP_400_BAD_REQUEST)
    def test_validation_number(self):
        self.try_validation(GroupField.TYPE_NUMBER, '', 0, True)
        self.try_validation(GroupField.TYPE_NUMBER, '_0', 0, True)
        self.try_validation(GroupField.TYPE_NUMBER, '_10', -66, True)
        self.try_validation(GroupField.TYPE_NUMBER, '-10_42', 12, True)
        self.try_validation(GroupField.TYPE_NUMBER, '-10_', 28, True)
        self.try_validation(GroupField.TYPE_NUMBER, '_-4', -1, False)
        self.try_validation(GroupField.TYPE_NUMBER, '2_', 1, False)
        self.try_validation(GroupField.TYPE_NUMBER, '10_42', 102, False)
        self.try_validation(GroupField.TYPE_NUMBER, '-10_-42', 5, False)
    def test_validation_email(self):
        self.try_validation(GroupField.TYPE_EMAIL, '', '[email protected]', True)
        self.try_validation(GroupField.TYPE_EMAIL, '.test', '[email protected]', True)
        self.try_validation(GroupField.TYPE_EMAIL, 'test', '[email protected]', True)
        self.try_validation(GroupField.TYPE_EMAIL, 'test.test', '[email protected]', True)
        self.try_validation(GroupField.TYPE_EMAIL, '@test.test', '[email protected]', True)
        self.try_validation(GroupField.TYPE_EMAIL, '@test.test toto.toto', '[email protected]', True)
        self.try_validation(GroupField.TYPE_EMAIL, '@test.test test', '[email protected]', True)
        self.try_validation(GroupField.TYPE_EMAIL, '.fr .com .edu .goov .ko.uka .tg', '[email protected]', True)
        self.try_validation(GroupField.TYPE_EMAIL, '.fr .com .edu .goov .ko.uka .tg', '[email protected]', True)
        self.try_validation(GroupField.TYPE_EMAIL, '.test.test', '[email protected]', False)
        self.try_validation(GroupField.TYPE_EMAIL, '.toto.test', '[email protected]', False)
        self.try_validation(GroupField.TYPE_EMAIL, 'toto.test', '[email protected]', False)
        self.try_validation(GroupField.TYPE_EMAIL, '@toto.test', '[email protected]', False)
        self.try_validation(GroupField.TYPE_EMAIL, '.toto', '[email protected]', False)
        self.try_validation(GroupField.TYPE_EMAIL, '.fr .com .gmail .tg @troll.tg', '[email protected]', False)
        self.try_validation(GroupField.TYPE_EMAIL, '.fr .com .gmail .tg @troll.tg', 'test@fr', False)
| SRLKilling/sigma-backend | data-server/django_app/sigma_core/tests/test_group_field_value.py | Python | agpl-3.0 | 13,831 |
"""
Django settings for resty project.
Generated by 'django-admin startproject' using Django 1.8.4.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '#6nke%q&fii$l+d7r+#(zrj70ae2tqvu2ud6+-6y^%6_5l!xz2'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.humanize',
'django.contrib.staticfiles',
'resty.apps.properties',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'resty.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'resty.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media/')
MEDIA_URL = '/media/'
| alexdzul/propiedades | resty/settings.py | Python | mit | 2,763 |
# -*- coding: utf-8 -*-
import json
import requests
try:
from thrift.protocol import fastbinary
except ImportError:
fastbinary = None
class url(object):
LINE_HOST_DOMAIN = 'https://gd2.line.naver.jp'
LINE_NOMA_DOMAIN = 'https://gf.line.naver.jp'
LINE_SECO_DOMAIN = 'https://gsx.line.naver.jp'
LINE_AUTH_QUERY_PATH = '/api/v4/TalkService.do'
LINE_SESSION_LINE_QUERY_PATH = '/authct/v1/keys/line'
LINE_SESSION_NAVER_QUERY_PATH = '/authct/v1/keys/naver'
LINE_API_QUERY_PATH_FIR = '/S4'
LINE_API_QUERY_PATH_SEC = '/F4'
LINE_POLL_QUERY_PATH_FIR = '/P4'
LINE_POLL_QUERY_PATH_SEC = '/E4'
LINE_POLL_QUERY_PATH_THI = '/H4'
LINE_NORMAL_POLL_QUERY_PATH = '/NP4'
LINE_COMPACT_MESSAGE_QUERY_PATH = '/C5'
LINE_CALL_QUERY_PATH = '/V4'
LINE_CERTIFICATE_PATH = '/Q'
LINE_CHAN_QUERY_PATH = '/CH4'
LINE_SHOP_QUERY_PATH = '/SHOP4'
UserAgent = 'DESKTOP:MAC:10.10.2-YOSEMITE-x64(4.5.0)'
AppName = 'DESKTOPMAC 10.10.2-YOSEMITE-x64 MAC 4.5.0'
port = 443
systemname = 'Doomsday'
ip = '8.8.0.0'
_session = requests.session()
Headers = {}
_pincode = None
@classmethod
def parseUrl(self, path):
return self.LINE_HOST_DOMAIN + path
@classmethod
def get_json(self, url, allowHeader=False):
if allowHeader is False:
return json.loads(self._session.get(url).text)
else:
return json.loads(self._session.get(url, headers=self.Headers).text)
@classmethod
def set_Headers(self, argument, value):
self.Headers[argument] = value
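# The `url` class above acts as a namespace for shared constants, a requests
# session and default headers; e.g. url.parseUrl(url.LINE_API_QUERY_PATH_FIR)
# evaluates to 'https://gd2.line.naver.jp/S4'.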
| didim354/Bot | LineAlpha/LineApi/LineServer.py | Python | gpl-3.0 | 1,701 |
# Copyright (C) 2015-2022 by the RBniCS authors
#
# This file is part of RBniCS.
#
# SPDX-License-Identifier: LGPL-3.0-or-later
import os
import sys
import collections
from numpy import exp, isnan, log, max, mean, min, nan, zeros as Content
from rbnics.utils.io.csv_io import CSVIO
from rbnics.utils.io.folders import Folders
class PerformanceTable(object):
# Storage for class methods
_suppressed_groups = list()
_preprocessor_setitem = dict()
def __init__(self, testing_set):
self._columns = dict() # string to Content matrix
self._columns_operations = dict() # string to tuple
self._columns_not_implemented = dict() # string to bool
self._rows_not_implemented = dict() # string to dict of bool
self._groups = dict() # string to list
self._group_names_sorted = list()
self._len_testing_set = len(testing_set)
self._Nmin = 1
self._Nmax = 0
def set_Nmin(self, Nmin):
self._Nmin = Nmin
def set_Nmax(self, Nmax):
self._Nmax = Nmax
def add_column(self, column_name, group_name, operations):
assert self._Nmax > 0
assert self._Nmax >= self._Nmin
assert column_name not in self._columns and column_name not in self._columns_operations
self._columns[column_name] = Content((self._Nmax - self._Nmin + 1, self._len_testing_set))
self._columns_not_implemented[column_name] = None # will be set to a bool
self._rows_not_implemented[column_name] = {
n: None for n in range(self._Nmax - self._Nmin + 1)} # will be set to a bool
if group_name not in self._groups:
self._groups[group_name] = list()
self._group_names_sorted.append(group_name) # preserve the ordering provided by the user
self._groups[group_name].append(column_name)
if isinstance(operations, str):
self._columns_operations[column_name] = (operations,)
elif isinstance(operations, tuple):
self._columns_operations[column_name] = operations
else:
raise ValueError("Invalid operation in PerformanceTable")
@classmethod
def suppress_group(cls, group_name):
cls._suppressed_groups.append(group_name)
@classmethod
def clear_suppressed_groups(cls):
cls._suppressed_groups = list()
@classmethod
def preprocess_setitem(cls, group_name, function):
cls._preprocessor_setitem[group_name] = function
@classmethod
def clear_setitem_preprocessing(cls):
cls._preprocessor_setitem.clear()
def __getitem__(self, args):
assert len(args) == 3
column_name = args[0]
N = args[1]
mu_index = args[2]
assert self._columns_not_implemented[column_name] in (True, False)
assert self._rows_not_implemented[column_name][N - self._Nmin] in (True, False)
if (not self._columns_not_implemented[column_name]
and not self._rows_not_implemented[column_name][N - self._Nmin]):
return self._columns[column_name][N - self._Nmin, mu_index]
else:
return CustomNotImplementedAfterDiv
def __setitem__(self, args, value):
assert len(args) == 3
column_name = args[0]
N = args[1]
mu_index = args[2]
if is_not_implemented(value):
assert self._columns_not_implemented[column_name] in (None, True, False)
if self._columns_not_implemented[column_name] is None:
self._columns_not_implemented[column_name] = True
assert self._rows_not_implemented[column_name][N - self._Nmin] in (None, True)
if self._rows_not_implemented[column_name][N - self._Nmin] is None:
self._rows_not_implemented[column_name][N - self._Nmin] = True
else:
assert self._columns_not_implemented[column_name] in (None, True, False)
if self._columns_not_implemented[column_name] in (None, True):
self._columns_not_implemented[column_name] = False
assert self._rows_not_implemented[column_name][N - self._Nmin] in (None, False)
if self._rows_not_implemented[column_name][N - self._Nmin] is None:
self._rows_not_implemented[column_name][N - self._Nmin] = False
if column_name not in self._preprocessor_setitem:
self._columns[column_name][N - self._Nmin, mu_index] = value
else:
self._columns[column_name][N - self._Nmin, mu_index] = self._preprocessor_setitem[column_name](value)
def _process(self):
groups_content = collections.OrderedDict()
for group in self._group_names_sorted:
            # Skip suppressed groups
if group in self._suppressed_groups:
continue
# Populate all columns
columns = list()
for column in self._groups[group]:
assert self._columns_not_implemented[column] in (True, False)
if self._columns_not_implemented[column] is False:
columns.append(column)
if len(columns) == 0:
continue
# Storage for print
table_index = list() # of strings
table_header = dict() # from string to string
table_content = dict() # from string to Content array
column_size = dict() # from string to int
# First column should be the reduced space dimension
table_index.append("N")
table_header["N"] = "N"
table_content["N"] = list(range(self._Nmin, self._Nmax + 1))
column_size["N"] = max([max([len(str(x)) for x in table_content["N"]]), len("N")])
# Then fill in with postprocessed data
for column in columns:
for operation in self._columns_operations[column]:
# Set header
if operation in ("min", "max"):
current_table_header = operation + "(" + column + ")"
current_table_index = operation + "_" + column
elif operation == "mean":
current_table_header = "gmean(" + column + ")"
current_table_index = "gmean_" + column
else:
raise ValueError("Invalid operation in PerformanceTable")
table_index.append(current_table_index)
table_header[current_table_index] = current_table_header
# Compute the required operation of each column over the second index (testing set)
table_content[current_table_index] = Content((self._Nmax - self._Nmin + 1,))
for n in range(self._Nmin, self._Nmax + 1):
assert self._rows_not_implemented[column][n - self._Nmin] in (None, True, False)
if self._rows_not_implemented[column][n - self._Nmin] is False:
if operation == "min":
current_table_content = min(self._columns[column][n - self._Nmin, :])
elif operation == "mean":
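                                # Geometric mean over the testing set: zeros are clamped to
                                # machine epsilon so that log() stays finite, and an all-zero
                                # column short-circuits to 0.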
data = self._columns[column][n - self._Nmin, :]
if not data.any(): # all zeros
current_table_content = 0.
else:
data[data == 0.] = sys.float_info.epsilon
current_table_content = exp(mean(log(data)))
elif operation == "max":
current_table_content = max(self._columns[column][n - self._Nmin, :])
else:
raise ValueError("Invalid operation in PerformanceTable")
table_content[current_table_index][n - self._Nmin] = current_table_content
else:
table_content[current_table_index][n - self._Nmin] = nan
# Get the width of the columns
column_size[current_table_index] = max([max([
len(str(x)) for x in table_content[current_table_index]]), len(current_table_header)])
# Save content
assert group not in groups_content
groups_content[group] = (table_index, table_header, table_content, column_size)
return groups_content
def __str__(self):
groups_content = self._process()
output = ""
for (group, (table_index, table_header, table_content, column_size)) in groups_content.items():
table_index_without_N = table_index[1:]
# Prepare formatter for string conversion
formatter = ""
for (column_index, column_name) in enumerate(table_index):
formatter += "{" + str(column_index) + ":<{" + column_name + "}}"
if column_index < len(table_index) - 1:
formatter += "\t"
# Print the header
current_line = list()
for t in table_index:
current_line.append(table_header[t])
output += formatter.format(*current_line, **column_size) + "\n"
            # Print the current row, but only if not all of its content was set to NotImplemented
for n in range(self._Nmin, self._Nmax + 1):
current_line = list()
all_not_implemented = all(isnan(
table_content[t][n - self._Nmin]) for t in table_index_without_N)
assert any(isnan(
table_content[t][n - self._Nmin]) for t in table_index_without_N) is all_not_implemented
if not all_not_implemented:
for t in table_index:
current_line.append(table_content[t][n - self._Nmin])
output += formatter.format(*current_line, **column_size) + "\n"
output += "\n"
return output[:-2] # remove the last two newlines
def save(self, directory, filename):
full_directory = Folders.Folder(os.path.join(str(directory), filename))
full_directory.create()
groups_content = self._process()
for (group, (table_index, table_header, table_content, _)) in groups_content.items():
table_index_without_N = table_index[1:]
current_file = list()
# Store the header
current_file.append([table_header[t] for t in table_index])
            # Store the current row, but only if not all of its content was set to NotImplemented
for n in range(self._Nmin, self._Nmax + 1):
all_not_implemented = all(isnan(
table_content[t][n - self._Nmin]) for t in table_index_without_N)
assert any(isnan(
table_content[t][n - self._Nmin]) for t in table_index_without_N) is all_not_implemented
if not all_not_implemented:
current_file.append([table_content[t][n - self._Nmin] for t in table_index])
# Save
CSVIO.save_file(current_file, full_directory, group)
def load(self, directory, filename):
raise RuntimeError("PerformanceTable.load has not been implemented yet")
class CustomNotImplementedType(object):
def __init__(self):
pass
CustomNotImplemented = CustomNotImplementedType()
def is_not_implemented(value):
if value is NotImplemented:
return True
elif value is CustomNotImplemented:
return True
elif hasattr(value, "__iter__"):
each_is_not_implemented = [is_not_implemented(v) for v in value]
assert all(b == each_is_not_implemented[0] for b in each_is_not_implemented)
return each_is_not_implemented[0]
else:
return False
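# Dividing by (or into) the sentinel below, e.g. when post-processing a quantity
# that was never computed, propagates the CustomNotImplemented marker instead of
# raising an error.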
class CustomNotImplementedAfterDivType(CustomNotImplementedType):
def __init__(self):
pass
def __truediv__(self, other):
return CustomNotImplemented
def __rtruediv__(self, other):
return CustomNotImplemented
def __itruediv__(self, other):
return CustomNotImplemented
CustomNotImplementedAfterDiv = CustomNotImplementedAfterDivType()
| mathLab/RBniCS | rbnics/utils/io/performance_table.py | Python | lgpl-3.0 | 12,306 |
import pytest
from mock import Mock
from spacy.matcher import Matcher
from spacy.tokens import Doc, Token, Span
from ..doc.test_underscore import clean_underscore # noqa: F401
@pytest.fixture
def matcher(en_vocab):
rules = {
"JS": [[{"ORTH": "JavaScript"}]],
"GoogleNow": [[{"ORTH": "Google"}, {"ORTH": "Now"}]],
"Java": [[{"LOWER": "java"}]],
}
matcher = Matcher(en_vocab)
for key, patterns in rules.items():
matcher.add(key, patterns)
return matcher
def test_matcher_from_api_docs(en_vocab):
matcher = Matcher(en_vocab)
pattern = [{"ORTH": "test"}]
assert len(matcher) == 0
matcher.add("Rule", [pattern])
assert len(matcher) == 1
matcher.remove("Rule")
assert "Rule" not in matcher
matcher.add("Rule", [pattern])
assert "Rule" in matcher
on_match, patterns = matcher.get("Rule")
assert len(patterns[0])
def test_matcher_from_usage_docs(en_vocab):
text = "Wow 😀 This is really cool! 😂 😂"
doc = Doc(en_vocab, words=text.split(" "))
pos_emoji = ["😀", "😃", "😂", "🤣", "😊", "😍"]
pos_patterns = [[{"ORTH": emoji}] for emoji in pos_emoji]
def label_sentiment(matcher, doc, i, matches):
match_id, start, end = matches[i]
if doc.vocab.strings[match_id] == "HAPPY":
doc.sentiment += 0.1
span = doc[start:end]
with doc.retokenize() as retokenizer:
retokenizer.merge(span)
token = doc[start]
token.vocab[token.text].norm_ = "happy emoji"
matcher = Matcher(en_vocab)
matcher.add("HAPPY", pos_patterns, on_match=label_sentiment)
matcher(doc)
assert doc.sentiment != 0
assert doc[1].norm_ == "happy emoji"
def test_matcher_len_contains(matcher):
assert len(matcher) == 3
matcher.add("TEST", [[{"ORTH": "test"}]])
assert "TEST" in matcher
assert "TEST2" not in matcher
def test_matcher_add_new_api(en_vocab):
doc = Doc(en_vocab, words=["a", "b"])
patterns = [[{"TEXT": "a"}], [{"TEXT": "a"}, {"TEXT": "b"}]]
matcher = Matcher(en_vocab)
on_match = Mock()
matcher = Matcher(en_vocab)
matcher.add("NEW_API", patterns)
assert len(matcher(doc)) == 2
matcher = Matcher(en_vocab)
on_match = Mock()
matcher.add("NEW_API_CALLBACK", patterns, on_match=on_match)
assert len(matcher(doc)) == 2
assert on_match.call_count == 2
def test_matcher_no_match(matcher):
doc = Doc(matcher.vocab, words=["I", "like", "cheese", "."])
assert matcher(doc) == []
def test_matcher_match_start(matcher):
doc = Doc(matcher.vocab, words=["JavaScript", "is", "good"])
assert matcher(doc) == [(matcher.vocab.strings["JS"], 0, 1)]
def test_matcher_match_end(matcher):
words = ["I", "like", "java"]
doc = Doc(matcher.vocab, words=words)
assert matcher(doc) == [(doc.vocab.strings["Java"], 2, 3)]
def test_matcher_match_middle(matcher):
words = ["I", "like", "Google", "Now", "best"]
doc = Doc(matcher.vocab, words=words)
assert matcher(doc) == [(doc.vocab.strings["GoogleNow"], 2, 4)]
def test_matcher_match_multi(matcher):
words = ["I", "like", "Google", "Now", "and", "java", "best"]
doc = Doc(matcher.vocab, words=words)
assert matcher(doc) == [
(doc.vocab.strings["GoogleNow"], 2, 4),
(doc.vocab.strings["Java"], 5, 6),
]
def test_matcher_empty_dict(en_vocab):
"""Test matcher allows empty token specs, meaning match on any token."""
matcher = Matcher(en_vocab)
doc = Doc(matcher.vocab, words=["a", "b", "c"])
matcher.add("A.C", [[{"ORTH": "a"}, {}, {"ORTH": "c"}]])
matches = matcher(doc)
assert len(matches) == 1
assert matches[0][1:] == (0, 3)
matcher = Matcher(en_vocab)
matcher.add("A.", [[{"ORTH": "a"}, {}]])
matches = matcher(doc)
assert matches[0][1:] == (0, 2)
def test_matcher_operator_shadow(en_vocab):
matcher = Matcher(en_vocab)
doc = Doc(matcher.vocab, words=["a", "b", "c"])
pattern = [{"ORTH": "a"}, {"IS_ALPHA": True, "OP": "+"}, {"ORTH": "c"}]
matcher.add("A.C", [pattern])
matches = matcher(doc)
assert len(matches) == 1
assert matches[0][1:] == (0, 3)
def test_matcher_match_zero(matcher):
words1 = 'He said , " some words " ...'.split()
words2 = 'He said , " some three words " ...'.split()
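    # "OP": "!" negates a token pattern (it must match zero times), so pattern1
    # only matches a quoted span whose inner tokens are not punctuation.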
pattern1 = [
{"ORTH": '"'},
{"OP": "!", "IS_PUNCT": True},
{"OP": "!", "IS_PUNCT": True},
{"ORTH": '"'},
]
pattern2 = [
{"ORTH": '"'},
{"IS_PUNCT": True},
{"IS_PUNCT": True},
{"IS_PUNCT": True},
{"ORTH": '"'},
]
matcher.add("Quote", [pattern1])
doc = Doc(matcher.vocab, words=words1)
assert len(matcher(doc)) == 1
doc = Doc(matcher.vocab, words=words2)
assert len(matcher(doc)) == 0
matcher.add("Quote", [pattern2])
assert len(matcher(doc)) == 0
def test_matcher_match_zero_plus(matcher):
words = 'He said , " some words " ...'.split()
pattern = [{"ORTH": '"'}, {"OP": "*", "IS_PUNCT": False}, {"ORTH": '"'}]
matcher = Matcher(matcher.vocab)
matcher.add("Quote", [pattern])
doc = Doc(matcher.vocab, words=words)
assert len(matcher(doc)) == 1
def test_matcher_match_one_plus(matcher):
control = Matcher(matcher.vocab)
control.add("BasicPhilippe", [[{"ORTH": "Philippe"}]])
doc = Doc(control.vocab, words=["Philippe", "Philippe"])
m = control(doc)
assert len(m) == 2
pattern = [{"ORTH": "Philippe"}, {"ORTH": "Philippe", "OP": "+"}]
matcher.add("KleenePhilippe", [pattern])
m = matcher(doc)
assert len(m) == 1
def test_matcher_any_token_operator(en_vocab):
"""Test that patterns with "any token" {} work with operators."""
matcher = Matcher(en_vocab)
matcher.add("TEST", [[{"ORTH": "test"}, {"OP": "*"}]])
doc = Doc(en_vocab, words=["test", "hello", "world"])
matches = [doc[start:end].text for _, start, end in matcher(doc)]
assert len(matches) == 3
assert matches[0] == "test"
assert matches[1] == "test hello"
assert matches[2] == "test hello world"
@pytest.mark.usefixtures("clean_underscore")
def test_matcher_extension_attribute(en_vocab):
matcher = Matcher(en_vocab)
get_is_fruit = lambda token: token.text in ("apple", "banana")
Token.set_extension("is_fruit", getter=get_is_fruit, force=True)
pattern = [{"ORTH": "an"}, {"_": {"is_fruit": True}}]
matcher.add("HAVING_FRUIT", [pattern])
doc = Doc(en_vocab, words=["an", "apple"])
matches = matcher(doc)
assert len(matches) == 1
doc = Doc(en_vocab, words=["an", "aardvark"])
matches = matcher(doc)
assert len(matches) == 0
def test_matcher_set_value(en_vocab):
matcher = Matcher(en_vocab)
pattern = [{"ORTH": {"IN": ["an", "a"]}}]
matcher.add("A_OR_AN", [pattern])
doc = Doc(en_vocab, words=["an", "a", "apple"])
matches = matcher(doc)
assert len(matches) == 2
doc = Doc(en_vocab, words=["aardvark"])
matches = matcher(doc)
assert len(matches) == 0
def test_matcher_set_value_operator(en_vocab):
matcher = Matcher(en_vocab)
pattern = [{"ORTH": {"IN": ["a", "the"]}, "OP": "?"}, {"ORTH": "house"}]
matcher.add("DET_HOUSE", [pattern])
doc = Doc(en_vocab, words=["In", "a", "house"])
matches = matcher(doc)
assert len(matches) == 2
doc = Doc(en_vocab, words=["my", "house"])
matches = matcher(doc)
assert len(matches) == 1
def test_matcher_subset_value_operator(en_vocab):
matcher = Matcher(en_vocab)
pattern = [{"MORPH": {"IS_SUBSET": ["Feat=Val", "Feat2=Val2"]}}]
matcher.add("M", [pattern])
doc = Doc(en_vocab, words=["a", "b", "c"])
assert len(matcher(doc)) == 3
doc[0].set_morph("Feat=Val")
assert len(matcher(doc)) == 3
doc[0].set_morph("Feat=Val|Feat2=Val2")
assert len(matcher(doc)) == 3
doc[0].set_morph("Feat=Val|Feat2=Val2|Feat3=Val3")
assert len(matcher(doc)) == 2
doc[0].set_morph("Feat=Val|Feat2=Val2|Feat3=Val3|Feat4=Val4")
assert len(matcher(doc)) == 2
# IS_SUBSET acts like "IN" for attrs other than MORPH
matcher = Matcher(en_vocab)
pattern = [{"TAG": {"IS_SUBSET": ["A", "B"]}}]
matcher.add("M", [pattern])
doc = Doc(en_vocab, words=["a", "b", "c"])
doc[0].tag_ = "A"
assert len(matcher(doc)) == 1
# IS_SUBSET with an empty list matches nothing
matcher = Matcher(en_vocab)
pattern = [{"TAG": {"IS_SUBSET": []}}]
matcher.add("M", [pattern])
doc = Doc(en_vocab, words=["a", "b", "c"])
doc[0].tag_ = "A"
assert len(matcher(doc)) == 0
def test_matcher_superset_value_operator(en_vocab):
matcher = Matcher(en_vocab)
pattern = [{"MORPH": {"IS_SUPERSET": ["Feat=Val", "Feat2=Val2", "Feat3=Val3"]}}]
matcher.add("M", [pattern])
doc = Doc(en_vocab, words=["a", "b", "c"])
assert len(matcher(doc)) == 0
doc[0].set_morph("Feat=Val|Feat2=Val2")
assert len(matcher(doc)) == 0
doc[0].set_morph("Feat=Val|Feat2=Val2|Feat3=Val3")
assert len(matcher(doc)) == 1
doc[0].set_morph("Feat=Val|Feat2=Val2|Feat3=Val3|Feat4=Val4")
assert len(matcher(doc)) == 1
# IS_SUPERSET with more than one value only matches for MORPH
matcher = Matcher(en_vocab)
pattern = [{"TAG": {"IS_SUPERSET": ["A", "B"]}}]
matcher.add("M", [pattern])
doc = Doc(en_vocab, words=["a", "b", "c"])
doc[0].tag_ = "A"
assert len(matcher(doc)) == 0
# IS_SUPERSET with one value is the same as ==
matcher = Matcher(en_vocab)
pattern = [{"TAG": {"IS_SUPERSET": ["A"]}}]
matcher.add("M", [pattern])
doc = Doc(en_vocab, words=["a", "b", "c"])
doc[0].tag_ = "A"
assert len(matcher(doc)) == 1
# IS_SUPERSET with an empty value matches everything
matcher = Matcher(en_vocab)
pattern = [{"TAG": {"IS_SUPERSET": []}}]
matcher.add("M", [pattern])
doc = Doc(en_vocab, words=["a", "b", "c"])
doc[0].tag_ = "A"
assert len(matcher(doc)) == 3
def test_matcher_morph_handling(en_vocab):
# order of features in pattern doesn't matter
matcher = Matcher(en_vocab)
pattern1 = [{"MORPH": {"IN": ["Feat1=Val1|Feat2=Val2"]}}]
pattern2 = [{"MORPH": {"IN": ["Feat2=Val2|Feat1=Val1"]}}]
matcher.add("M", [pattern1])
matcher.add("N", [pattern2])
doc = Doc(en_vocab, words=["a", "b", "c"])
assert len(matcher(doc)) == 0
doc[0].set_morph("Feat2=Val2|Feat1=Val1")
assert len(matcher(doc)) == 2
doc[0].set_morph("Feat1=Val1|Feat2=Val2")
assert len(matcher(doc)) == 2
# multiple values are split
matcher = Matcher(en_vocab)
pattern1 = [{"MORPH": {"IS_SUPERSET": ["Feat1=Val1", "Feat2=Val2"]}}]
pattern2 = [{"MORPH": {"IS_SUPERSET": ["Feat1=Val1", "Feat1=Val3", "Feat2=Val2"]}}]
matcher.add("M", [pattern1])
matcher.add("N", [pattern2])
doc = Doc(en_vocab, words=["a", "b", "c"])
assert len(matcher(doc)) == 0
doc[0].set_morph("Feat2=Val2,Val3|Feat1=Val1")
assert len(matcher(doc)) == 1
doc[0].set_morph("Feat1=Val1,Val3|Feat2=Val2")
assert len(matcher(doc)) == 2
def test_matcher_regex(en_vocab):
matcher = Matcher(en_vocab)
pattern = [{"ORTH": {"REGEX": r"(?:a|an)"}}]
matcher.add("A_OR_AN", [pattern])
doc = Doc(en_vocab, words=["an", "a", "hi"])
matches = matcher(doc)
assert len(matches) == 2
doc = Doc(en_vocab, words=["bye"])
matches = matcher(doc)
assert len(matches) == 0
def test_matcher_regex_shape(en_vocab):
matcher = Matcher(en_vocab)
pattern = [{"SHAPE": {"REGEX": r"^[^x]+$"}}]
matcher.add("NON_ALPHA", [pattern])
doc = Doc(en_vocab, words=["99", "problems", "!"])
matches = matcher(doc)
assert len(matches) == 2
doc = Doc(en_vocab, words=["bye"])
matches = matcher(doc)
assert len(matches) == 0
@pytest.mark.parametrize(
"cmp, bad",
[
("==", ["a", "aaa"]),
("!=", ["aa"]),
(">=", ["a"]),
("<=", ["aaa"]),
(">", ["a", "aa"]),
("<", ["aa", "aaa"]),
],
)
def test_matcher_compare_length(en_vocab, cmp, bad):
matcher = Matcher(en_vocab)
pattern = [{"LENGTH": {cmp: 2}}]
matcher.add("LENGTH_COMPARE", [pattern])
doc = Doc(en_vocab, words=["a", "aa", "aaa"])
matches = matcher(doc)
assert len(matches) == len(doc) - len(bad)
doc = Doc(en_vocab, words=bad)
matches = matcher(doc)
assert len(matches) == 0
def test_matcher_extension_set_membership(en_vocab):
matcher = Matcher(en_vocab)
get_reversed = lambda token: "".join(reversed(token.text))
Token.set_extension("reversed", getter=get_reversed, force=True)
pattern = [{"_": {"reversed": {"IN": ["eyb", "ih"]}}}]
matcher.add("REVERSED", [pattern])
doc = Doc(en_vocab, words=["hi", "bye", "hello"])
matches = matcher(doc)
assert len(matches) == 2
doc = Doc(en_vocab, words=["aardvark"])
matches = matcher(doc)
assert len(matches) == 0
def test_matcher_basic_check(en_vocab):
matcher = Matcher(en_vocab)
# Potential mistake: pass in pattern instead of list of patterns
pattern = [{"TEXT": "hello"}, {"TEXT": "world"}]
with pytest.raises(ValueError):
matcher.add("TEST", pattern)
def test_attr_pipeline_checks(en_vocab):
doc1 = Doc(en_vocab, words=["Test"])
doc1[0].dep_ = "ROOT"
doc2 = Doc(en_vocab, words=["Test"])
doc2[0].tag_ = "TAG"
doc2[0].pos_ = "X"
doc2[0].set_morph("Feat=Val")
doc2[0].lemma_ = "LEMMA"
doc3 = Doc(en_vocab, words=["Test"])
# DEP requires DEP
matcher = Matcher(en_vocab)
matcher.add("TEST", [[{"DEP": "a"}]])
matcher(doc1)
with pytest.raises(ValueError):
matcher(doc2)
with pytest.raises(ValueError):
matcher(doc3)
# errors can be suppressed if desired
matcher(doc2, allow_missing=True)
matcher(doc3, allow_missing=True)
# TAG, POS, LEMMA require those values
for attr in ("TAG", "POS", "LEMMA"):
matcher = Matcher(en_vocab)
matcher.add("TEST", [[{attr: "a"}]])
matcher(doc2)
with pytest.raises(ValueError):
matcher(doc1)
with pytest.raises(ValueError):
matcher(doc3)
# TEXT/ORTH only require tokens
matcher = Matcher(en_vocab)
matcher.add("TEST", [[{"ORTH": "a"}]])
matcher(doc1)
matcher(doc2)
matcher(doc3)
matcher = Matcher(en_vocab)
matcher.add("TEST", [[{"TEXT": "a"}]])
matcher(doc1)
matcher(doc2)
matcher(doc3)
@pytest.mark.parametrize(
"pattern,text",
[
([{"IS_ALPHA": True}], "a"),
([{"IS_ASCII": True}], "a"),
([{"IS_DIGIT": True}], "1"),
([{"IS_LOWER": True}], "a"),
([{"IS_UPPER": True}], "A"),
([{"IS_TITLE": True}], "Aaaa"),
([{"IS_PUNCT": True}], "."),
([{"IS_SPACE": True}], "\n"),
([{"IS_BRACKET": True}], "["),
([{"IS_QUOTE": True}], '"'),
([{"IS_LEFT_PUNCT": True}], "``"),
([{"IS_RIGHT_PUNCT": True}], "''"),
([{"IS_STOP": True}], "the"),
([{"SPACY": True}], "the"),
([{"LIKE_NUM": True}], "1"),
([{"LIKE_URL": True}], "http://example.com"),
([{"LIKE_EMAIL": True}], "[email protected]"),
],
)
def test_matcher_schema_token_attributes(en_vocab, pattern, text):
matcher = Matcher(en_vocab)
doc = Doc(en_vocab, words=text.split(" "))
matcher.add("Rule", [pattern])
assert len(matcher) == 1
matches = matcher(doc)
assert len(matches) == 1
def test_matcher_valid_callback(en_vocab):
"""Test that on_match can only be None or callable."""
matcher = Matcher(en_vocab)
with pytest.raises(ValueError):
matcher.add("TEST", [[{"TEXT": "test"}]], on_match=[])
matcher(Doc(en_vocab, words=["test"]))
def test_matcher_callback(en_vocab):
mock = Mock()
matcher = Matcher(en_vocab)
pattern = [{"ORTH": "test"}]
matcher.add("Rule", [pattern], on_match=mock)
doc = Doc(en_vocab, words=["This", "is", "a", "test", "."])
matches = matcher(doc)
mock.assert_called_once_with(matcher, doc, 0, matches)
def test_matcher_span(matcher):
text = "JavaScript is good but Java is better"
doc = Doc(matcher.vocab, words=text.split())
span_js = doc[:3]
span_java = doc[4:]
assert len(matcher(doc)) == 2
assert len(matcher(span_js)) == 1
assert len(matcher(span_java)) == 1
def test_matcher_as_spans(matcher):
"""Test the new as_spans=True API."""
text = "JavaScript is good but Java is better"
doc = Doc(matcher.vocab, words=text.split())
matches = matcher(doc, as_spans=True)
assert len(matches) == 2
assert isinstance(matches[0], Span)
assert matches[0].text == "JavaScript"
assert matches[0].label_ == "JS"
assert isinstance(matches[1], Span)
assert matches[1].text == "Java"
assert matches[1].label_ == "Java"
def test_matcher_deprecated(matcher):
doc = Doc(matcher.vocab, words=["hello", "world"])
with pytest.warns(DeprecationWarning) as record:
for _ in matcher.pipe([doc]):
pass
assert record.list
assert "spaCy v3.0" in str(record.list[0].message)
def test_matcher_remove_zero_operator(en_vocab):
matcher = Matcher(en_vocab)
pattern = [{"OP": "!"}]
matcher.add("Rule", [pattern])
doc = Doc(en_vocab, words=["This", "is", "a", "test", "."])
matches = matcher(doc)
assert len(matches) == 0
assert "Rule" in matcher
matcher.remove("Rule")
assert "Rule" not in matcher
def test_matcher_no_zero_length(en_vocab):
doc = Doc(en_vocab, words=["a", "b"], tags=["A", "B"])
matcher = Matcher(en_vocab)
matcher.add("TEST", [[{"TAG": "C", "OP": "?"}]])
assert len(matcher(doc)) == 0
| spacy-io/spaCy | spacy/tests/matcher/test_matcher_api.py | Python | mit | 17,725 |
from plotly.basedatatypes import BaseLayoutHierarchyType as _BaseLayoutHierarchyType
import copy as _copy
class Tickfont(_BaseLayoutHierarchyType):
# class properties
# --------------------
_parent_path_str = "layout.xaxis"
_path_str = "layout.xaxis.tickfont"
_valid_props = {"color", "family", "size"}
# color
# -----
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
# family
# ------
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser will only be able to apply a font if
it is available on the system which it operates. Provide
multiple font families, separated by commas, to indicate the
preference in which to apply fonts if they aren't available on
the system. The Chart Studio Cloud (at https://chart-
studio.plotly.com or on-premise) generates images on a server,
where only a select number of fonts are installed and
        supported. These include "Arial", "Balto", "Courier New",
        "Droid Sans", "Droid Serif", "Droid Sans Mono", "Gravitas
One", "Old Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
The 'family' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
# size
# ----
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
Returns
-------
int|float
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The Chart
Studio Cloud (at https://chart-studio.plotly.com or on-
premise) generates images on a server, where only a
select number of fonts are installed and supported.
These include "Arial", "Balto", "Courier New", "Droid
            Sans", "Droid Serif", "Droid Sans Mono", "Gravitas
One", "Old Standard TT", "Open Sans", "Overpass", "PT
Sans Narrow", "Raleway", "Times New Roman".
size
"""
def __init__(self, arg=None, color=None, family=None, size=None, **kwargs):
"""
Construct a new Tickfont object
Sets the tick font.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.layout.xaxis.Tickfont`
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The Chart
Studio Cloud (at https://chart-studio.plotly.com or on-
premise) generates images on a server, where only a
select number of fonts are installed and supported.
These include "Arial", "Balto", "Courier New", "Droid
            Sans", "Droid Serif", "Droid Sans Mono", "Gravitas
One", "Old Standard TT", "Open Sans", "Overpass", "PT
Sans Narrow", "Raleway", "Times New Roman".
size
Returns
-------
Tickfont
"""
super(Tickfont, self).__init__("tickfont")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.layout.xaxis.Tickfont
constructor must be a dict or
an instance of :class:`plotly.graph_objs.layout.xaxis.Tickfont`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("color", None)
_v = color if color is not None else _v
if _v is not None:
self["color"] = _v
_v = arg.pop("family", None)
_v = family if family is not None else _v
if _v is not None:
self["family"] = _v
_v = arg.pop("size", None)
_v = size if size is not None else _v
if _v is not None:
self["size"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
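# Minimal usage sketch (not part of the generated class): the tick font of an
# x-axis is typically set through the parent figure, e.g.
#   fig.update_xaxes(tickfont=dict(family="Arial", size=12, color="grey"))
# and plotly validates the dict against this Tickfont type.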
| plotly/python-api | packages/python/plotly/plotly/graph_objs/layout/xaxis/_tickfont.py | Python | mit | 8,438 |
from lxml import etree
import sys
data = open(sys.argv[1],'r').read()
doc = etree.XML(data,etree.XMLParser(remove_blank_text=True))
for parent in doc.xpath('//*[./*]'): # Search for parent elements
parent[:] = sorted(parent,key=lambda x: x.get("name"))
print etree.tostring(doc,pretty_print=True) | CG-F16-24-Rutgers/steersuite-rutgers | steerstats/tools/sortXMLOnAttribute.py | Python | gpl-3.0 | 301 |
# Copyright (c) 2012, the Dart project authors. Please see the AUTHORS file
# for details. All rights reserved. Use of this source code is governed by a
# BSD-style license that can be found in the LICENSE file.
import gfm
from test_case import TestCase
class TestSemiSaneLists(TestCase):
def setUp(self):
self.semi_sane_lists = gfm.SemiSaneListExtension([])
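    # SemiSaneListExtension keeps "*" and "1." items in separate <ul>/<ol> blocks
    # instead of letting Markdown fold consecutive bullets into a single list.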
def test_doesnt_join_ul_and_ol(self):
self.assert_renders("""
<ul>
<li>foo</li>
<li>bar</li>
</ul>
<ol>
<li>baz</li>
<li>bip</li>
</ol>
""", """
* foo
* bar
1. baz
1. bip
""", [self.semi_sane_lists])
def test_doesnt_require_blank_line_between_list_types(self):
self.assert_renders("""
<ol>
<li>ordered</li>
<li>also ordered</li>
</ol>
""", """
1. ordered
* also ordered
""", [self.semi_sane_lists])
| googlearchive/py-gfm | tests/test_semi_sane_lists.py | Python | bsd-3-clause | 977 |
# -*- coding: utf-8 -*-
# Copyright (c) 2018, Frappe Technologies Pvt. Ltd. and Contributors
# See license.txt
from __future__ import unicode_literals
import unittest
import frappe
from erpnext.erpnext_integrations.doctype.plaid_settings.plaid_settings import plaid_configuration, add_account_type, add_account_subtype, new_bank_transaction, add_bank_accounts
import json
from frappe.utils.response import json_handler
from erpnext.accounts.doctype.journal_entry.journal_entry import get_default_bank_cash_account
class TestPlaidSettings(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
for bt in frappe.get_all("Bank Transaction"):
doc = frappe.get_doc("Bank Transaction", bt.name)
doc.cancel()
doc.delete()
for ba in frappe.get_all("Bank Account"):
frappe.get_doc("Bank Account", ba.name).delete()
for at in frappe.get_all("Bank Account Type"):
frappe.get_doc("Bank Account Type", at.name).delete()
for ast in frappe.get_all("Bank Account Subtype"):
frappe.get_doc("Bank Account Subtype", ast.name).delete()
def test_plaid_disabled(self):
frappe.db.set_value("Plaid Settings", None, "enabled", 0)
self.assertTrue(plaid_configuration() == "disabled")
def test_add_account_type(self):
add_account_type("brokerage")
self.assertEqual(frappe.get_doc("Bank Account Type", "brokerage").name, "brokerage")
def test_add_account_subtype(self):
add_account_subtype("loan")
self.assertEqual(frappe.get_doc("Bank Account Subtype", "loan").name, "loan")
def test_default_bank_account(self):
if not frappe.db.exists("Bank", "Citi"):
frappe.get_doc({
"doctype": "Bank",
"bank_name": "Citi"
}).insert()
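		# Hand-built stand-in roughly mirroring the account payload that Plaid Link
		# returns after the user selects an institution and accounts.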
bank_accounts = {
'account': {
'subtype': 'checking',
'mask': '0000',
'type': 'depository',
'id': '6GbM6RRQgdfy3lAqGz4JUnpmR948WZFg8DjQK',
'name': 'Plaid Checking'
},
'account_id': '6GbM6RRQgdfy3lAqGz4JUnpmR948WZFg8DjQK',
'link_session_id': 'db673d75-61aa-442a-864f-9b3f174f3725',
'accounts': [{
'type': 'depository',
'subtype': 'checking',
'mask': '0000',
'id': '6GbM6RRQgdfy3lAqGz4JUnpmR948WZFg8DjQK',
'name': 'Plaid Checking'
}],
'institution': {
'institution_id': 'ins_6',
'name': 'Citi'
}
}
bank = json.dumps(frappe.get_doc("Bank", "Citi").as_dict(), default=json_handler)
company = frappe.db.get_single_value('Global Defaults', 'default_company')
frappe.db.set_value("Company", company, "default_bank_account", None)
self.assertRaises(frappe.ValidationError, add_bank_accounts, response=bank_accounts, bank=bank, company=company)
def test_new_transaction(self):
if not frappe.db.exists("Bank", "Citi"):
frappe.get_doc({
"doctype": "Bank",
"bank_name": "Citi"
}).insert()
bank_accounts = {
'account': {
'subtype': 'checking',
'mask': '0000',
'type': 'depository',
'id': '6GbM6RRQgdfy3lAqGz4JUnpmR948WZFg8DjQK',
'name': 'Plaid Checking'
},
'account_id': '6GbM6RRQgdfy3lAqGz4JUnpmR948WZFg8DjQK',
'link_session_id': 'db673d75-61aa-442a-864f-9b3f174f3725',
'accounts': [{
'type': 'depository',
'subtype': 'checking',
'mask': '0000',
'id': '6GbM6RRQgdfy3lAqGz4JUnpmR948WZFg8DjQK',
'name': 'Plaid Checking'
}],
'institution': {
'institution_id': 'ins_6',
'name': 'Citi'
}
}
bank = json.dumps(frappe.get_doc("Bank", "Citi").as_dict(), default=json_handler)
company = frappe.db.get_single_value('Global Defaults', 'default_company')
if frappe.db.get_value("Company", company, "default_bank_account") is None:
frappe.db.set_value("Company", company, "default_bank_account", get_default_bank_cash_account(company, "Cash").get("account"))
add_bank_accounts(bank_accounts, bank, company)
transactions = {
'account_owner': None,
'category': ['Food and Drink', 'Restaurants'],
'account_id': 'b4Jkp1LJDZiPgojpr1ansXJrj5Q6w9fVmv6ov',
'pending_transaction_id': None,
'transaction_id': 'x374xPa7DvUewqlR5mjNIeGK8r8rl3Sn647LM',
'unofficial_currency_code': None,
'name': 'INTRST PYMNT',
'transaction_type': 'place',
'amount': -4.22,
'location': {
'city': None,
'zip': None,
'store_number': None,
'lon': None,
'state': None,
'address': None,
'lat': None
},
'payment_meta': {
'reference_number': None,
'payer': None,
'payment_method': None,
'reason': None,
'payee': None,
'ppd_id': None,
'payment_processor': None,
'by_order_of': None
},
'date': '2017-12-22',
'category_id': '13005000',
'pending': False,
'iso_currency_code': 'USD'
}
new_bank_transaction(transactions)
		self.assertTrue(len(frappe.get_all("Bank Transaction")) == 1)
| gsnbng/erpnext | erpnext/erpnext_integrations/doctype/plaid_settings/test_plaid_settings.py | Python | agpl-3.0 | 4,706 |
# vim: ts=4:sw=4:expandtabs
__author__ = '[email protected]'
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _
from email_utils.tasks import send_mail
RESEND_EMAIL_PERMISSION = 'can_resend_email'
@python_2_unicode_compatible
class EmailMessage(models.Model):
RESEND_EMAIL_PERMISSION = RESEND_EMAIL_PERMISSION
to = models.CharField(max_length=256)
from_address = models.CharField(max_length=256, verbose_name=_('From'))
subject = models.CharField(max_length=256, blank=True)
body = models.TextField()
html_body = models.TextField(blank=True, verbose_name=_('HTML body'))
date_sent = models.DateTimeField()
delivery_successful = models.BooleanField()
error_message = models.CharField(max_length=256, blank=True)
class Meta:
app_label = 'email_utils'
verbose_name = _('Email message')
verbose_name_plural = _('Email messages')
permissions = [
(RESEND_EMAIL_PERMISSION, _('Can resend email')),
]
def __str__(self):
return "{self.date_sent:%Y-%m-%d %H:%M:%S} - {self.subject}".format(self=self)
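    # resend() re-queues the message through email_utils.tasks.send_mail
    # (apply_async suggests a Celery task); the positional args mirror Django's
    # send_mail(subject, message, from_email, recipient_list) and the HTML body
    # is passed as a keyword argument.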
def resend(self):
send_mail.apply_async((
self.subject,
self.body,
self.from_address,
self.to
), {'html_message': self.html_body})
| E7ernal/quizwhiz | email_utils/models/EmailMessage.py | Python | mit | 1,404 |
#!/usr/bin/env python
import os, sys, gzip, glob, re, jobs
def find_files(count = 1):
successfiles = []
for inputsize in jobs.inputsizes:
for ncores in jobs.ncores:
gtype = '%s-%s' % (inputsize, ncores)
globpath = '/home/tcarlson/prog/benchmarks/results/output-instrumented/elvis1-performance/*%s' % (gtype)
#print globpath
for f in glob.glob(globpath):
found_all=True
#print 'glob=', f
for ttype,bbfile in (('barrier_reuse_distance','barrier_reuse_distance-default-1/barrier_reuse_distance.txt.bb.gz'),):
p=os.path.join(f,'%s-default-1/success' % (ttype))
if not os.path.isfile(p):
#print 'path not found=', p
found_all = False
else:
#print 'PATH FOUND'
pass
if found_all:
for n,d in ((1,5),(1,2)):
dir = 'barrier_reuse_distance-%d_%d-default-1' % (n,d)
if not os.path.isfile(os.path.join(f,dir,'success')):
successfiles.append((f,(n,d),dir))
if len(successfiles) >= count:
return successfiles
return successfiles
def run(count = float('Inf')):
successfiles = find_files(count = count)
print 'Weight Reuse Distances: Found %d new results to weight.' % len(successfiles)
for f,(num,denom),dir in successfiles:
print f, num, denom, dir
powval = float(num) / float(denom)
#bbv_fn=os.path.join(f,'barrier_bbv-default-1/barrier_bbv.txt_count.gz')
reuse_fn=os.path.join(f,'barrier_reuse_distance-default-1/barrier_reuse_distance.txt')
#insn_fn=os.path.join(f,'barrier_bbv-default-1/barrier_bbv.txt_insncount.gz')
out_dir=os.path.join(f,dir)
out_fn=os.path.join(out_dir,'barrier_reuse_distance.txt.bb.gz')
try:
os.makedirs(out_dir)
except OSError:
pass
#if not os.path.isfile(bbv_fn):
# print "Combine: Warning: Unable to file a file:", bbv_fn
# continue
if not os.path.isfile(reuse_fn):
print "Combine: Warning: Unable to file a file:", reuse_fn
continue
#if not os.path.isfile(insn_fn):
# print "Combine: Warning: Unable to file a file:", bbv_fn
# continue
with open(reuse_fn, 'r') as fi:
maxhlen = 0
current_thread = 0
thread_data = []
barrier_data = []
for line in fi:
#m = re.search(r'Th:\s*(\d+) b:\s*(\d+)', line)
m = re.findall(r'\d+', line)
#print m
m = map(int, m)
th=m[0]
bar=m[1]
data=m[2:]
# Skip the first line because it contains the reuse data for pre-ROI
if bar == 0:
continue
else:
bar = bar-1
maxhlen = max(maxhlen, len(data))
# Weight the data according to the reuse distance
for i,d in enumerate(data[:]):
# Reuse distance increases by a power of 2
if i != 0:
rd = 1 << i;
else:
rd = 0
data[i] = int(d * pow(rd,powval))
if current_thread == th:
barrier_data.append(data)
else:
thread_data.append(barrier_data)
barrier_data = []
barrier_data.append(data)
current_thread = th
#print th, b, data
# add the last barrier_data to the thread data
thread_data.append(barrier_data)
barrier_data = []
#print thread_data
out = gzip.GzipFile(out_fn, 'w')
for b in range(bar+1):
out.write('T')
for t in range(th+1):
#print b, t
for idx,h in enumerate(thread_data[t][b]+([0]*(maxhlen-len(thread_data[t][b])))):
if h != 0:
out.write(':%d:%d ' % (1+idx+(t*maxhlen), h))
out.write('\n')
out.close()
os.system('touch "%s"' % (os.path.join(out_dir,'success')))
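# Worked example of the weighting loop above (illustrative numbers only):
# bucket i maps to reuse distance rd = 2**i (rd = 0 for i == 0) and each
# histogram count d is rescaled to int(d * rd**powval). With (num, denom)
# = (1, 2), i.e. powval = 0.5, a count of 100 in bucket 4 (rd = 16)
# becomes int(100 * 16**0.5) = 400.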
if __name__ == '__main__':
run()
| trevorcarlson/barrierpoint | weight_reuse_distances.py | Python | mit | 3,739 |
from numpy import *
from scipy import *
from Metadata import *
class Boxel: #this is how a boxel is represented
def __init__(self, x, y, z, size, isFull, metadata):
self.x=x
self.y=y
self.z=z
self.size=size
self.metadata=metadata
self.isFull=isFull #a boolean
class BoxelSet: # definition of an ordered set of Boxels
    def __init__(self):
        self.boxelArray= None
        self.boxelSize=None
        self.xmax=None #keeps track of the actual coordinates (in mm), useful during G-code generation; (xmax-xmin)/boxelSize should be an integer
        self.xmin=None
        self.ymax=None
        self.ymin=None
        self.zmax=None
        self.zmin=None
# with a good parser we could directly get a BoxelSet
def support(boxelSet, requirements):
# to be written, not useful at the moment since we don't have input
# support has to be created before slicing
# maybe it should become internal to the class BoxelSet
# requirements is the list of settings wanted by the user
    pass
def infill(boxelSet,requirements):
# to be written, not useful at the moment since we don't have input
# infill has to be created before slicing
# maybe it should become internal to the class BoxelSet
# requirements is the list of settings wanted by the user
    pass
def filerepair(boxelSet):
# to be written, not useful at the moment since we don't have input
# infill has to be created before slicing
# maybe it should become internal to the class BoxelSet
    pass
def findLayer(boxelSet, height, thickness):
# height and thickness are only integer, we are counting in 'boxel unit'
#maybe it should be internal to the class BoxelSet
return boxelSet.boxelArray[:,:,height:height+thickness]
def layertogrid(layer):
    (lenx,leny,lenz)=layer.shape
    grid = zeros((lenx, leny), dtype=bool) #creating the boolean occupancy grid
    for xi in range(lenx):
        for yi in range(leny): #filling it
            isFull = False
            for zi in range(lenz):
                if layer[xi][yi][zi].isFull: isFull=True
            grid[xi][yi]=isFull
    return grid
# one can easily add the metadata to the grid
#maybe it should be merged with findLayer
def slice(boxelSet, thickness): #slicing, without the G-code generation
    #maybe it should be internal to the class BoxelSet
    (lenx,leny,lenz)=boxelSet.boxelArray.shape
    height=0
    layerlist=[]
    while (height<lenz):
        layer=findLayer(boxelSet, height, thickness)
        grid=layertogrid(layer)
        layerlist.append(grid) # the last 3 lines could be summed up in 1 operation, not wasting memory; kept separate for readability
        height=height+thickness
    return layerlist
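# Illustrative usage sketch (the array shape and layer thickness are arbitrary
# assumptions; boxelArray is normally produced by a parser):
#
#     boxels = BoxelSet()
#     boxels.boxelArray = empty((10, 10, 10), dtype=object) # filled with Boxel objects elsewhere
#     layers = slice(boxels, 2) # list of 2D occupancy grids, one per 2-boxel-thick layer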
| mp5-io/MP5-Project | Boxels/src/Boxels.py | Python | gpl-3.0 | 2,796 |
# Copyright (c) 2015 SUSE Linux GmbH. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from configparser import ConfigParser
from textwrap import dedent
import os
import sys
# project
from azurectl.azurectl_exceptions import (
AzureConfigAccountNotFound,
AzureConfigRegionNotFound,
AzureAccountDefaultSectionNotFound,
AzureAccountLoadFailed,
AzureConfigVariableNotFound,
AzureConfigSectionNotFound,
AzureConfigParseError,
AzureConfigAccountFileNotFound,
AzureConfigDefaultLinkError
)
from azurectl.config.file_path import ConfigFilePath
class Config(object):
"""
Reading of config file attributes. Any instance holds state
information about Azure account, region, storage and container
references
"""
PLATFORM = sys.platform[:3]
def __init__(
self,
account_name=None,
region_name=None,
storage_account_name=None,
storage_container_name=None,
filename=None,
platform=PLATFORM
):
from azurectl.logger import log
self.storage_container_name = storage_container_name
self.storage_account_name = storage_account_name
self.config_file = self.__lookup_config_file(
platform, account_name, filename
)
self.config = ConfigParser()
try:
log.debug('Using configuration from %s', self.config_file)
self.config.read(self.config_file)
except Exception as e:
raise AzureConfigParseError(
'Could not parse config file: "%s"\n%s' %
                (self.config_file, format(e))
)
if not self.config.defaults():
raise AzureAccountDefaultSectionNotFound(
'Empty or undefined default section in configuration file %s' %
self.config_file
)
self.account_name = self.__import_default_account()
self.selected_region_name = region_name
self.region_name = None
def get_storage_account_name(self):
storage_account_name = self.storage_account_name
if not storage_account_name:
storage_account_name = self.__get_region_option(
'default_storage_account'
)
return storage_account_name
def get_storage_container_name(self):
storage_container_name = self.storage_container_name
if not storage_container_name:
storage_container_name = self.__get_region_option(
'default_storage_container'
)
return storage_container_name
def get_subscription_id(self):
return self.__get_account_option('subscription_id')
def get_publishsettings_file_name(self):
return self.__get_account_option('publishsettings')
def get_management_url(self):
return self.__get_account_option('management_url')
def get_management_pem_filename(self):
return self.__get_account_option('management_pem_file')
def get_region_name(self):
if not self.region_name:
try:
self.region_name = self.__import_default_region(
self.selected_region_name
).replace('region:', '')
except AzureConfigSectionNotFound:
self.region_name = self.selected_region_name
return self.region_name
def get_account_name(self):
return self.account_name.replace('account:', '')
@classmethod
def get_config_file(self, account_name=None, filename=None, platform=None):
paths = ConfigFilePath(account_name, platform)
if filename:
return filename
elif account_name:
return paths.default_new_account_config()
else:
return paths.default_config()
@classmethod
def get_config_file_list(self):
paths = ConfigFilePath()
return [
paths.default_config()
] + paths.account_config()
@classmethod
def set_default_config_file(self, account_name, platform=None):
paths = ConfigFilePath(account_name, platform)
account_config_file = paths.default_new_account_config()
if not os.path.exists(account_config_file):
raise AzureConfigAccountFileNotFound(
'Account config file %s not found' % account_config_file
)
default_config_file = paths.default_config()
if not default_config_file:
default_config_file = paths.default_new_config()
default_exists = os.path.exists(default_config_file)
default_islink = os.path.islink(default_config_file)
if default_exists and not default_islink:
message = dedent('''
Can not link %s as default account.
A default account configuration file from a former
azurectl version was found. Consider one of the following
options to handle the config file: %s
1. Delete the configuration file if no longer needed
2. Move the configuration file with context information to
~/.config/azurectl/config.<context>
''').strip()
raise AzureConfigDefaultLinkError(
message % (account_config_file, default_config_file)
)
if default_exists:
os.remove(default_config_file)
os.symlink(account_config_file, default_config_file)
def __check_for_section(self, section):
if section and not self.config.has_section(section):
raise AzureConfigSectionNotFound(
'Section %s not found in configuration file %s' %
(section, self.config_file)
)
def __get_account_option(self, option):
try:
result = self.config.get(self.account_name, option)
except Exception:
raise AzureConfigVariableNotFound(
'%s not defined for account %s in configuration file %s' %
(option, self.account_name, self.config_file)
)
return result
def __get_region_option(self, option):
try:
if not self.region_name:
self.get_region_name()
result = self.config.get('region:' + self.region_name, option)
except Exception as e:
message = '%s not found: %s' % (option, format(e))
raise AzureConfigVariableNotFound(
message
)
return result
def __lookup_config_file(self, platform, account_name, filename):
paths = ConfigFilePath(account_name, platform)
if filename:
# lookup a custom config file
if not os.path.isfile(filename):
raise AzureAccountLoadFailed(
'Could not find config file: %s' % filename
)
elif account_name:
# lookup an account config file
filename = paths.default_new_account_config()
if not os.path.isfile(filename):
raise AzureAccountLoadFailed(
'Could not find account config file: %s %s: %s' %
(
paths.account_config_file, 'in home directory',
paths.home_path
)
)
else:
# lookup default config file
filename = paths.default_config()
if not filename:
raise AzureAccountLoadFailed(
'could not find default configuration file %s %s: %s' %
(
' or '.join(paths.config_files),
'in home directory',
paths.home_path
)
)
return filename
def __import_default_region(self, region_name):
defaults = self.config.defaults()
if region_name:
region_name = 'region:' + region_name
else:
try:
region_name = defaults['default_region']
except Exception:
raise AzureConfigRegionNotFound(
'No region referenced in configuration file %s' %
self.config_file
)
self.__check_for_section(region_name)
return region_name
def __import_default_account(self):
defaults = self.config.defaults()
try:
account_name = defaults['default_account']
except Exception:
raise AzureConfigAccountNotFound(
'No account referenced in configuration file %s' %
self.config_file
)
self.__check_for_section(account_name)
return account_name
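# Illustrative usage sketch (the account and region names are assumptions,
# not values shipped with azurectl):
#
#     config = Config(account_name='default', region_name='West Europe')
#     storage = config.get_storage_account_name()
#     container = config.get_storage_container_name()
#
# The file itself is resolved in __lookup_config_file: an explicit filename
# wins, then the per-account config, then the default config found through
# ConfigFilePath.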
| SUSE/azurectl | azurectl/config/parser.py | Python | apache-2.0 | 9,291 |
# -*- coding: utf-8 -*-
#
# Copyright SHS-AV s.r.l. <http://www.zeroincombenze.org>)
#
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
#
# All Rights Reserved
#
"""Clodoo core functions
"""
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
from future import standard_library
# from builtins import hex
# from builtins import str
from builtins import int
from past.builtins import basestring
from builtins import str
# from builtins import * # noqa: F403
from past.utils import old_div
import sys
import re
import odoorpc
try:
import oerplib
except BaseException:
if sys.version_info[0] == 2:
raise ImportError("Package oerplib not found")
from os0 import os0
try:
from clodoolib import debug_msg_log, msg_log, decrypt
except BaseException:
from clodoo.clodoolib import debug_msg_log, msg_log, decrypt
try:
from transodoo import (read_stored_dict,
translate_from_sym,
translate_from_to)
except BaseException:
from clodoo.transodoo import (read_stored_dict,
translate_from_sym,
translate_from_to)
try:
import psycopg2
from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT
postgres_drive = True
except BaseException: # pragma: no cover
postgres_drive = False
standard_library.install_aliases() # noqa: E402
STS_FAILED = 1
STS_SUCCESS = 0
__version__ = "1.0.1"
#############################################################################
# Low level (driver) functions
def psql_connect(ctx):
cr = False
if (postgres_drive and
ctx.get('psycopg2', False)):
dbname = ctx['db_name']
dbuser = ctx['db_user']
pwd = ctx.get('db_password')
port = ctx.get('db_port') or 5432
cnx = psycopg2.connect(
dbname=dbname, user=dbuser, password=pwd, port=port)
cnx.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)
cr = cnx.cursor()
return cr
#############################################################################
# Connection and database
#
def cnx(ctx):
try:
if ctx['svc_protocol'] == 'jsonrpc':
odoo = odoorpc.ODOO(ctx['db_host'],
ctx['svc_protocol'],
ctx['xmlrpc_port'])
else:
odoo = oerplib.OERP(server=ctx['db_host'],
protocol=ctx['svc_protocol'],
port=ctx['xmlrpc_port'])
# version=ctx['oe_version'])
except BaseException: # pragma: no cover
odoo = False
return odoo
def exec_sql(ctx, query, response=None):
ctx['_cr'] = psql_connect(ctx)
try:
ctx['_cr'].execute(query)
if response:
response = ctx['_cr'].fetchall()
else:
response = True
except psycopg2.OperationalError:
os0.wlog('Error executing sql %s' % query)
response = False
try:
ctx['_cr'].close()
except psycopg2.OperationalError:
pass
return response
def sql_reconnect(ctx):
try:
ctx['_cr'].close()
except BaseException:
pass
ctx['_cr'] = psql_connect(ctx)
def connectL8(ctx):
"""Open connection to Odoo service"""
odoo = cnx(ctx)
if not odoo:
if ctx['oe_version'] != '*':
return u"!Odoo server %s is not running!" % ctx['oe_version']
if ctx['svc_protocol'] == 'jsonrpc' and sys.version_info[0] == 2:
ctx['svc_protocol'] = 'xmlrpc'
odoo = cnx(ctx)
if not odoo:
return u"!Odoo server %s is not running!" % ctx['oe_version']
if ctx['svc_protocol'] == 'jsonrpc':
ctx['server_version'] = odoo.version
else:
try:
ctx['server_version'] = odoo.db.server_version()
except BaseException:
ctx['server_version'] = odoo.version
x = re.match(r'[0-9]+\.[0-9]+', ctx['server_version'])
if (ctx['oe_version'] != '*' and
ctx['server_version'][0:x.end()] != ctx['oe_version']):
return u"!Invalid Odoo Server version: expected %s, found %s!" % \
(ctx['oe_version'], ctx['server_version'])
elif ctx['oe_version'] == '*':
ctx['oe_version'] = ctx['server_version'][0:x.end()]
ctx['majver'] = eval(ctx['server_version'].split('.')[0])
if ctx['majver'] < 10 and ctx['svc_protocol'] == 'jsonrpc':
ctx['svc_protocol'] = 'xmlrpc'
return connectL8(ctx)
ctx['odoo_session'] = odoo
return True
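# Illustrative usage sketch (host, port and version values are assumptions;
# in practice ctx is built by clodoolib from the command line and config):
#
#     ctx = {'db_host': 'localhost', 'xmlrpc_port': 8069,
#            'svc_protocol': 'jsonrpc', 'oe_version': '10.0'}
#     if connectL8(ctx) is True:
#         ids = searchL8(ctx, 'res.partner', [('is_company', '=', True)])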
#############################################################################
# Primitive version indipendent
#
def searchL8(ctx, model, where, order=None, context=None):
if ctx['svc_protocol'] == 'jsonrpc':
return ctx['odoo_session'].env[model].search(where, order=order,
context=context)
else:
return ctx['odoo_session'].search(model, where, order=order,
context=context)
def browseL8(ctx, model, id, context=None):
if ctx['svc_protocol'] == 'jsonrpc':
if context:
return ctx['odoo_session'].env[model].browse(id).with_context(
context)
else:
return ctx['odoo_session'].env[model].browse(id)
else:
return ctx['odoo_session'].browse(model, id, context=context)
def createL8(ctx, model, vals, context=None):
vals = drop_invalid_fields(ctx, model, vals)
vals = complete_fields(ctx, model, vals)
if ctx['svc_protocol'] == 'jsonrpc':
if context:
return ctx['odoo_session'].env[model].create(vals).with_context(
context)
else:
return ctx['odoo_session'].env[model].create(vals)
else:
return ctx['odoo_session'].create(model, vals)
#
#
# def write_recordL8(ctx, record):
# # vals = drop_invalid_fields(ctx, model, vals)
# if ctx['svc_protocol'] == 'jsonrpc':
# model = record.__class__.__name__
# ctx['odoo_session'].env[model].write(record)
# else:
# ctx['odoo_session'].write_record(record)
def writeL8(ctx, model, ids, vals, context=None):
vals = drop_invalid_fields(ctx, model, vals)
if ctx['svc_protocol'] == 'jsonrpc':
if context:
return ctx['odoo_session'].env[model].browse(ids).with_context(
context).write(vals)
else:
return ctx['odoo_session'].env[model].write(ids, vals)
else:
return ctx['odoo_session'].write(
model, ids, vals, context=context)
def unlinkL8(ctx, model, ids):
if ctx['svc_protocol'] == 'jsonrpc':
return ctx['odoo_session'].env[model].unlink(ids)
else:
return ctx['odoo_session'].unlink(model, ids)
def executeL8(ctx, model, action, *args):
action = translate_from_to(ctx,
model,
action,
'10.0',
ctx['oe_version'],
type='action')
if ctx['majver'] < 10 and action == 'invoice_open':
return ctx['odoo_session'].exec_workflow(model,
action,
*args)
return ctx['odoo_session'].execute(model,
action,
*args)
def execute_action_L8(ctx, model, action, ids):
sts = 0
if (model == 'account.invoice'):
ids = [ids] if isinstance(ids, int) else ids
try:
if ctx['majver'] >= 10:
executeL8(ctx,
model,
'compute_taxes',
ids)
else:
executeL8(ctx,
model,
'button_compute',
ids)
executeL8(ctx,
model,
'button_reset_taxes',
ids)
ids = ids[0]
except RuntimeError:
pass
elif (model == 'sale.order'):
ids = [ids] if isinstance(ids, int) else ids
try:
executeL8(ctx,
model,
'compute_tax_id',
ids)
except RuntimeError:
pass
executeL8(ctx,
model,
action,
ids)
return sts
###########################################################
# Version adaptive functions
#
def drop_fields(ctx, model, vals, to_delete):
for name in to_delete:
if isinstance(vals, (list, tuple)):
del vals[vals.index(name)]
else:
del vals[name]
msg = u"Invalid field %s of %s)" % (name, model)
debug_msg_log(ctx, 6, msg)
return vals
def complete_fields(ctx, model, vals):
to_delete = []
for name in ctx.get('STRUCT', {}).get(model, {}):
if (is_required_field(ctx, model, name) and
(name not in vals or not vals[name])):
vals[name] = set_some_values(ctx, None, name, '',
model=model, row=vals)
if not vals.get(name):
to_delete.append(name)
return drop_fields(ctx, model, vals, to_delete)
def drop_invalid_fields(ctx, model, vals):
    if ctx.get('STRUCT', {}).get(model, {}):
if isinstance(vals, (list, tuple)):
to_delete = list(set(vals) - set(ctx['STRUCT'][model].keys()))
else:
to_delete = list(set(vals.keys()) -
set(ctx['STRUCT'][model].keys()))
return drop_fields(ctx, model, vals, to_delete)
return vals
def tnl_2_ver_seq_code(ctx, model, vals, new_name, name, src_ver, tgt_ver,
default=None):
if src_ver == '10.0' and tgt_ver == '8.0':
if not searchL8(ctx,
'ir.sequence.type',
[('code', '=', vals[new_name])]):
createL8(ctx, 'ir.sequence.type',
{'code': vals[new_name],
'name': vals[new_name].replace('.', ' ')})
if name != new_name:
del vals[name]
return vals
def tnl_2_ver_acc_type(ctx, model, vals, new_name, name, src_ver, tgt_ver,
default=None):
TNL_9_TO_10 = {
'income': 'other',
'none': 'other',
'liability': 'payable',
'asset': 'other',
'expense': 'other',
}
TNL_10_TO_9 = {
'receivable': 'asset',
'liquidity': 'asset',
'payable': 'liability',
'other': 'none',
}
tbl = False
src_v = eval(src_ver.split('.')[0])
tgt_v = eval(tgt_ver.split('.')[0])
if src_v < 10 and tgt_v >= 10:
tbl = TNL_9_TO_10
elif src_v >= 10 and tgt_v < 10:
tbl = TNL_10_TO_9
if tbl:
vals[new_name] = tbl[vals[name]]
# vals[new_name] = translate_from_to(ctx,
# model,
# vals[name],
# src_ver,
# tgt_ver,
# type='value',
# fld_name='report_type')
if name != new_name:
del vals[name]
return vals
def tnl_2_ver_group(ctx, model, vals, new_name, name, src_ver, tgt_ver,
default=None):
'''Type Group'''
if name != new_name:
del vals[name]
if new_name in vals and src_ver == '10.0' and tgt_ver == '8.0':
if vals[new_name] == 'group':
vals['child_depend'] = True
del vals[new_name]
return vals
def tnl_2_ver_type_tax_use(ctx, model, vals, new_name, name, src_ver, tgt_ver,
default=None):
if vals.get(new_name) not in ('sale', 'purchase'):
if vals['description'][-1] == 'v':
vals[new_name] = 'sale'
else:
vals[new_name] = 'purchase'
if vals['type_tax_use'] == 'sale':
code = 'IT%s%sD' % (
'D', vals['description'][0:-1])
ids = searchL8(ctx, 'account.tax.code',
[('code', '=', code)])
if ids:
vals['base_code_id'] = ids[0]
vals['ref_base_code_id'] = ids[0]
code = 'IT%s%sV' % (
'D', vals['description'][0:-1])
ids = searchL8(ctx, 'account.tax.code',
[('code', '=', code)])
if ids:
vals['tax_code_id'] = ids[0]
vals['ref_tax_code_id'] = ids[0]
elif vals['type_tax_use'] == 'purchase':
code = 'IT%s%sD' % (
'C', vals['description'][0:-1])
ids = searchL8(ctx, 'account.tax.code',
[('code', '=', code)])
if ids:
vals['base_code_id'] = ids[0]
vals['ref_base_code_id'] = ids[0]
code = 'IT%s%sV' % (
'C', vals['description'][0:-1])
ids = searchL8(ctx, 'account.tax.code',
[('code', '=', code)])
if ids:
vals['tax_code_id'] = ids[0]
vals['ref_tax_code_id'] = ids[0]
if name != new_name:
del vals[name]
return vals
def tnl_2_ver_tax_amount(ctx, model, vals, new_name, name, src_ver, tgt_ver,
default=None):
if src_ver == '10.0' and tgt_ver == '8.0':
vals[new_name] = old_div(vals[new_name], 100)
elif src_ver == '8.0' and tgt_ver == '10.0':
vals[new_name] = vals[new_name] * 100
if name != new_name:
del vals[name]
return vals
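# Worked example (illustrative): with src_ver='8.0' and tgt_ver='10.0' a
# stored rate of 0.22 is multiplied by 100 and becomes 22.0; converting
# from '10.0' back to '8.0' divides by 100 again.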
def tnl_2_ver_vat(ctx, model, vals, new_name, name, src_ver, tgt_ver,
default=None):
'''External vat may not contain ISO code'''
if (isinstance(vals[new_name], basestring) and
len(vals[new_name]) == 11 and
vals[new_name].isdigit()):
vals[new_name] = 'IT%s' % vals[new_name]
else:
vals[new_name] = vals[new_name]
if name != new_name:
del vals[name]
return vals
def tnl_2_ver_state_id(ctx, model, vals, new_name, name, src_ver, tgt_ver,
default=None):
if 'country_id' in vals:
vals[new_name] = get_state_id(ctx, vals[new_name],
country_id=vals['country_id'])
else:
vals[new_name] = get_state_id(ctx, vals[new_name])
if name != new_name:
del vals[name]
return vals
def tnl_2_ver_child_id(ctx, model, vals, new_name, name, src_ver, tgt_ver,
default=None):
if eval(tgt_ver.split('.')[0]) >= 10 and vals[name]:
vals = {}
return vals
def tnl_2_ver_set_value(ctx, model, vals, new_name, name, src_ver, tgt_ver,
default=None):
vals[new_name] = default
if name != new_name:
del vals[name]
return vals
def tnl_2_ver_drop_record(ctx, model, vals, new_name, name, src_ver, tgt_ver,
default=None):
vals = {}
return vals
def cvt_from_ver_2_ver(ctx, model, src_ver, tgt_ver, vals):
APPLY = {
'account.account.type': {'type': 'acc_type()',
'report_type': 'acc_type()',
},
'account.account': {'child_id': 'child_id()',
},
'account.tax': {'type': 'group()',
'type_tax_use': 'type_tax_use()',
},
'res.partner': {'is_company': 'true',
'vat': 'vat()',
'state_id': 'state_id()',
},
'ir.sequence': {'code': 'seq_code()',
},
}
def process_fields(ctx, model, vals, src_ver, tgt_ver,
field_list=None, excl_list=None):
for name in vals.copy():
new_name = translate_from_to(ctx,
model,
name,
src_ver,
tgt_ver)
if not new_name:
new_name = name
if name == 'company_id':
if ctx.get('by_company'):
vals[name] = ctx['company_id']
elif model in APPLY:
default = ''
if new_name in APPLY[model]:
default = APPLY[model][new_name]
if field_list and new_name and new_name not in field_list:
continue
if excl_list and new_name and new_name in excl_list:
continue
if default.endswith('()'):
apply = 'tnl_2_ver_%s' % default[:-2]
default = False
elif default:
apply = 'tnl_2_ver_set_value'
if default == 'true':
default = os0.str2bool(default, True)
else:
apply = ''
if not apply or apply not in list(globals()):
if name != new_name:
vals[new_name] = vals[name]
del vals[name]
continue
if not new_name:
vals = globals()[apply](ctx, model, vals, name, name,
src_ver, tgt_ver, default=default)
else:
vals = globals()[apply](ctx, model, vals, new_name, name,
src_ver, tgt_ver, default=default)
if not vals:
break
return vals
if ctx.get('mindroot'):
if not ctx.get('by_company') and 'company_id' in vals:
ctx['company_id'] = vals['company_id']
elif model_has_company(ctx, model):
ctx['company_id'] = ctx['def_company_id']
if ctx.get('company_id'):
ctx['country_id'] = browseL8(ctx, 'res.company',
ctx['company_id']).partner_id.country_id.id
else:
ctx['country_id'] = False
pf_list = ('company_id', 'country_id', 'street')
vals = process_fields(ctx, model, vals, src_ver, tgt_ver,
field_list=pf_list)
vals = process_fields(ctx, model, vals, src_ver, tgt_ver,
excl_list=pf_list)
return vals
def extr_table_generic(ctx, model, keys=None, alls=None):
get_model_structure(ctx, model)
field_names = []
for field in ctx['STRUCT'][model]:
if (alls or
(keys and field in keys) or
(not keys and not ctx['STRUCT'][model][field]['readonly'])):
field_names.append(field)
return field_names
def get_val_from_field(ctx, model, rec, field, format=False):
if not hasattr(rec, field):
return None
res = rec[field]
if res:
if callable(rec[field]):
return None
get_model_structure(ctx, model)
if ctx['STRUCT'][model][field]['ttype'] in ('many2many', 'one2many'):
res = []
for id in rec[field]:
res.append(id.id)
if format == 'cmd':
res = [(6, 0, res)]
elif ctx['STRUCT'][model][field]['ttype'] in ('date', 'datetime'):
if format in ('cmd', 'str'):
res = str(res)
elif ctx['STRUCT'][model][field]['ttype'] == 'many2one':
res = rec[field].id
if format == 'cmd':
res = [(6, 0, res)]
        elif ctx['STRUCT'][model][field]['ttype'] in ('integer', 'float'):
if format == 'cmd':
res = str(res)
return res
def extract_vals_from_rec(ctx, model, rec, keys=None, format=False):
if keys:
if isinstance(keys, dict):
field_names = keys.keys()
elif isinstance(keys, list):
field_names = keys
else:
keys = None
if not keys:
func = 'extr_table_%s' % model
func = func.replace('.', '_')
if func in globals():
field_names = globals()[func](ctx)
else:
field_names = extr_table_generic(ctx, model)
res = {}
for field in field_names:
res[field] = get_val_from_field(ctx, model, rec, field, format=format)
return res
FIX_7_0 = {
'res.partner': {'name': {'required': True}},
'product.product': {'name': {'required': True}},
'product.template': {'name': {'required': True}},
'res.users': {'name': {'required': True}},
'account.invoice': {'company_id': {'readonly': False},
'number': {'readonly': False},
'date_invoice': {'readonly': False},
'journal_id': {'readonly': False},
'account_id': {'readonly': False},
'amount_tax': {'readonly': False},
'amount_total': {'readonly': False},
'amount_untaxed': {'readonly': False},
'internal_number': {'readonly': False},
'move_id': {'readonly': False},
'name': {'readonly': False},
'partner_id': {'readonly': False},
},
'account.invoice.line': {'company_id': {'readonly': False},
'number': {'readonly': False},
'date_invoice': {'readonly': False},
'journal_id': {'readonly': False}},
}
FIX_ALL = {
'message_follower_ids': {'readonly': True},
'message_ids': {'readonly': True},
'message_is_follower': {'readonly': True},
'message_summary': {'readonly': True},
'message_unread': {'readonly': True},
}
def get_model_structure(ctx, model, ignore=None):
read_stored_dict(ctx)
ignore = ignore or []
if ctx.get('STRUCT', {}).get(model, {}) and not ignore:
return
ctx['STRUCT'] = ctx.get('STRUCT', {})
ctx['STRUCT'][model] = ctx['STRUCT'].get(model, {})
ir_model = 'ir.model.fields'
for field in browseL8(ctx,
ir_model,
searchL8(ctx,
ir_model,
[('model', '=', model)])):
        res = FIX_7_0.get(model, {}).get(field.name, {}).get('required', None)
required = res if res is not None else field.required
if (field.name == 'id' or
(ctx['majver'] >= 9 and field.compute) or
field.name in ignore or
field.ttype in ('binary', 'reference')):
readonly = True
else:
            readonly = FIX_ALL.get(
                field.name, {}).get('readonly', False) or \
                FIX_7_0.get(
                    model, {}).get(field.name, {}).get('readonly', False)
ctx['STRUCT'][model][field.name] = {
'ttype': field.ttype,
'relation': field.relation,
'required': required,
'readonly': readonly,
}
# FIX for Odoo 7.0
field = 'id'
if field not in ctx['STRUCT'][model]:
ctx['STRUCT'][model][field] = {
'ttype': 'integer',
'relation': False,
'required': False,
'readonly': True,
}
field = 'name'
if model in ('res.users', 'res.partner', 'product.product',
'product.template'):
if field not in ctx['STRUCT'][model]:
ctx['STRUCT'][model][field] = {
'ttype': 'char',
'relation': False,
'required': True,
'readonly': False,
}
def build_model_struct(ctx):
o_model = {}
for p in ('model',
'model_code',
'model_name',
'model_action',
'model_keyids',
'hide_cid',
'alias_model2',
'alias_field'):
if p in ctx:
o_model[p] = ctx[p]
if not o_model.get('model_code') and not o_model.get('model_name'):
o_model['model_code'] = 'id'
if not o_model.get('model_keyids') and ctx.get('header_id'):
o_model['model_keyids'] = ctx['header_id']
return o_model
def get_model_model(ctx, o_model):
if 'model' in o_model:
if isinstance(o_model['model'], basestring):
model = o_model['model']
else:
model_selector = o_model.get('cur_model',
o_model['model'].keys()[0])
model = o_model['model'][model_selector]
else:
model = False
return model
def get_model_name(ctx, o_model):
if 'model_name' in o_model:
if isinstance(o_model['model_name'], basestring):
model_name = o_model['model_name']
else:
model_selector = o_model.get('cur_model',
o_model['model_name'].keys()[0])
model_name = o_model['model_name'][model_selector]
else:
model_name = False
return model_name
def get_res_users(ctx, user, field):
if field == 'name':
if ctx['oe_version'] == "6.1":
return user.name
else:
return user.partner_id.name
elif field == 'lang':
if ctx['oe_version'] == "6.1":
return user.context_lang
else:
return user.partner_id.lang
elif field == 'email':
if ctx['oe_version'] == "6.1":
return user.user_email
else:
return user.partner_id.email
elif field == 'country_id':
if ctx['oe_version'] == "6.1":
if user.company_id.country_id:
return user.company_id.country_id.id
return False
else:
if user.partner_id.country_id:
return user.partner_id.country_id.id
elif user.company_id.country_id:
return user.company_id.country_id.id
return False
return user[field]
###########################################################
# Others
#
def _get_model_bone(ctx, o_model):
"""Inherit model structure from a parent model"""
model = None
hide_cid = False
if ctx is not None:
if 'model' in ctx:
model = ctx['model']
if model == '':
model = None
else:
if 'hide_cid' in ctx:
hide_cid = ctx['hide_cid']
else:
hide_cid = not model_has_company(ctx,
model)
if model is None:
if 'model' in o_model:
model = o_model['model']
if model == '':
model = None
if 'hide_cid' in o_model:
hide_cid = o_model['hide_cid']
else:
hide_cid = not model_has_company(ctx,
model)
return model, hide_cid
def _import_file_model(ctx, o_model, csv_fn):
"""Get model name of import file"""
model, hide_cid = _get_model_bone(ctx, o_model)
if model is None:
model = os0.nakedname(csv_fn).replace('-', '.').replace('_', '.')
return model, hide_cid
def _get_model_code(ctx, o_model):
"""Get key field(s) name of model"""
if 'model_code' in o_model:
code = o_model['model_code']
elif 'code' in o_model:
code = o_model['code']
elif 'name' in o_model:
code = o_model['name']
elif 'code' in ctx:
code = 'code'
elif 'name' in ctx:
code = 'name'
elif 'id' in ctx:
code = 'id'
else:
code = 'name'
return code
def _get_model_name(ctx, o_model):
"""Get description field(s) name of model"""
if 'model_name' in o_model:
name = o_model['model_name']
elif 'name' in o_model:
name = o_model['name']
elif 'code' in o_model:
name = o_model['code']
elif 'name' in ctx:
name = 'name'
elif 'code' in ctx:
name = 'code'
else:
name = 'name'
return name
def _import_file_dbtype(o_model, fields, csv_fn):
"""Get db selector name of import file"""
if 'db_type' in o_model:
db_type = o_model['db_type']
elif 'db_type' in fields:
db_type = 'db_type'
else:
db_type = False
return db_type
def import_file_get_hdr(ctx, o_model, csv_obj, csv_fn, row):
"""Analyze csv file header and get header names
Header will be used to load value in table
@ return:
@ ['tables'] table aliases, if import many tables
i.e. {'H': 'move', 'D': 'move.line'}
@ ['model'] model name
@ ['hide_cid'] do not add company_id
@ ['name'] field name which is the record description
@ ['code'] field name which is the record key
@ ['db_type'] db type to record selection
@ ['repl_by_id'] search by id if no record name found
@ ['hide_id'] if true, no id will be returned
@ ['alias_field'] field name to create external identifier
@ ['alias_field2'] field name to create external identifier of many2one
Returned fields may be text if import just 1 table or
dictionary if import more tables; key is table id
i.e. return['name'] = {'A': 'name', 'B': 'name'}
"""
o_skull = o_model.copy()
csv_obj.fieldnames = row['undef_name']
o_skull['model'], o_skull['hide_cid'] = _import_file_model(ctx,
o_model,
csv_fn)
o_skull['name'] = _get_model_name(csv_obj.fieldnames,
o_model)
o_skull['code'] = _get_model_code(csv_obj.fieldnames,
o_model)
o_skull['db_type'] = _import_file_dbtype(o_model,
csv_obj.fieldnames,
csv_fn)
if o_skull['code'] != 'id' and 'id' in csv_obj.fieldnames:
o_skull['repl_by_id'] = True
else:
o_skull['repl_by_id'] = False
o_skull['hide_id'] = True
o_skull['alias_model2'] = o_model.get('alias_model2', '')
o_skull['alias_field'] = o_model.get('alias_field', '')
return o_skull
def get_company_id(ctx):
value = get_db_alias(ctx, 'z0bug.mycompany')
if not value or (isinstance(value, basestring) and not value.isdigit()):
model = 'res.company'
company_name = ctx.get('company_name', 'La % Azienda')
ids = searchL8(ctx, model, [('name', 'ilike', company_name)])
if not ids:
ids = searchL8(ctx, model, [('id', '>', 1)])
if ids:
value = ids[0]
else:
value = 1
if 'company_id' not in ctx and isinstance(value, int):
ctx['company_id'] = value
return value
def get_country_id(ctx, value):
if value:
model = 'res.country'
if value[0:5] == 'base.':
ids = searchL8(ctx, model,
[('code', '=', value[5:].upper())])
else:
ids = searchL8(ctx, model,
[('code', '=', value.upper())])
if not ids:
ids = searchL8(ctx, model,
[('name', 'ilike', value)])
if ids:
value = ids[0]
else:
value = False
else:
value = ctx['def_country_id']
return value
def get_state_id(ctx, value, country_id=None):
if value:
if not country_id:
country_id = ctx['def_country_id']
model = 'res.country.state'
ids = searchL8(ctx, model,
[('country_id', '=', country_id),
('code', '=', value.upper())])
if not ids:
ids = searchL8(ctx, model,
[('country_id', '=', country_id),
('name', 'ilike', value)])
if ids:
value = ids[0]
else:
value = False
return value
def set_null_val_code_n_name(ctx, name, val, row=None):
    value = val
    if name == 'code':
        if row and 'name' in row:
            value = hex(hash(row['name']))[2:]
        # else:
        #     code = hex(hash(datetime.datetime.now().microsecond))[2:]
    return value
def set_null_val_account_account_type(ctx, name, val, row=None):
return set_null_val_code_n_name(ctx, name, val, row=row)
def set_null_val_account_account(ctx, name, val, row=None):
if name == 'code':
return set_null_val_code_n_name(ctx, name, val, row=row)
return val
def set_null_val_account_tax(ctx, name, val, row=None):
if name == 'applicable_type':
return 'true'
return val
def set_null_val_account_invoice(ctx, name, val, row=None):
if name == 'state':
return 'draft'
return val
def set_null_val_ir_sequence(ctx, name, val, row=None):
if name == 'number_increment':
return 1
return val
def set_some_values(ctx, o_model, name, value, model=None, row=None):
"""Set default value for empties fields"""
if not model:
model = get_model_model(ctx, o_model)
if not value and name in ctx.get('DEFAULT', ''):
value = ctx['DEFAULT'][name]
elif name == 'company_id':
if not value:
value = ctx['company_id']
elif name == 'country_id':
value = get_country_id(ctx, value)
else:
func = 'set_null_val_%s' % model.replace('.', '_')
if func in globals():
return globals()[func](ctx, name, value, row=row)
elif model == 'res.partner':
if name == 'is_company':
return True
elif name == 'vat':
if ctx.get('country_code') == 'IT' and value.isdigit():
value = 'IT%011d' % eval(value)
elif name == 'state_id':
if row and 'country_id' in row:
value = get_state_id(ctx, value,
country_id=row['country_id'])
else:
value = get_state_id(ctx, value)
elif model == 'res.users':
if name == 'email':
if ctx['with_demo']:
return ctx['def_email']
elif not ctx['with_demo']:
return ctx['zeroadm_mail']
return value
def eval_value(ctx, o_model, name, value):
"""Evaluate value read from csv file: may be a function or macro
@ ctx: global parameters
@ o_model: special names
@ name: field name
@ value: field value (constant, macro or expression)
"""
name = os0.u(name)
value = os0.u(value)
msg = u"eval_value(name=%s, value=%s)" % (name, value)
debug_msg_log(ctx, 6, msg)
if not value and o_model:
return set_some_values(ctx, o_model, name, value)
elif isinstance(value, basestring):
eval_dict = True
token = '$1$!' if isinstance(value, str) else b'$1$!'
if value.startswith(token):
value = decrypt(value[4:])
if is_db_alias(ctx, value):
value = get_db_alias(ctx, value)
else:
token = '=' if isinstance(value, str) else b'='
tok_left = '${' if isinstance(value, str) else b'${'
tok_right = '}' if isinstance(value, str) else b'}'
tok_beg = '[(' if isinstance(value, str) else b'[('
tok_end = ')]' if isinstance(value, str) else b')]'
if value.startswith(token):
value = expr(ctx,
o_model,
name,
value[1:])
eval_dict = False
elif tok_left in value and tok_right in value:
value = expr(ctx,
o_model,
name,
value)
eval_dict = False
elif value.startswith(tok_beg) and value.endswith(tok_end):
value = expr(ctx,
o_model,
name,
value)
eval_dict = False
if isinstance(value, basestring):
if value in ('None', 'True', 'False') or \
(value[0:2] == "[(" and value[-2:] == ")]"):
if eval_dict:
try:
value = eval(value, None, ctx)
except BaseException: # pragma: no cover
pass
else:
try:
value = eval(value)
except BaseException: # pragma: no cover
pass
elif value.isdigit():
ir_model = 'ir.model.fields'
ids = searchL8(ctx,
ir_model,
[('model', '=', o_model),
('name', '=', name)])
if ids:
ttype = browseL8(ctx,
ir_model,
ids[0]).ttype
if ttype in ('integer', 'float', 'many2one'):
try:
value = eval(value)
except BaseException: # pragma: no cover
pass
return value
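# Illustrative examples of the value forms handled above (field and model
# names are assumptions): a value of "=1+1" is evaluated as a Python
# expression; "${def_email}" is expanded from ctx; a macro such as
# "${res.country(code):IT}" is resolved through expr()/_query_expr() into
# the database id of the matching record; values prefixed with "$1$!" are
# decrypted first.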
def expr(ctx, o_model, code, value):
"""Evaluate python expression value"""
if isinstance(value, basestring):
i, j = get_macro_pos(value)
if i >= 0 and j > i:
v = value[i + 2:j]
x, y = get_macro_pos(v)
while x >= 0 and y > i:
v = expr(ctx, o_model, code, v)
value = value[0:i + 2] + v + value[j:]
i, j = get_macro_pos(value)
v = value[i + 2:j]
x, y = get_macro_pos(v)
res = ""
while i >= 0 and j > i:
v = value[i + 2:j]
if v.find(':') >= 0:
v = _query_expr(ctx, o_model, code, v)
else:
if v == 'zeroadm_email' and ctx['with_demo']:
v = 'def_email'
try:
v = eval(v, None, ctx)
except BaseException: # pragma: no cover
pass
if i > 0:
res = concat_res(res, value[0:i])
value = value[j + 1:]
res = concat_res(res, v)
i, j = get_macro_pos(value)
value = concat_res(res, value)
if isinstance(value, basestring) and \
value[0:2] == "[(" and value[-2:] == ")]":
res = []
for v in value[2:-2].split(','):
res.append(get_db_alias(ctx, v, fmt='string'))
value = '[(%s)]' % ','.join(res)
if isinstance(value, basestring):
value = get_db_alias(ctx, value)
return value
def _get_simple_query_id(ctx, model, code, value, hide_cid):
"""Execute a simple query to get ids from selection field(s)
Do not expand value
@ ctx: global parameters
@ model: model name
@ code: field name
@ value: field value (just constant)
@ hide_cid: hide company_id
"""
ids = _get_raw_query_id(ctx, model, code, value, hide_cid, '=')
if model == 'ir.model.data' and len(ids) == 1:
try:
ids = [browseL8(ctx, 'ir.model.data', ids[0]).res_id]
except BaseException: # pragma: no cover
ids = None
if ids is None:
return []
if len(ids) == 0 and model != 'res.users':
ids = _get_raw_query_id(ctx,
model,
code,
value,
hide_cid,
'ilike')
return ids
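# Illustrative call (model and value are assumptions):
#     _get_simple_query_id(ctx, 'res.partner', 'name', 'ACME', False)
# searches with '=' first and falls back to 'ilike' when nothing matches
# (except for res.users); with hide_cid False the query is also filtered
# by ctx['company_id'] when it is set.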
def _get_raw_query_id(ctx, model, code, value, hide_cid, op):
if not hide_cid and 'company_id' in ctx:
where = [('company_id', '=', ctx['company_id'])]
else:
where = []
if isinstance(code, list) and isinstance(value, list):
for i, c in enumerate(code):
if i < len(value):
where = append_2_where(ctx,
model,
c,
value[i],
where,
op)
else:
where = append_2_where(ctx,
model,
c,
'',
where,
op)
else:
where = append_2_where(ctx,
model,
code,
value,
where,
op)
try:
ids = searchL8(ctx, model, where)
except BaseException: # pragma: no cover
ids = None
return ids
def append_2_where(ctx, model, code, value, where, op):
if value is not None and value != "":
value = eval_value(ctx, model, code, value)
if isinstance(value, basestring) and value and value[0] == '~':
where.append('|')
where.append((code, op, value))
where.append((code, op, value[1:]))
elif not isinstance(value, basestring) and \
op in ('like', 'ilike', '=like', '=ilike'):
where.append((code, '=', value))
else:
where.append((code, op, value))
elif code == "country_id":
where.append((code, '=', ctx['def_country_id']))
elif code != "id" and code[-3:] == "_id":
where.append((code, '=', ""))
return where
def get_query_id(ctx, o_model, row):
"""Execute a query to get ids from fields in row read from csv
Value may be expanded
@ o_model: special names
@ ctx: global parameters
@ row: record fields
"""
model, hide_cid = _get_model_bone(ctx, o_model)
msg = "get_query_id(model=%s, hide_company=%s)" % (model, hide_cid)
debug_msg_log(ctx, 6, msg)
ids = []
if o_model['repl_by_id'] and row.get('id', None):
o_skull = o_model.copy()
o_skull['code'] = 'id'
o_skull['hide_id'] = False
value = eval_value(ctx,
o_skull,
'id',
row['id'])
if isinstance(value, int):
ids = searchL8(ctx, model, [('id', '=', value)])
if not ids:
if o_model['code'].find(',') >= 0:
code = o_model['code'].split(',')
else:
code = o_model['code']
if isinstance(code, list):
value = []
for p in code:
value.append(row.get(p, ''))
else:
value = row.get(code, '')
if not value:
if o_model['name'].find(',') >= 0:
code = o_model['name'].split(',')
else:
code = o_model['name']
if isinstance(code, list):
value = []
for p in code:
value.append(row.get(p, ''))
else:
value = row.get(code, '')
if model is None or not value:
ids = []
else:
ids = _get_simple_query_id(ctx,
model,
code,
value,
hide_cid)
return ids
def _query_expr(ctx, o_model, code, value):
msg = "_quer_expr(value=%s)" % value
debug_msg_log(ctx, 6, msg)
model, name, value, hide_cid, fldname = _get_model_parms(ctx,
o_model,
value)
if model:
if fldname == 'db_type':
value = o_model.get('db_type', '')
elif fldname == 'oe_versions':
value = value == ctx['server_version']
else:
value = _get_simple_query_id(ctx,
model,
name,
value,
hide_cid)
if isinstance(value, list):
if len(value):
value = value[0]
if fldname != 'id':
o = browseL8(ctx, model, value)
value = getattr(o, fldname)
else:
value = None
return value
def is_valid_field(ctx, model, name):
get_model_structure(ctx, model)
if name in ctx['STRUCT'][model]:
return True
return False
def is_required_field(ctx, model, name):
get_model_structure(ctx, model)
if name in ctx['STRUCT'][model]:
return ctx['STRUCT'][model][name]['required']
return False
def model_has_company(ctx, model):
return is_valid_field(ctx, model, 'company_id')
def get_macro_pos(value):
i = value.find("${")
o = 0
j = value.find("}", o)
if i >= 0:
p = i + 2
k = value.find("${", p)
else:
k = -1
while k >= 0 and j >= 0 and k < j:
o = j + 1
j = value.find("}", o)
p = k + 1
k = value.find("${", p)
return i, j
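# Example (illustrative): get_macro_pos("a ${x} b") returns (2, 5), the
# positions of "${" and of its matching "}"; (-1, -1) is returned when the
# value contains no macro.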
def _get_model_parms(ctx, o_model, value):
"""Extract model parameters and pure value from value and structure"""
model, hide_cid = _get_model_bone(ctx, o_model)
sep = '::'
name = 'name'
fldname = 'id'
i = value.find(sep)
if i >= 0:
hide_cid = False
else:
sep = ':'
i = value.find(sep)
if i >= 0:
hide_cid = True
if i < 0:
n, v = is_db_alias(ctx, value)
if n:
model = "ir.model.data"
name = ['module', 'name']
value = v
hide_cid = True
else:
model = None
try:
value = eval(value, None, ctx)
except BaseException: # pragma: no cover
pass
else:
model = value[:i]
value = value[i + len(sep):]
model, fldname = _get_name_n_ix(model, deflt=fldname)
model, x = _get_name_n_params(model, name)
if x.find(',') >= 0:
name = x.split(',')
value = value.split(',')
else:
name = x
return model, name, value, hide_cid, fldname
def concat_res(res, value):
if isinstance(res, basestring) and res:
if isinstance(value, basestring):
res = res + value
elif isinstance(value, (bool, int, float)):
res = res + str(value)
elif isinstance(res, (bool, int, float)):
if isinstance(value, basestring) and value:
res = str(res) + value
elif isinstance(value, (bool, int, float)):
res = str(res) + str(value)
else:
res = value
return res
def is_db_alias(ctx, value):
model, name, value, hide_cid = get_model_alias(value)
if model == 'ir.transodoo':
if value[2] and value[2] != '0':
            return translate_from_to(ctx,
                                     value[0],
                                     value[1],
                                     value[2],
                                     ctx['oe_version']) != ''
else:
return translate_from_sym(ctx,
value[0],
value[1],
ctx['oe_version']) != ''
if ctx['svc_protocol'] == 'jsonrpc':
if model and name and value and ctx['odoo_session'].env[model].search(
[(name[0], '=', value[0]),
(name[1], '=', value[1])]):
return True
else:
if model and name and value and searchL8(
ctx,
model,
[(name[0], '=', value[0]),
(name[1], '=', value[1])]):
return True
return False
def get_db_alias(ctx, value, fmt=None):
if is_db_alias(ctx, value):
model, name, value, hide_cid = get_model_alias(value)
if model == 'ir.transodoo':
if value[2] and value[2] != '0':
return translate_from_to(ctx,
value[0],
value[1],
value[2],
ctx['oe_version'])
else:
return translate_from_sym(ctx,
value[0],
value[1],
ctx['oe_version'])
ids = _get_simple_query_id(ctx,
model,
name,
value,
hide_cid)
if isinstance(ids, list):
if len(ids):
if name == 'id' or isinstance(name, list):
value = ids[0]
if fmt == 'string':
value = str(value)
else:
o = browseL8(ctx, model, ids[0])
value = getattr(o, name)
else:
value = None
return value
def get_model_alias(value):
if value:
items = value.split('.')
if len(items) == 3 and items[0] and items[0][0].isalpha() and \
items[-1] and items[-1][0].isdigit():
model = "ir.transodoo"
name = ['module', 'name', 'version']
value = [items[0], items[1], items[2]]
hide_cid = True
return model, name, value, hide_cid
elif len(items) == 2 and items[0] and items[0][0].isalpha():
model = "ir.model.data"
name = ['module', 'name']
value = [items[0], items[1]]
hide_cid = True
return model, name, value, hide_cid
return None, None, value, None
def put_model_alias(ctx,
model=None, name=None, ref=None, id=None, module=None):
if ref:
refs = ref.split('.')
if len(refs):
if not module:
module = refs[0]
if not name:
name = refs[1]
module = module or 'base'
if model and name and id:
ids = searchL8(ctx, 'ir.model.data',
[('model', '=', model),
('module', '=', module),
('name', '=', name)])
if ids:
writeL8(ctx, 'ir.model.data', ids, {'res_id': id})
else:
vals = {
'module': module,
'model': model,
'name': name,
'res_id': id,
}
createL8(ctx, 'ir.model.data', vals)
else:
msg = 'Invalid alias ref'
msg_log(ctx, ctx['level'], msg)
def _get_name_n_params(name, deflt=None):
"""Extract name and params from string like 'name(params)'"""
deflt = '' if deflt is None else deflt
i = name.find('(')
j = name.rfind(')')
if i >= 0 and j >= i:
n = name[:i]
p = name[i + 1:j]
else:
n = name
p = deflt
return n, p
def _get_name_n_ix(name, deflt=None):
"""Extract name and subscription from string like 'name[ix]'"""
deflt = '' if deflt is None else deflt
i = name.find('[')
j = name.rfind(']')
if i >= 0 and j >= i:
n = name[:i]
x = name[i + 1:j]
else:
n = name
x = deflt
return n, x
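# Examples (illustrative): _get_name_n_params("res.country(code)") returns
# ("res.country", "code") and _get_name_n_ix("product.product[id]") returns
# ("product.product", "id"); the default is returned when no parentheses or
# brackets are present.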
| zeroincombenze/tools | clodoo/clodoocore.py | Python | agpl-3.0 | 52,723 |
class RosterMixin(object):
def __init__(self, *args, **kwargs):
import logging
logging.critical(
"RosterMixin has been moved to the hipchat backend.\n" +
"Please change all your imports to `from will.backends.io_adapters.hipchat import HipChatRosterMixin`"
)
super(RosterMixin, self).__init__(*args, **kwargs)
| mike-love/will | will/mixins/roster.py | Python | mit | 371 |
from capstone.game.games import TicTacToe
from capstone.game.players import AlphaBeta, RandPlayer
from capstone.game.utils import play_series
game = TicTacToe()
players = [AlphaBeta(), RandPlayer()]
print('Players: {}\n'.format(players))
n_matches = 10
play_series(game, players, n_matches)
players.reverse()
print('\nPlayers: {}\n'.format(players))
play_series(game, players, n_matches)
| davidrobles/mlnd-capstone-code | experiments/play_series_alphabeta_vs_random.py | Python | mit | 389 |
from .predicate import Predicate
class Equality(Predicate):
def __init__(self):
super().__init__('=', 2)
| jadnohra/connect | proto_2/ddq/fol/equality.py | Python | unlicense | 119 |
input = """
c(2).
p(1).
a(2).
d(2,2,1).
okay(X):- c(X), #count{V:a(V),d(V,X,1)} = 1.
ouch(X):- p(X), #count{V:a(V),d(V,X,1)} = 1.
"""
output = """
c(2).
p(1).
a(2).
d(2,2,1).
okay(X):- c(X), #count{V:a(V),d(V,X,1)} = 1.
ouch(X):- p(X), #count{V:a(V),d(V,X,1)} = 1.
"""
| veltri/DLV2 | tests/parser/aggregates.count.boundvariables.1.test.py | Python | apache-2.0 | 269 |
# Copyright 2013 Google Inc. All Rights Reserved.
#
# This file is part of astroid.
#
# logilab-astng is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 2.1 of the License, or (at your
# option) any later version.
#
# logilab-astng is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
# for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with logilab-astng. If not, see <http://www.gnu.org/licenses/>.
"""Tests for basic functionality in astroid.brain."""
import sys
import unittest
import six
from astroid import MANAGER
from astroid import bases
from astroid import builder
from astroid import nodes
from astroid import test_utils
from astroid import util
import astroid
try:
import nose # pylint: disable=unused-import
HAS_NOSE = True
except ImportError:
HAS_NOSE = False
try:
import multiprocessing # pylint: disable=unused-import
HAS_MULTIPROCESSING = True
except ImportError:
HAS_MULTIPROCESSING = False
try:
import enum # pylint: disable=unused-import
HAS_ENUM = True
except ImportError:
HAS_ENUM = False
try:
import dateutil # pylint: disable=unused-import
HAS_DATEUTIL = True
except ImportError:
HAS_DATEUTIL = False
try:
import numpy # pylint: disable=unused-import
HAS_NUMPY = True
except ImportError:
HAS_NUMPY = False
try:
import pytest # pylint: disable=unused-import
HAS_PYTEST = True
except ImportError:
HAS_PYTEST = False
class HashlibTest(unittest.TestCase):
def test_hashlib(self):
"""Tests that brain extensions for hashlib work."""
hashlib_module = MANAGER.ast_from_module_name('hashlib')
for class_name in ['md5', 'sha1']:
class_obj = hashlib_module[class_name]
self.assertIn('update', class_obj)
self.assertIn('digest', class_obj)
self.assertIn('hexdigest', class_obj)
self.assertIn('block_size', class_obj)
self.assertIn('digest_size', class_obj)
self.assertEqual(len(class_obj['__init__'].args.args), 2)
self.assertEqual(len(class_obj['__init__'].args.defaults), 1)
self.assertEqual(len(class_obj['update'].args.args), 2)
self.assertEqual(len(class_obj['digest'].args.args), 1)
self.assertEqual(len(class_obj['hexdigest'].args.args), 1)
class NamedTupleTest(unittest.TestCase):
def test_namedtuple_base(self):
klass = test_utils.extract_node("""
from collections import namedtuple
class X(namedtuple("X", ["a", "b", "c"])):
pass
""")
self.assertEqual(
[anc.name for anc in klass.ancestors()],
['X', 'tuple', 'object'])
for anc in klass.ancestors():
self.assertFalse(anc.parent is None)
def test_namedtuple_inference(self):
klass = test_utils.extract_node("""
from collections import namedtuple
name = "X"
fields = ["a", "b", "c"]
class X(namedtuple(name, fields)):
pass
""")
for base in klass.ancestors():
if base.name == 'X':
break
self.assertSetEqual({"a", "b", "c"}, set(base._instance_attrs))
def test_namedtuple_inference_failure(self):
klass = test_utils.extract_node("""
from collections import namedtuple
def foo(fields):
return __(namedtuple("foo", fields))
""")
self.assertIs(util.YES, next(klass.infer()))
@unittest.skipIf(sys.version_info[0] > 2,
'namedtuple inference is broken on Python 3')
def test_namedtuple_advanced_inference(self):
# urlparse return an object of class ParseResult, which has a
# namedtuple call and a mixin as base classes
result = test_utils.extract_node("""
import urlparse
result = __(urlparse.urlparse('gopher://'))
""")
instance = next(result.infer())
self.assertEqual(len(instance.getattr('scheme')), 1)
self.assertEqual(len(instance.getattr('port')), 1)
with self.assertRaises(astroid.NotFoundError):
instance.getattr('foo')
self.assertEqual(len(instance.getattr('geturl')), 1)
self.assertEqual(instance.name, 'ParseResult')
def test_namedtuple_instance_attrs(self):
result = test_utils.extract_node('''
from collections import namedtuple
namedtuple('a', 'a b c')(1, 2, 3) #@
''')
inferred = next(result.infer())
for name, attr in inferred._instance_attrs.items():
self.assertEqual(attr[0].attrname, name)
def test_namedtuple_uninferable_fields(self):
node = test_utils.extract_node('''
x = [A] * 2
from collections import namedtuple
l = namedtuple('a', x)
l(1)
''')
inferred = next(node.infer())
self.assertIs(util.YES, inferred)
class ModuleExtenderTest(unittest.TestCase):
def testExtensionModules(self):
transformer = MANAGER._transform
for extender, _ in transformer.transforms[nodes.Module]:
n = nodes.Module('__main__', None)
extender(n)
@unittest.skipUnless(HAS_NOSE, "This test requires nose library.")
class NoseBrainTest(unittest.TestCase):
def test_nose_tools(self):
methods = test_utils.extract_node("""
from nose.tools import assert_equal
from nose.tools import assert_equals
from nose.tools import assert_true
assert_equal = assert_equal #@
assert_true = assert_true #@
assert_equals = assert_equals #@
""")
assert_equal = next(methods[0].value.infer())
assert_true = next(methods[1].value.infer())
assert_equals = next(methods[2].value.infer())
self.assertIsInstance(assert_equal, astroid.BoundMethod)
self.assertIsInstance(assert_true, astroid.BoundMethod)
self.assertIsInstance(assert_equals, astroid.BoundMethod)
self.assertEqual(assert_equal.qname(),
'unittest.case.TestCase.assertEqual')
self.assertEqual(assert_true.qname(),
'unittest.case.TestCase.assertTrue')
self.assertEqual(assert_equals.qname(),
'unittest.case.TestCase.assertEqual')
class SixBrainTest(unittest.TestCase):
def test_attribute_access(self):
ast_nodes = test_utils.extract_node('''
import six
six.moves.http_client #@
six.moves.urllib_parse #@
six.moves.urllib_error #@
six.moves.urllib.request #@
''')
http_client = next(ast_nodes[0].infer())
self.assertIsInstance(http_client, nodes.Module)
self.assertEqual(http_client.name,
'http.client' if six.PY3 else 'httplib')
urllib_parse = next(ast_nodes[1].infer())
if six.PY3:
self.assertIsInstance(urllib_parse, nodes.Module)
self.assertEqual(urllib_parse.name, 'urllib.parse')
else:
# On Python 2, this is a fake module, the same behaviour
# being mimicked in brain's tip for six.moves.
self.assertIsInstance(urllib_parse, astroid.Instance)
urljoin = next(urllib_parse.igetattr('urljoin'))
urlencode = next(urllib_parse.igetattr('urlencode'))
if six.PY2:
            # In reality it's a function, but our implementation
# transforms it into a method.
self.assertIsInstance(urljoin, astroid.BoundMethod)
self.assertEqual(urljoin.qname(), 'urlparse.urljoin')
self.assertIsInstance(urlencode, astroid.BoundMethod)
self.assertEqual(urlencode.qname(), 'urllib.urlencode')
else:
self.assertIsInstance(urljoin, nodes.FunctionDef)
self.assertEqual(urljoin.qname(), 'urllib.parse.urljoin')
self.assertIsInstance(urlencode, nodes.FunctionDef)
self.assertEqual(urlencode.qname(), 'urllib.parse.urlencode')
urllib_error = next(ast_nodes[2].infer())
if six.PY3:
self.assertIsInstance(urllib_error, nodes.Module)
self.assertEqual(urllib_error.name, 'urllib.error')
else:
# On Python 2, this is a fake module, the same behaviour
# being mimicked in brain's tip for six.moves.
self.assertIsInstance(urllib_error, astroid.Instance)
urlerror = next(urllib_error.igetattr('URLError'))
self.assertIsInstance(urlerror, nodes.ClassDef)
content_too_short = next(urllib_error.igetattr('ContentTooShortError'))
self.assertIsInstance(content_too_short, nodes.ClassDef)
urllib_request = next(ast_nodes[3].infer())
if six.PY3:
self.assertIsInstance(urllib_request, nodes.Module)
self.assertEqual(urllib_request.name, 'urllib.request')
else:
self.assertIsInstance(urllib_request, astroid.Instance)
urlopen = next(urllib_request.igetattr('urlopen'))
urlretrieve = next(urllib_request.igetattr('urlretrieve'))
if six.PY2:
            # In reality it's a function, but our implementation
            # transforms it into a method.
self.assertIsInstance(urlopen, astroid.BoundMethod)
self.assertEqual(urlopen.qname(), 'urllib2.urlopen')
self.assertIsInstance(urlretrieve, astroid.BoundMethod)
self.assertEqual(urlretrieve.qname(), 'urllib.urlretrieve')
else:
self.assertIsInstance(urlopen, nodes.FunctionDef)
self.assertEqual(urlopen.qname(), 'urllib.request.urlopen')
self.assertIsInstance(urlretrieve, nodes.FunctionDef)
self.assertEqual(urlretrieve.qname(), 'urllib.request.urlretrieve')
def test_from_imports(self):
ast_node = test_utils.extract_node('''
from six.moves import http_client
http_client.HTTPSConnection #@
''')
inferred = next(ast_node.infer())
self.assertIsInstance(inferred, nodes.ClassDef)
if six.PY3:
qname = 'http.client.HTTPSConnection'
else:
qname = 'httplib.HTTPSConnection'
self.assertEqual(inferred.qname(), qname)
@unittest.skipUnless(HAS_MULTIPROCESSING,
                     'multiprocessing is required for this test, but '
'on some platforms it is missing '
'(Jython for instance)')
class MultiprocessingBrainTest(unittest.TestCase):
def test_multiprocessing_module_attributes(self):
# Test that module attributes are working,
# especially on Python 3.4+, where they are obtained
# from a context.
module = test_utils.extract_node("""
import multiprocessing
""")
module = module.do_import_module('multiprocessing')
cpu_count = next(module.igetattr('cpu_count'))
if sys.version_info < (3, 4):
self.assertIsInstance(cpu_count, nodes.FunctionDef)
else:
self.assertIsInstance(cpu_count, astroid.BoundMethod)
def test_module_name(self):
module = test_utils.extract_node("""
import multiprocessing
multiprocessing.SyncManager()
""")
inferred_sync_mgr = next(module.infer())
module = inferred_sync_mgr.root()
self.assertEqual(module.name, 'multiprocessing.managers')
def test_multiprocessing_manager(self):
# Test that we have the proper attributes
# for a multiprocessing.managers.SyncManager
module = builder.parse("""
import multiprocessing
manager = multiprocessing.Manager()
queue = manager.Queue()
joinable_queue = manager.JoinableQueue()
event = manager.Event()
rlock = manager.RLock()
bounded_semaphore = manager.BoundedSemaphore()
condition = manager.Condition()
barrier = manager.Barrier()
pool = manager.Pool()
list = manager.list()
dict = manager.dict()
value = manager.Value()
array = manager.Array()
namespace = manager.Namespace()
""")
queue = next(module['queue'].infer())
self.assertEqual(queue.qname(),
"{}.Queue".format(six.moves.queue.__name__))
joinable_queue = next(module['joinable_queue'].infer())
self.assertEqual(joinable_queue.qname(),
"{}.Queue".format(six.moves.queue.__name__))
event = next(module['event'].infer())
event_name = "threading.{}".format("Event" if six.PY3 else "_Event")
self.assertEqual(event.qname(), event_name)
rlock = next(module['rlock'].infer())
rlock_name = "threading._RLock"
self.assertEqual(rlock.qname(), rlock_name)
bounded_semaphore = next(module['bounded_semaphore'].infer())
semaphore_name = "threading.{}".format(
"BoundedSemaphore" if six.PY3 else "_BoundedSemaphore")
self.assertEqual(bounded_semaphore.qname(), semaphore_name)
pool = next(module['pool'].infer())
pool_name = "multiprocessing.pool.Pool"
self.assertEqual(pool.qname(), pool_name)
for attr in ('list', 'dict'):
obj = next(module[attr].infer())
self.assertEqual(obj.qname(),
"{}.{}".format(bases.BUILTINS, attr))
array = next(module['array'].infer())
self.assertEqual(array.qname(), "array.array")
manager = next(module['manager'].infer())
# Verify that we have these attributes
self.assertTrue(manager.getattr('start'))
self.assertTrue(manager.getattr('shutdown'))
@unittest.skipUnless(HAS_ENUM,
'The enum module was only added in Python 3.4. Support for '
'older Python versions may be available through the enum34 '
'compatibility module.')
class EnumBrainTest(unittest.TestCase):
def test_simple_enum(self):
module = builder.parse("""
import enum
class MyEnum(enum.Enum):
one = "one"
two = "two"
def mymethod(self, x):
return 5
""")
enum = next(module['MyEnum'].infer())
one = enum['one']
self.assertEqual(one.pytype(), '.MyEnum.one')
property_type = '{}.property'.format(bases.BUILTINS)
for propname in ('name', 'value'):
prop = next(iter(one.getattr(propname)))
self.assertIn(property_type, prop.decoratornames())
meth = one.getattr('mymethod')[0]
self.assertIsInstance(meth, astroid.FunctionDef)
def test_looks_like_enum_false_positive(self):
# Test that a class named Enumeration is not considered a builtin enum.
module = builder.parse('''
class Enumeration(object):
def __init__(self, name, enum_list):
pass
test = 42
''')
enum = module['Enumeration']
test = next(enum.igetattr('test'))
self.assertEqual(test.value, 42)
def test_enum_multiple_base_classes(self):
module = builder.parse("""
import enum
class Mixin:
pass
class MyEnum(Mixin, enum.Enum):
one = 1
""")
enum = next(module['MyEnum'].infer())
one = enum['one']
clazz = one.getattr('__class__')[0]
self.assertTrue(clazz.is_subtype_of('.Mixin'),
'Enum instance should share base classes with generating class')
def test_int_enum(self):
module = builder.parse("""
import enum
class MyEnum(enum.IntEnum):
one = 1
""")
enum = next(module['MyEnum'].infer())
one = enum['one']
clazz = one.getattr('__class__')[0]
int_type = '{}.{}'.format(bases.BUILTINS, 'int')
self.assertTrue(clazz.is_subtype_of(int_type),
'IntEnum based enums should be a subtype of int')
def test_enum_func_form_is_class_not_instance(self):
cls, instance = test_utils.extract_node('''
from enum import Enum
f = Enum('Audience', ['a', 'b', 'c'])
f #@
f() #@
''')
inferred_cls = next(cls.infer())
self.assertIsInstance(inferred_cls, nodes.ClassDef)
inferred_instance = next(instance.infer())
self.assertIsInstance(inferred_instance, bases.Instance)
@unittest.skipUnless(HAS_DATEUTIL, "This test requires the dateutil library.")
class DateutilBrainTest(unittest.TestCase):
def test_parser(self):
module = builder.parse("""
from dateutil.parser import parse
d = parse('2000-01-01')
""")
d_type = next(module['d'].infer())
self.assertEqual(d_type.qname(), "datetime.datetime")
@unittest.skipUnless(HAS_NUMPY, "This test requires the numpy library.")
class NumpyBrainTest(unittest.TestCase):
def test_numpy(self):
node = test_utils.extract_node('''
import numpy
numpy.ones #@
''')
inferred = next(node.infer())
self.assertIsInstance(inferred, nodes.FunctionDef)
@unittest.skipUnless(HAS_PYTEST, "This test requires the pytest library.")
class PytestBrainTest(unittest.TestCase):
def test_pytest(self):
ast_node = test_utils.extract_node('''
import pytest
pytest #@
''')
module = next(ast_node.infer())
self.assertIn('deprecated_call', module)
self.assertIn('exit', module)
self.assertIn('fail', module)
self.assertIn('fixture', module)
self.assertIn('mark', module)
if __name__ == '__main__':
unittest.main()
| si618/pi-time | node_modules/grunt-pylint/tasks/lib/astroid/tests/unittest_brain.py | Python | gpl-3.0 | 18,053 |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.utils.translation import ugettext_lazy as _
from horizon import tables
class ResourceTypesTable(tables.DataTable):
name = tables.Column("resource_type",
verbose_name=_("Type"),
link="horizon:project:stacks.resource_types:details",)
def get_object_id(self, resource):
return resource.resource_type
class Meta(object):
name = "resource_types"
verbose_name = _("Resource Types")
table_actions = (tables.FilterAction,)
multi_select = False
| coreycb/horizon | openstack_dashboard/dashboards/project/stacks/resource_types/tables.py | Python | apache-2.0 | 1,101 |
# -*- coding: utf-8 -*-
# Copyright © 2014-2016 Digital Catapult and The Copyright Hub Foundation
# (together the Open Permissions Platform Coalition)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""API Roles handler. Allows to create and modify roles
"""
from koi import auth
from perch import Token, User
from tornado.gen import coroutine
from .base import BaseHandler
class RolesHandler(BaseHandler):
"""Responsible for managing role resources
"""
@auth.auth_required(Token.valid)
@coroutine
def get(self):
"""Get all roles"""
roles = {x.value for x in User.roles}
result = [{'id': x, 'name': x.title()} for x in roles]
self.finish({
'status': 200,
'data': result
})
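# Editor's sketch (illustrative, not part of the handler): assuming User.roles
# yields role names such as "administrator" and "user", the GET above finishes
# with a body shaped like
#
#     {'status': 200,
#      'data': [{'id': 'administrator', 'name': 'Administrator'},
#               {'id': 'user', 'name': 'User'}]}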
| openpermissions/accounts-srv | accounts/controllers/roles_handler.py | Python | gpl-3.0 | 1,354 |
# -*- coding=utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
def remove(project=None, only="default", packages=[], clean=True):
from passa.models.lockers import PinReuseLocker
from passa.operations.lock import lock
default = (only != "dev")
develop = (only != "default")
project = project
project.remove_keys_from_pipfile(
packages, default=default, develop=develop,
)
locker = PinReuseLocker(project)
success = lock(locker)
if not success:
return 1
project._p.write()
project._l.write()
print("Written to project at", project.root)
if not clean:
return
from passa.models.synchronizers import Cleaner
from passa.operations.sync import clean
cleaner = Cleaner(project, default=True, develop=True)
success = clean(cleaner)
if not success:
return 1
print("Cleaned project at", project.root)
| kennethreitz/pipenv | pipenv/vendor/passa/actions/remove.py | Python | mit | 946 |
from __future__ import print_function
import os
import unittest
import warnings
import numpy as np
warnings.simplefilter('always')
np.seterr(all='raise')
import pyNastran
from pyNastran.converters.aflr2.aflr2 import read_bedge
from pyNastran.gui.testing_methods import GUIMethods
from pyNastran.converters.aflr2.bedge_io import BEdge_IO
pkg_path = pyNastran.__path__[0]
model_path = os.path.join(pkg_path, 'converters', 'aflr2')
class BEdge_GUI(BEdge_IO, GUIMethods):
def __init__(self):
GUIMethods.__init__(self)
BEdge_IO.__init__(self)
class TestBEdgeGUI(unittest.TestCase):
def test_bedge_geometry(self):
"""tests the bedge gui"""
bedge_filename = os.path.join(model_path, 'm3.bedge')
dirname = None
test = BEdge_GUI()
test.load_bedge_geometry(bedge_filename, dirname)
if __name__ == '__main__': # pragma: no cover
unittest.main()
| saullocastro/pyNastran | pyNastran/converters/aflr2/test_bedge_gui.py | Python | lgpl-3.0 | 912 |
#!/usr/bin/env python3
from datetime import date
from mathmodeling import misc, google, wikipedia
from scipy.interpolate import interp1d
from scipy.integrate import quad as integ
from sys import argv, stdout
import subprocess, os, os.path
def search_data(row):
return (float(row[0]), float(row[1]))
def article_data(row):
return (
misc.date_to_float(date(*map(int, row[0].split('-')))),
int(row[1])
)
def interp(rows, start, kind, integrate):
# interpolate function from data
f = interp1d(
[row[0] for row in rows], [row[1] for row in rows],
kind = kind, copy = False, assume_sorted = False
)
# numerically integrate function
if integrate:
def g(t, cache = [start, 0]):
# cache intermediate integrals for speed
y = (
integ(f, cache[0], t, full_output = 1)[0] + cache[1]
if cache[0] < t else
integ(f, start, t, full_output = 1)[0]
)
cache[:] = (t, y)
return y
return g
# don't integrate
else:
return f
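# Editor's sketch (illustrative, not part of the original script): interp()
# turns (x, y) samples into a callable, optionally integrating it so the result
# is cumulative from `start`. The numbers below are made up.
def _example_interp():
    rows = [(2000.0, 1.0), (2001.0, 2.0), (2002.0, 4.0)]
    rate = interp(rows, 2000.0, 'linear', integrate=False)   # instantaneous value
    total = interp(rows, 2000.0, 'linear', integrate=True)   # cumulative since 2000.0
    return rate(2001.5), total(2001.5)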
def parsed_time(d):
d += '/parsed.csv'
return os.stat(d).st_mtime if os.path.exists(d) else 0
def google_data(google_term, search_interp, google_cookie):
# print Google message
print("Getting Google Trends for ", google_term, "...", sep = '', end = '')
stdout.flush()
# grab Google data
if os.path.exists('raw.csv'):
print("loading...", end = '')
stdout.flush()
google_data = misc.fromcsv('raw.csv', google.RawGData)
else:
print("requesting...", end = '')
stdout.flush()
google_data = google.request(google_term, google_cookie)
misc.tocsv(google_data, google.RawGData.__slots__, 'raw.csv')
# fix Google data
print("parsing...", end = '')
stdout.flush()
google_data = google.augment(google_data, search_interp)
misc.tocsv(google_data, google.GData.__slots__, 'parsed.csv')
print("done.")
def wiki_data(wiki_page, article_interp):
# print Wikipedia message
print("Getting Wikipedia page for ", wiki_page, "...", sep = '', end = '')
stdout.flush()
# grab Wikipedia data
if os.path.exists('raw.csv'):
print("loading...", end = '')
stdout.flush()
wiki_data = misc.fromcsv('raw.csv', wikipedia.RawWData)
else:
print("requesting...", end = '')
stdout.flush()
wiki_data = wikipedia.request(wiki_page)
misc.tocsv(wiki_data, wikipedia.RawWData.__slots__, 'raw.csv')
# remove reverts from data
print("reverts...", end = '')
stdout.flush()
wiki_data = wikipedia.revert_fix(wiki_data)
misc.tocsv(wiki_data, wikipedia.RawWData.__slots__, 'reverts.csv')
# remove spikes from data
print("spikes...", end = '')
stdout.flush()
wiki_data = wikipedia.spike_fix(wiki_data)
misc.tocsv(wiki_data, wikipedia.RawWData.__slots__, 'spikes.csv')
# fix Wikipedia data
print("parsing...", end = '')
stdout.flush()
wiki_data = wikipedia.augment(wiki_data, article_interp)
misc.tocsv(wiki_data, wikipedia.WData.__slots__, 'parsed.csv')
print("done.")
def main():
# grab Google cookie
if len(argv) < 2:
return print("error: cookie not provided")
google_cookie = argv[1]
# grab terms data
gterms = set()
wpages = set()
for gterm, wpage in misc.fromcsv('data/terms.csv', tuple):
gterms.add(gterm)
wpages.add(wpage)
# grab terms/pages data
pwd = os.getcwd()
gdata = pwd + '/data/google/'
wdata = pwd + '/data/wikipedia/'
graph = pwd + '/graph.R'
# load searches/articles data
searches = misc.fromcsv('data/searches.csv', search_data)
articles = misc.fromcsv('data/articles.csv', article_data)
# interpolate searches/articles data
sf = interp(searches, misc.date_to_float(date(1998, 12, 1)), 'cubic', True)
af = interp(articles, misc.date_to_float(date(2001, 1, 10)), 'linear', False)
# check changed times of modules
gtime = os.stat('mathmodeling/google.py').st_mtime
wtime = os.stat('mathmodeling/wikipedia.py').st_mtime
# grab terms
for d in gterms:
os.makedirs(gdata + d, mode = 0o755, exist_ok = True)
os.chdir(gdata + d)
if parsed_time(gdata + d) < gtime:
google_data(d, sf, google_cookie)
else:
print("Data for Google Trends for", d, "is up to date.")
# grab pages
for d in wpages:
os.makedirs(wdata + d, mode = 0o755, exist_ok = True)
os.chdir(wdata + d)
if parsed_time(wdata + d) < wtime:
wiki_data(d, af)
else:
print("Data for Wikipedia page for", d, "is up to date.")
# get back out of directory
os.chdir(pwd)
# generate graphs
print("Generating graphs...")
with open(graph) as f:
subprocess.call(
['R', '--vanilla', '--quiet'],
stdin = f, stdout = subprocess.DEVNULL
)
if __name__ == "__main__":
main()
| Undeterminant/math-modeling | accum-data.py | Python | cc0-1.0 | 4,502 |
import numpy as np
from scipy.signal import firwin, kaiserord, convolve2d, decimate
#from matplotlib import pyplot as plt
# DEFINE FILTERS FOR PREPROCESSING:
def preprocess_multichannel_data(matrix,params):
"""
:param matrix: multichannel EEG data
    :param params: dict of preprocessing parameters, including the sampling frequency 'fs'
:return: data without mains, electrical artefacts, aliasing
authors: Lea and Vincent
"""
assert(type(matrix)==np.ndarray)
#print 'downsample...'
matrix,fs = downsample(matrix,params)
params['fs']=fs # update sampling rate
#print 'initial ', matrix.shape
matrix = remove_elec_noise(matrix,params)
#print 'elec noise ', matrix.shape
matrix = anti_alias_filter(matrix,params)
#print 'anti alias ', matrix.shape
matrix = remove_dc(matrix)
#print 'dc ', matrix.shape
return matrix
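# Editor's sketch (illustrative): the params dict consumed above and by the
# helpers below. The key names come from this module; the numeric values are
# made-up examples, not recommendations.
def _example_preprocess_call(matrix):
    params = {
        'fs': 400.0,                        # original sampling rate (Hz)
        'targetrate': 400.0,                # desired rate after decimation
        'elec_noise_cutoff': [49.0, 51.0],  # band-stop edges around the mains frequency
        'elec_noise_width': 2.0,            # transition width (Hz)
        'elec_noise_attenuation': 60.0,     # stop-band attenuation (dB)
        'anti_alias_cutoff': 180.0,         # low-pass cutoff below Nyquist
        'anti_alias_width': 10.0,
        'anti_alias_attenuation': 60.0,
    }
    return preprocess_multichannel_data(matrix, params)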
def downsample(matrix,params):
"""
:param matrix: multichannel EEG data
    :param params: dict providing the sampling frequency 'fs' and the desired 'targetrate'
    :return: (downsampled data, new sampling rate); the signal is decimated towards 'targetrate'
"""
fs = int(params['fs'])
targetrate = int(params['targetrate'])
dsfactor = max(1,int(fs/targetrate)) #should come out to 1, i.e. no downsampling for dogs
#print 'dsfactor calculated =', dsfactor
#maxdsfactor = int(8)
#if dsfactor > maxdsfactor:
# dsfactor = maxdsfactor
#print 'dsfactor used =', dsfactor
if dsfactor>1:
ds_matrix = decimate(matrix,dsfactor,axis=1)
return ds_matrix, float(fs/dsfactor)
else:
return matrix, float(fs)
#ds_list=[]
#for i in range(matrix.shape[0]):
# x = matrix[i,:]
# ds_x = decimate(x, dsfactor)
# ds_list.append(ds_x)
#ds_matrix = np.asarray(ds_list)
def remove_dc(x):
#print x.shape
assert(type(x)==np.ndarray)
"""
Remove mean of signal
:return: x - mean(x)
"""
x_dc = np.zeros(x.shape)
for i in range(x.shape[0]):
x_dc[i,:] = x[i,:] - np.mean(x[i,:])
return x_dc
# build custom filter; inspiration : http://wiki.scipy.org/Cookbook/FIRFilter
def build_filter(fs,cutoff,width,attenuation):
"""
    Build a low-pass filter impulse response (Kaiser-window FIR design)
    :param fs: sampling rate
    :param cutoff: cutoff frequency in Hz, or a [low, high] pair for a band-stop design
    :param width: transition width in Hz
    :param attenuation: stop-band attenuation in dB
    :return: 1D array impulse response of the filter
"""
# define params:
nyq_rate = fs / 2.0 # Nyquist frequency
if type(cutoff)==list:
cutoff = [c/nyq_rate for c in cutoff]
else:
cutoff=cutoff/nyq_rate
# Compute the order and Kaiser parameter for the FIR filter:
N, beta = kaiserord(attenuation, width/nyq_rate)
# Use firwin with a Kaiser window to create a lowpass FIR filter:
if N%2==0:
N+=1# odd trick
taps = firwin(N, cutoff, window=('kaiser', beta),pass_zero=True)
return taps
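# Editor's sketch (illustrative): with a scalar cutoff and pass_zero=True the
# design above is a low-pass filter; with a [low, high] pair it becomes a
# band-stop filter, which is how remove_elec_noise() below suppresses mains hum.
def _example_build_filter():
    lowpass = build_filter(fs=400.0, cutoff=180.0, width=10.0, attenuation=60.0)
    bandstop = build_filter(fs=400.0, cutoff=[49.0, 51.0], width=2.0, attenuation=60.0)
    return lowpass, bandstop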
def remove_elec_noise(x,params):
fs = params['fs']
# import the relevant filters from signal processing library
assert(type(x)==np.ndarray)
cutoff = params['elec_noise_cutoff']
width = params['elec_noise_width']
attenuation = params['elec_noise_attenuation']
f = build_filter(fs,cutoff,width,attenuation)
f = np.expand_dims(f,axis=0)
#print x.shape, f.shape
filtered_x = convolve2d(x,f,mode='same') # NB: mode='same' cuts beginning & end
return filtered_x
def anti_alias_filter(x,params):
"""
    Anti-aliasing: low-pass filter with a cutoff below the Nyquist frequency
:return: anti-aliased signal
"""
fs = params['fs']
assert(type(x)==np.ndarray)
# def build_aa_filter(fs):
# """
# :param fs: sampling rate
# :return: 1D array impulse response using nyq frequ filter
# """
# # define params:
# nyq_rate = fs / 2.0 # Nyquist frequency
# width = 1000.0/nyq_rate # width of the transition from pass to stop relative to the Nyquist rate; here: 5 Hz
# ripple_db = 120.0 # attenuation in the stop band, in dB.
# cutoff_hz = nyq_rate - 1 # cutoff freq
#
# # Compute the order and Kaiser parameter for the FIR filter:
# N, beta = kaiserord(ripple_db, width)
#
# # Use firwin with a Kaiser window to create a lowpass FIR filter:
# taps = firwin(N, cutoff_hz/nyq_rate, window=('kaiser', beta))
#
# return taps
cutoff = params['anti_alias_cutoff']
width = params['anti_alias_width']
attenuation = params['anti_alias_attenuation']
if (cutoff==None) or (cutoff>=fs/2.):
return x
else:
f = build_filter(fs,cutoff,width,attenuation)
f = np.expand_dims(f,axis=0)
filtered_x = convolve2d(x,f,mode='same') # NB: mode='same' cuts beginning & end
return filtered_x
| vincentadam87/gatsby-hackathon-seizure | code/python/seizures/preprocessing/preprocessing.py | Python | bsd-2-clause | 4,687 |
# Create your views here.
from django.shortcuts import render
from django.shortcuts import redirect
from django.http import HttpResponse
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.http import HttpResponseRedirect
from django.core.urlresolvers import reverse
from livecode.helpgit.forms import UserForm, UserProfileForm
# NOTE: the files() view below also needs Document and DocumentForm; they are
# assumed to live in this app's models and forms modules.
from livecode.helpgit.models import Document
from livecode.helpgit.forms import DocumentForm
def user_login(request):
"""
    Function for handling authentication of a user.
    :param request: HTTP request object.
"""
if request.method =="POST":
username = request.POST.get('username')
password = request.POST.get('password')
user = authenticate(username=username, password=password)
if user:
if user.is_active:
login(request,user)
return HttpResponseRedirect('https://vaibhawlabs.pythonanywhere.com/new/')
else:
return HttpResponse("Your account has been diasbled")
else:
return HttpResponse("Invalid Username/Password")
else:
return render(request, 'login.html')
def files(request):
"""
Function for handling file upload.
    :param request: HTTP request object.
"""
if request.method == 'POST':
form = DocumentForm(request.POST, request.FILES)
if form.is_valid():
newdoc = Document(docfile = request.FILES['docfile'])
newdoc.save()
# Redirect to the document list after POST
return HttpResponseRedirect('https://vaibhawlabs.pythonanywhere.com/files/')
else:
        form = DocumentForm() # An empty, unbound form
# Load documents for the list page
documents = Document.objects.all()
# Render list page with the documents and the form
return render_to_response(
'welcome.html',
{'documents': documents, 'form': form},
context_instance=RequestContext(request)
)
def new(request):
return render(request,'new.html')
def about(request):
return render(request,'about.html')
def search(request):
"""
Function for searching over wikipedia.
    :param request: HTTP request object.
"""
suicide_list = ["How to commit suicide", "how to commit suicide","I want to commit suicide", "i want to commit suicide", "I don't want to live", "i don't want to live", "I don't want to live anymore","i don't want to live anymore", "I want to die", "i want to die"]
if 'q' in request.GET:
if request.GET['q'] in suicide_list:
return render_to_response(
'search.html',
{'text':'Need help? Call! "Aasra" - 022 2754 6669'},
context_instance=RequestContext(request)
)
else:
try:
import wikipedia
text = wikipedia.summary(request.GET['q'], sentences=10)
return render_to_response(
'search.html',
{'text': text},
context_instance=RequestContext(request)
)
except:
return redirect ('https://vaibhawlabs.pythonanywhere.com/new/')
def register(request):
"""
Function for handling registration.
    :param request: HTTP request object.
"""
registered = False
if request.method=="POST":
user_form = UserForm(data=request.POST)
profile_form=UserProfileForm(data=request.POST)
if user_form.is_valid() and profile_form.is_valid():
user = user_form.save()
user.set_password(user.password)
user.save()
profile=profile_form.save(commit=False)
profile.user = user
profile.save()
registered = True
return redirect ('https://vaibhawlabs.pythonanywhere.com')
else:
user_form=UserForm()
profile_form=UserProfileForm()
return render(request, 'register.html', {'user_form':user_form, 'profile_form': profile_form, 'registered': registered} )
def user_logout(request):
logout(request)
return render (request, 'login.html')
def robot(request):
return render (request, 'robot.html')
| vaibhawvipul/WikipediaForBlinds | livecode/helpgit/views.py | Python | gpl-2.0 | 4,214 |
#! /usr/bin/env python
import xml.etree.ElementTree as ET
import shutil
import sys
import os
from PyQt5 import QtCore, QtGui, QtWidgets
class PackItemModel(object):
def __init__(self):
self.item_id = None
self.name = ""
self.page = ""
self.desc = ""
self.data = None
class PackItemViewModel(QtCore.QAbstractTableModel):
def __init__(self, parent=None):
super(PackItemViewModel, self).__init__(parent)
self.items = []
self.headers = [
"Name",
"Page",
"Description"]
def rowCount(self, parent=QtCore.QModelIndex()):
return len(self.items)
def columnCount(self, parent=QtCore.QModelIndex()):
return len(self.headers)
def headerData(self, section, orientation, role=QtCore.Qt.DisplayRole):
if orientation != QtCore.Qt.Horizontal:
return None
if role == QtCore.Qt.DisplayRole:
return self.headers[section]
return None
def set_root(self, tree, file_name):
self.tree = tree
self.file_name = file_name
def save(self):
self.tree.write(self.file_name)
def add_item(self, itm):
row = self.rowCount()
self.beginInsertRows(QtCore.QModelIndex(), row, row)
self.items.append(itm)
self.endInsertRows()
def add_items(self, items):
row = self.rowCount()
self.beginInsertRows(QtCore.QModelIndex(), row, row + len(items) - 1)
self.items += items
self.endInsertRows()
def clean(self):
self.beginResetModel()
self.items = []
self.endResetModel()
def data(self, index, role):
if not index.isValid() or index.row() >= len(self.items):
return None
item = self.items[index.row()]
if role == QtCore.Qt.DisplayRole or role == QtCore.Qt.EditRole or role == QtCore.Qt.ToolTipRole:
if index.column() == 0:
return item.attrib['name']
if index.column() == 1:
return get_page_number(item)
if index.column() == 2:
return get_description(item)
elif role == QtCore.Qt.UserRole:
return item
return None
def setData(self, index, value, role):
if not index.isValid():
return False
item = self.items[index.row()]
ret = False
if role == QtCore.Qt.EditRole:
if index.column() == 1:
item.attrib['page'] = value
ret = True
if index.column() == 2:
set_description(item, value)
ret = True
else:
ret = super(PackItemViewModel, self).setData(index, value, role)
if ret:
self.save()
return ret
def flags(self, index):
if not index.isValid():
return QtCore.Qt.ItemIsDropEnabled
flags = QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled
if index.column() == 1 or index.column() == 2:
flags |= QtCore.Qt.ItemIsEditable
return flags
class BookInfoEditor(QtWidgets.QMainWindow):
def __init__(self, parent=None):
super(BookInfoEditor, self).__init__(parent)
self.setWindowTitle("Book info editor")
self.model = PackItemViewModel()
read_items(self.model)
self.setMinimumSize(800, 600)
table = self.build_grid()
self.setCentralWidget(table)
table.setVisible(False)
table.resizeColumnsToContents()
table.setVisible(True)
def build_grid(self):
item_view = QtWidgets.QTableView()
item_view.setSortingEnabled(True)
item_view.horizontalHeader().setSectionResizeMode(
QtWidgets.QHeaderView.Interactive)
item_view.horizontalHeader().setStretchLastSection(True)
item_view.horizontalHeader().setCascadingSectionResizes(True)
item_view.doubleClicked.connect(self.on_item_activated)
self._sort_model = QtCore.QSortFilterProxyModel(self)
self._sort_model.setDynamicSortFilter(True)
self._sort_model.setSourceModel(self.model)
item_view.setModel(self._sort_model)
item_view.setMinimumSize(600, 600)
return item_view
def on_item_activated(self, index):
pass
def main():
app = QtWidgets.QApplication(sys.argv)
editor = BookInfoEditor()
editor.show()
sys.exit(app.exec_())
def read_input(prompt):
print(prompt)
buffer = []
prev = None
while True:
line = sys.stdin.readline().rstrip('\n')
if line == '' and prev == '':
break
else:
buffer.append(line)
prev = line
return ' '.join(buffer)
def read_sub_element_text(xml_element, sub_element_name, default_value=None):
return xml_element.find(sub_element_name).text if (
xml_element.find(sub_element_name) is not None) else default_value
def write_sub_element_text(xml_element, sub_element_name, text=""):
sub_elem = xml_element.find(sub_element_name)
if sub_elem is None:
sub_elem = ET.Element(sub_element_name)
sub_elem.text = text
xml_element.append(sub_elem)
else:
print(f'set text sub element {sub_element_name}')
sub_elem.text = text
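# Editor's sketch (illustrative): round-tripping a value through the two
# helpers above with a throwaway ElementTree element.
def _example_sub_element_text():
    elem = ET.Element('Item', attrib={'name': 'Example', 'page': '42'})
    write_sub_element_text(elem, 'Description', 'A short description.')
    return read_sub_element_text(elem, 'Description')  # -> 'A short description.'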
def get_page_number(elem):
if 'page' in elem.attrib:
return elem.attrib['page']
return ''
def set_page_number(elem, pg):
elem.attrib['page'] = pg
def get_description(elem):
return read_sub_element_text(elem, "Description")
def set_description(elem, text):
lines = text.split()
text = ' '.join(lines)
write_sub_element_text(elem, "Description", text)
def read_items(model):
file_name = sys.argv[1]
# backup original
backup_file = file_name + '.bak'
if not os.path.exists(backup_file):
shutil.copyfile(file_name, backup_file)
tree = ET.parse(file_name)
root = tree.getroot()
model.set_root(tree, file_name)
for elem in root:
model.add_item(elem)
#normalize, just one run
#desc = get_description(elem)
#if desc:
# set_description(elem, desc)
#tree.write(file_name)
if __name__ == '__main__':
    main()
 | OpenNingia/l5r-character-manager-3 | tools/scripts/bookinfo2.py | Python | gpl-3.0 | 6,292 |
from argparse import ArgumentParser
from typing import Any
from zerver.lib.actions import do_change_user_delivery_email
from zerver.lib.management import ZulipBaseCommand
class Command(ZulipBaseCommand):
help = """Change the email address for a user."""
def add_arguments(self, parser: ArgumentParser) -> None:
self.add_realm_args(parser)
parser.add_argument('old_email', metavar='<old email>', type=str,
help='email address to change')
parser.add_argument('new_email', metavar='<new email>', type=str,
help='new email address')
def handle(self, *args: Any, **options: str) -> None:
old_email = options['old_email']
new_email = options['new_email']
realm = self.get_realm(options)
user_profile = self.get_user(old_email, realm)
do_change_user_delivery_email(user_profile, new_email)
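# Editor's sketch (illustrative, not part of the command): the command can be
# exercised programmatically through Django's call_command; the addresses are
# made up.
def _example_invocation():
    from django.core.management import call_command
    call_command('change_user_email', 'old@example.com', 'new@example.com')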
| timabbott/zulip | zerver/management/commands/change_user_email.py | Python | apache-2.0 | 922 |
from config import config, ConfigSubsection, ConfigSlider, ConfigYesNo, ConfigNothing
from enigma import eDBoxLCD
from Components.SystemInfo import SystemInfo
# [ iqteam
import fcntl
# iqteam ]
class LCD:
def __init__(self):
pass
def setBright(self, value):
value *= 255
value /= 10
if value > 255:
value = 255
eDBoxLCD.getInstance().setLCDBrightness(value)
def setContrast(self, value):
value *= 63
value /= 20
if value > 63:
value = 63
eDBoxLCD.getInstance().setLCDContrast(value)
def setInverted(self, value):
if value:
value = 255
eDBoxLCD.getInstance().setInverted(value)
def setFlipped(self, value):
eDBoxLCD.getInstance().setFlipped(value)
def isOled(self):
return eDBoxLCD.getInstance().isOled()
def leaveStandby():
config.lcd.bright.apply()
def standbyCounterChanged(configElement):
from Screens.Standby import inStandby
inStandby.onClose.append(leaveStandby)
config.lcd.standby.apply()
def InitLcd():
detected = eDBoxLCD.getInstance().detected()
SystemInfo["Display"] = detected
config.lcd = ConfigSubsection();
if detected:
def setLCDbright(configElement):
ilcd.setBright(configElement.value);
def setLCDcontrast(configElement):
ilcd.setContrast(configElement.value);
def setLCDinverted(configElement):
ilcd.setInverted(configElement.value);
def setLCDflipped(configElement):
ilcd.setFlipped(configElement.value);
standby_default = 0
ilcd = LCD()
if not ilcd.isOled():
config.lcd.contrast = ConfigSlider(default=5, limits=(0, 20))
config.lcd.contrast.addNotifier(setLCDcontrast);
else:
config.lcd.contrast = ConfigNothing()
standby_default = 1
config.lcd.standby = ConfigSlider(default=standby_default, limits=(0, 10))
config.lcd.standby.addNotifier(setLCDbright);
config.lcd.standby.apply = lambda : setLCDbright(config.lcd.standby)
config.lcd.bright = ConfigSlider(default=5, limits=(0, 10))
config.lcd.bright.addNotifier(setLCDbright);
config.lcd.bright.apply = lambda : setLCDbright(config.lcd.bright)
config.lcd.bright.callNotifiersOnSaveAndCancel = True
config.lcd.invert = ConfigYesNo(default=False)
config.lcd.invert.addNotifier(setLCDinverted);
config.lcd.flip = ConfigYesNo(default=False)
config.lcd.flip.addNotifier(setLCDflipped);
else:
def doNothing():
pass
config.lcd.contrast = ConfigNothing()
config.lcd.bright = ConfigNothing()
config.lcd.standby = ConfigNothing()
config.lcd.bright.apply = lambda : doNothing()
config.lcd.standby.apply = lambda : doNothing()
config.misc.standbyCounter.addNotifier(standbyCounterChanged, initial_call = False)
# [iq
config.vfd_scroll.addNotifier(setFpmRotate)
def setFpmRotate(configElement):
LCD_IOCTL_ROTATE_START = 4
lcd = open('/dev/dbox/lcd0', 'w')
on = 2 if configElement.value else 0
fcntl.ioctl(lcd, LCD_IOCTL_ROTATE_START, on)
# iq]
| pli3/enigma2-git | lib/python/Components/Lcd.py | Python | gpl-2.0 | 2,860 |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from distutils.version import LooseVersion
import matplotlib as mat
import numpy as np
import pandas as pd
from matplotlib.axes._base import _process_plot_format
from pandas.core.dtypes.inference import is_list_like
from pandas.io.formats.printing import pprint_thing
from pyspark.pandas.plot import (
TopNPlotBase,
SampledPlotBase,
HistogramPlotBase,
BoxPlotBase,
unsupported_function,
KdePlotBase,
)
if LooseVersion(pd.__version__) < LooseVersion("0.25"):
from pandas.plotting._core import (
_all_kinds,
BarPlot as PandasBarPlot,
BoxPlot as PandasBoxPlot,
HistPlot as PandasHistPlot,
MPLPlot as PandasMPLPlot,
PiePlot as PandasPiePlot,
AreaPlot as PandasAreaPlot,
LinePlot as PandasLinePlot,
BarhPlot as PandasBarhPlot,
ScatterPlot as PandasScatterPlot,
KdePlot as PandasKdePlot,
)
else:
from pandas.plotting._matplotlib import (
BarPlot as PandasBarPlot,
BoxPlot as PandasBoxPlot,
HistPlot as PandasHistPlot,
PiePlot as PandasPiePlot,
AreaPlot as PandasAreaPlot,
LinePlot as PandasLinePlot,
BarhPlot as PandasBarhPlot,
ScatterPlot as PandasScatterPlot,
KdePlot as PandasKdePlot,
)
from pandas.plotting._core import PlotAccessor
from pandas.plotting._matplotlib.core import MPLPlot as PandasMPLPlot
_all_kinds = PlotAccessor._all_kinds
class PandasOnSparkBarPlot(PandasBarPlot, TopNPlotBase):
def __init__(self, data, **kwargs):
super().__init__(self.get_top_n(data), **kwargs)
def _plot(self, ax, x, y, w, start=0, log=False, **kwds):
self.set_result_text(ax)
return ax.bar(x, y, w, bottom=start, log=log, **kwds)
class PandasOnSparkBoxPlot(PandasBoxPlot, BoxPlotBase):
def boxplot(
self,
ax,
bxpstats,
notch=None,
sym=None,
vert=None,
whis=None,
positions=None,
widths=None,
patch_artist=None,
bootstrap=None,
usermedians=None,
conf_intervals=None,
meanline=None,
showmeans=None,
showcaps=None,
showbox=None,
showfliers=None,
boxprops=None,
labels=None,
flierprops=None,
medianprops=None,
meanprops=None,
capprops=None,
whiskerprops=None,
manage_ticks=None,
# manage_xticks is for compatibility of matplotlib < 3.1.0.
        # Remove this once the minimum supported matplotlib version is 3.1.0.
manage_xticks=None,
autorange=False,
zorder=None,
precision=None,
):
def update_dict(dictionary, rc_name, properties):
"""Loads properties in the dictionary from rc file if not already
in the dictionary"""
rc_str = "boxplot.{0}.{1}"
if dictionary is None:
dictionary = dict()
for prop_dict in properties:
dictionary.setdefault(prop_dict, mat.rcParams[rc_str.format(rc_name, prop_dict)])
return dictionary
# Common property dictionaries loading from rc
flier_props = [
"color",
"marker",
"markerfacecolor",
"markeredgecolor",
"markersize",
"linestyle",
"linewidth",
]
default_props = ["color", "linewidth", "linestyle"]
boxprops = update_dict(boxprops, "boxprops", default_props)
whiskerprops = update_dict(whiskerprops, "whiskerprops", default_props)
capprops = update_dict(capprops, "capprops", default_props)
medianprops = update_dict(medianprops, "medianprops", default_props)
meanprops = update_dict(meanprops, "meanprops", default_props)
flierprops = update_dict(flierprops, "flierprops", flier_props)
if patch_artist:
boxprops["linestyle"] = "solid"
boxprops["edgecolor"] = boxprops.pop("color")
# if non-default sym value, put it into the flier dictionary
# the logic for providing the default symbol ('b+') now lives
# in bxp in the initial value of final_flierprops
# handle all of the `sym` related logic here so we only have to pass
# on the flierprops dict.
if sym is not None:
# no-flier case, which should really be done with
# 'showfliers=False' but none-the-less deal with it to keep back
# compatibility
if sym == "":
# blow away existing dict and make one for invisible markers
flierprops = dict(linestyle="none", marker="", color="none")
# turn the fliers off just to be safe
showfliers = False
# now process the symbol string
else:
# process the symbol string
# discarded linestyle
_, marker, color = _process_plot_format(sym)
# if we have a marker, use it
if marker is not None:
flierprops["marker"] = marker
# if we have a color, use it
if color is not None:
                    # assume that if a color is passed in, the user wants a
                    # filled symbol; if they want more control they can use
                    # flierprops
flierprops["color"] = color
flierprops["markerfacecolor"] = color
flierprops["markeredgecolor"] = color
# replace medians if necessary:
if usermedians is not None:
if len(np.ravel(usermedians)) != len(bxpstats) or np.shape(usermedians)[0] != len(
bxpstats
):
raise ValueError("usermedians length not compatible with x")
else:
# reassign medians as necessary
for stats, med in zip(bxpstats, usermedians):
if med is not None:
stats["med"] = med
if conf_intervals is not None:
if np.shape(conf_intervals)[0] != len(bxpstats):
err_mess = "conf_intervals length not compatible with x"
raise ValueError(err_mess)
else:
for stats, ci in zip(bxpstats, conf_intervals):
if ci is not None:
if len(ci) != 2:
raise ValueError("each confidence interval must " "have two values")
else:
if ci[0] is not None:
stats["cilo"] = ci[0]
if ci[1] is not None:
stats["cihi"] = ci[1]
should_manage_ticks = True
if manage_xticks is not None:
should_manage_ticks = manage_xticks
if manage_ticks is not None:
should_manage_ticks = manage_ticks
if LooseVersion(mat.__version__) < LooseVersion("3.1.0"):
extra_args = {"manage_xticks": should_manage_ticks}
else:
extra_args = {"manage_ticks": should_manage_ticks}
artists = ax.bxp(
bxpstats,
positions=positions,
widths=widths,
vert=vert,
patch_artist=patch_artist,
shownotches=notch,
showmeans=showmeans,
showcaps=showcaps,
showbox=showbox,
boxprops=boxprops,
flierprops=flierprops,
medianprops=medianprops,
meanprops=meanprops,
meanline=meanline,
showfliers=showfliers,
capprops=capprops,
whiskerprops=whiskerprops,
zorder=zorder,
**extra_args,
)
return artists
def _plot(self, ax, bxpstats, column_num=None, return_type="axes", **kwds):
bp = self.boxplot(ax, bxpstats, **kwds)
if return_type == "dict":
return bp, bp
elif return_type == "both":
return self.BP(ax=ax, lines=bp), bp
else:
return ax, bp
def _compute_plot_data(self):
colname = self.data.name
spark_column_name = self.data._internal.spark_column_name_for(self.data._column_label)
data = self.data
# Updates all props with the rc defaults from matplotlib
self.kwds.update(PandasOnSparkBoxPlot.rc_defaults(**self.kwds))
# Gets some important kwds
showfliers = self.kwds.get("showfliers", False)
whis = self.kwds.get("whis", 1.5)
labels = self.kwds.get("labels", [colname])
# This one is pandas-on-Spark specific to control precision for approx_percentile
precision = self.kwds.get("precision", 0.01)
# # Computes mean, median, Q1 and Q3 with approx_percentile and precision
col_stats, col_fences = BoxPlotBase.compute_stats(data, spark_column_name, whis, precision)
# # Creates a column to flag rows as outliers or not
outliers = BoxPlotBase.outliers(data, spark_column_name, *col_fences)
# # Computes min and max values of non-outliers - the whiskers
whiskers = BoxPlotBase.calc_whiskers(spark_column_name, outliers)
if showfliers:
fliers = BoxPlotBase.get_fliers(spark_column_name, outliers, whiskers[0])
else:
fliers = []
# Builds bxpstats dict
stats = []
item = {
"mean": col_stats["mean"],
"med": col_stats["med"],
"q1": col_stats["q1"],
"q3": col_stats["q3"],
"whislo": whiskers[0],
"whishi": whiskers[1],
"fliers": fliers,
"label": labels[0],
}
stats.append(item)
self.data = {labels[0]: stats}
def _make_plot(self):
bxpstats = list(self.data.values())[0]
ax = self._get_ax(0)
kwds = self.kwds.copy()
for stats in bxpstats:
if len(stats["fliers"]) > 1000:
stats["fliers"] = stats["fliers"][:1000]
ax.text(
1,
1,
"showing top 1,000 fliers only",
size=6,
ha="right",
va="bottom",
transform=ax.transAxes,
)
ret, bp = self._plot(ax, bxpstats, column_num=0, return_type=self.return_type, **kwds)
self.maybe_color_bp(bp)
self._return_obj = ret
labels = [l for l, _ in self.data.items()]
labels = [pprint_thing(l) for l in labels]
if not self.use_index:
labels = [pprint_thing(key) for key in range(len(labels))]
self._set_ticklabels(ax, labels)
@staticmethod
def rc_defaults(
notch=None,
vert=None,
whis=None,
patch_artist=None,
bootstrap=None,
meanline=None,
showmeans=None,
showcaps=None,
showbox=None,
showfliers=None,
**kwargs
):
# Missing arguments default to rcParams.
if whis is None:
whis = mat.rcParams["boxplot.whiskers"]
if bootstrap is None:
bootstrap = mat.rcParams["boxplot.bootstrap"]
if notch is None:
notch = mat.rcParams["boxplot.notch"]
if vert is None:
vert = mat.rcParams["boxplot.vertical"]
if patch_artist is None:
patch_artist = mat.rcParams["boxplot.patchartist"]
if meanline is None:
meanline = mat.rcParams["boxplot.meanline"]
if showmeans is None:
showmeans = mat.rcParams["boxplot.showmeans"]
if showcaps is None:
showcaps = mat.rcParams["boxplot.showcaps"]
if showbox is None:
showbox = mat.rcParams["boxplot.showbox"]
if showfliers is None:
showfliers = mat.rcParams["boxplot.showfliers"]
return dict(
whis=whis,
bootstrap=bootstrap,
notch=notch,
vert=vert,
patch_artist=patch_artist,
meanline=meanline,
showmeans=showmeans,
showcaps=showcaps,
showbox=showbox,
showfliers=showfliers,
)
class PandasOnSparkHistPlot(PandasHistPlot, HistogramPlotBase):
def _args_adjust(self):
if is_list_like(self.bottom):
self.bottom = np.array(self.bottom)
def _compute_plot_data(self):
self.data, self.bins = HistogramPlotBase.prepare_hist_data(self.data, self.bins)
def _make_plot(self):
        # TODO: this logic is similar to KdePlot's. Might have to deduplicate it.
        # 'num_colors' requires calculating `shape`, which has to count all rows.
# Use 1 for now to save the computation.
colors = self._get_colors(num_colors=1)
stacking_id = self._get_stacking_id()
output_series = HistogramPlotBase.compute_hist(self.data, self.bins)
for (i, label), y in zip(enumerate(self.data._internal.column_labels), output_series):
ax = self._get_ax(i)
kwds = self.kwds.copy()
label = pprint_thing(label if len(label) > 1 else label[0])
kwds["label"] = label
style, kwds = self._apply_style_colors(colors, kwds, i, label)
if style is not None:
kwds["style"] = style
kwds = self._make_plot_keywords(kwds, y)
artists = self._plot(ax, y, column_num=i, stacking_id=stacking_id, **kwds)
self._add_legend_handle(artists[0], label, index=i)
@classmethod
def _plot(cls, ax, y, style=None, bins=None, bottom=0, column_num=0, stacking_id=None, **kwds):
if column_num == 0:
cls._initialize_stacker(ax, stacking_id, len(bins) - 1)
base = np.zeros(len(bins) - 1)
bottom = bottom + cls._get_stacked_values(ax, stacking_id, base, kwds["label"])
# Since the counts were computed already, we use them as weights and just generate
# one entry for each bin
n, bins, patches = ax.hist(bins[:-1], bins=bins, bottom=bottom, weights=y, **kwds)
cls._update_stacker(ax, stacking_id, n)
return patches
class PandasOnSparkPiePlot(PandasPiePlot, TopNPlotBase):
def __init__(self, data, **kwargs):
super().__init__(self.get_top_n(data), **kwargs)
def _make_plot(self):
self.set_result_text(self._get_ax(0))
super()._make_plot()
class PandasOnSparkAreaPlot(PandasAreaPlot, SampledPlotBase):
def __init__(self, data, **kwargs):
super().__init__(self.get_sampled(data), **kwargs)
def _make_plot(self):
self.set_result_text(self._get_ax(0))
super()._make_plot()
class PandasOnSparkLinePlot(PandasLinePlot, SampledPlotBase):
def __init__(self, data, **kwargs):
super().__init__(self.get_sampled(data), **kwargs)
def _make_plot(self):
self.set_result_text(self._get_ax(0))
super()._make_plot()
class PandasOnSparkBarhPlot(PandasBarhPlot, TopNPlotBase):
def __init__(self, data, **kwargs):
super().__init__(self.get_top_n(data), **kwargs)
def _make_plot(self):
self.set_result_text(self._get_ax(0))
super()._make_plot()
class PandasOnSparkScatterPlot(PandasScatterPlot, TopNPlotBase):
def __init__(self, data, x, y, **kwargs):
super().__init__(self.get_top_n(data), x, y, **kwargs)
def _make_plot(self):
self.set_result_text(self._get_ax(0))
super()._make_plot()
class PandasOnSparkKdePlot(PandasKdePlot, KdePlotBase):
def _compute_plot_data(self):
self.data = KdePlotBase.prepare_kde_data(self.data)
def _make_plot(self):
        # 'num_colors' requires calculating `shape`, which has to count all rows.
# Use 1 for now to save the computation.
colors = self._get_colors(num_colors=1)
stacking_id = self._get_stacking_id()
sdf = self.data._internal.spark_frame
for i, label in enumerate(self.data._internal.column_labels):
# 'y' is a Spark DataFrame that selects one column.
y = sdf.select(self.data._internal.spark_column_for(label))
ax = self._get_ax(i)
kwds = self.kwds.copy()
label = pprint_thing(label if len(label) > 1 else label[0])
kwds["label"] = label
style, kwds = self._apply_style_colors(colors, kwds, i, label)
if style is not None:
kwds["style"] = style
kwds = self._make_plot_keywords(kwds, y)
artists = self._plot(ax, y, column_num=i, stacking_id=stacking_id, **kwds)
self._add_legend_handle(artists[0], label, index=i)
def _get_ind(self, y):
return KdePlotBase.get_ind(y, self.ind)
@classmethod
def _plot(
cls, ax, y, style=None, bw_method=None, ind=None, column_num=None, stacking_id=None, **kwds
):
y = KdePlotBase.compute_kde(y, bw_method=bw_method, ind=ind)
lines = PandasMPLPlot._plot(ax, ind, y, style=style, **kwds)
return lines
_klasses = [
PandasOnSparkHistPlot,
PandasOnSparkBarPlot,
PandasOnSparkBoxPlot,
PandasOnSparkPiePlot,
PandasOnSparkAreaPlot,
PandasOnSparkLinePlot,
PandasOnSparkBarhPlot,
PandasOnSparkScatterPlot,
PandasOnSparkKdePlot,
]
_plot_klass = {getattr(klass, "_kind"): klass for klass in _klasses}
_common_kinds = {"area", "bar", "barh", "box", "hist", "kde", "line", "pie"}
_series_kinds = _common_kinds.union(set())
_dataframe_kinds = _common_kinds.union({"scatter", "hexbin"})
_pandas_on_spark_all_kinds = _common_kinds.union(_series_kinds).union(_dataframe_kinds)
def plot_pandas_on_spark(data, kind, **kwargs):
if kind not in _pandas_on_spark_all_kinds:
raise ValueError("{} is not a valid plot kind".format(kind))
from pyspark.pandas import DataFrame, Series
if isinstance(data, Series):
if kind not in _series_kinds:
return unsupported_function(class_name="pd.Series", method_name=kind)()
return plot_series(data=data, kind=kind, **kwargs)
elif isinstance(data, DataFrame):
if kind not in _dataframe_kinds:
return unsupported_function(class_name="pd.DataFrame", method_name=kind)()
return plot_frame(data=data, kind=kind, **kwargs)
def plot_series(
data,
kind="line",
ax=None, # Series unique
figsize=None,
use_index=True,
title=None,
grid=None,
legend=False,
style=None,
logx=False,
logy=False,
loglog=False,
xticks=None,
yticks=None,
xlim=None,
ylim=None,
rot=None,
fontsize=None,
colormap=None,
table=False,
yerr=None,
xerr=None,
label=None,
secondary_y=False, # Series unique
**kwds
):
"""
Make plots of Series using matplotlib / pylab.
Each plot kind has a corresponding method on the
``Series.plot`` accessor:
``s.plot(kind='line')`` is equivalent to
``s.plot.line()``.
Parameters
----------
data : Series
kind : str
- 'line' : line plot (default)
- 'bar' : vertical bar plot
- 'barh' : horizontal bar plot
- 'hist' : histogram
- 'box' : boxplot
- 'kde' : Kernel Density Estimation plot
- 'density' : same as 'kde'
- 'area' : area plot
- 'pie' : pie plot
ax : matplotlib axes object
If not passed, uses gca()
figsize : a tuple (width, height) in inches
use_index : boolean, default True
Use index as ticks for x axis
title : string or list
Title to use for the plot. If a string is passed, print the string at
the top of the figure. If a list is passed and `subplots` is True,
print each item in the list above the corresponding subplot.
grid : boolean, default None (matlab style default)
Axis grid lines
legend : False/True/'reverse'
Place legend on axis subplots
style : list or dict
matplotlib line style per column
logx : boolean, default False
Use log scaling on x axis
logy : boolean, default False
Use log scaling on y axis
loglog : boolean, default False
Use log scaling on both x and y axes
xticks : sequence
Values to use for the xticks
yticks : sequence
Values to use for the yticks
xlim : 2-tuple/list
ylim : 2-tuple/list
rot : int, default None
Rotation for ticks (xticks for vertical, yticks for horizontal plots)
fontsize : int, default None
Font size for xticks and yticks
colormap : str or matplotlib colormap object, default None
Colormap to select colors from. If string, load colormap with that name
from matplotlib.
colorbar : boolean, optional
If True, plot colorbar (only relevant for 'scatter' and 'hexbin' plots)
position : float
Specify relative alignments for bar plot layout.
From 0 (left/bottom-end) to 1 (right/top-end). Default is 0.5 (center)
table : boolean, Series or DataFrame, default False
If True, draw a table using the data in the DataFrame and the data will
be transposed to meet matplotlib's default layout.
If a Series or DataFrame is passed, use passed data to draw a table.
yerr : DataFrame, Series, array-like, dict and str
See :ref:`Plotting with Error Bars <visualization.errorbars>` for
detail.
xerr : same types as yerr.
label : label argument to provide to plot
secondary_y : boolean or sequence of ints, default False
If True then y-axis will be on the right
mark_right : boolean, default True
When using a secondary_y axis, automatically mark the column
labels with "(right)" in the legend
**kwds : keywords
Options to pass to matplotlib plotting method
Returns
-------
axes : :class:`matplotlib.axes.Axes` or numpy.ndarray of them
Notes
-----
- See matplotlib documentation online for more on this subject
- If `kind` = 'bar' or 'barh', you can specify relative alignments
for bar plot layout by `position` keyword.
From 0 (left/bottom-end) to 1 (right/top-end). Default is 0.5 (center)
"""
# function copied from pandas.plotting._core
# so it calls modified _plot below
import matplotlib.pyplot as plt
if ax is None and len(plt.get_fignums()) > 0:
with plt.rc_context():
ax = plt.gca()
ax = PandasMPLPlot._get_ax_layer(ax)
return _plot(
data,
kind=kind,
ax=ax,
figsize=figsize,
use_index=use_index,
title=title,
grid=grid,
legend=legend,
style=style,
logx=logx,
logy=logy,
loglog=loglog,
xticks=xticks,
yticks=yticks,
xlim=xlim,
ylim=ylim,
rot=rot,
fontsize=fontsize,
colormap=colormap,
table=table,
yerr=yerr,
xerr=xerr,
label=label,
secondary_y=secondary_y,
**kwds,
)
def plot_frame(
data,
x=None,
y=None,
kind="line",
ax=None,
subplots=None,
sharex=None,
sharey=False,
layout=None,
figsize=None,
use_index=True,
title=None,
grid=None,
legend=True,
style=None,
logx=False,
logy=False,
loglog=False,
xticks=None,
yticks=None,
xlim=None,
ylim=None,
rot=None,
fontsize=None,
colormap=None,
table=False,
yerr=None,
xerr=None,
secondary_y=False,
sort_columns=False,
**kwds
):
"""
Make plots of DataFrames using matplotlib / pylab.
Each plot kind has a corresponding method on the
``DataFrame.plot`` accessor:
``psdf.plot(kind='line')`` is equivalent to
``psdf.plot.line()``.
Parameters
----------
data : DataFrame
kind : str
- 'line' : line plot (default)
- 'bar' : vertical bar plot
- 'barh' : horizontal bar plot
- 'hist' : histogram
- 'box' : boxplot
- 'kde' : Kernel Density Estimation plot
- 'density' : same as 'kde'
- 'area' : area plot
- 'pie' : pie plot
- 'scatter' : scatter plot
ax : matplotlib axes object
If not passed, uses gca()
x : label or position, default None
y : label, position or list of label, positions, default None
Allows plotting of one column versus another.
figsize : a tuple (width, height) in inches
use_index : boolean, default True
Use index as ticks for x axis
title : string or list
Title to use for the plot. If a string is passed, print the string at
the top of the figure. If a list is passed and `subplots` is True,
print each item in the list above the corresponding subplot.
grid : boolean, default None (matlab style default)
Axis grid lines
legend : False/True/'reverse'
Place legend on axis subplots
style : list or dict
matplotlib line style per column
logx : boolean, default False
Use log scaling on x axis
logy : boolean, default False
Use log scaling on y axis
loglog : boolean, default False
Use log scaling on both x and y axes
xticks : sequence
Values to use for the xticks
yticks : sequence
Values to use for the yticks
xlim : 2-tuple/list
ylim : 2-tuple/list
sharex: bool or None, default is None
Whether to share x axis or not.
sharey: bool, default is False
Whether to share y axis or not.
rot : int, default None
Rotation for ticks (xticks for vertical, yticks for horizontal plots)
fontsize : int, default None
Font size for xticks and yticks
colormap : str or matplotlib colormap object, default None
Colormap to select colors from. If string, load colormap with that name
from matplotlib.
colorbar : boolean, optional
If True, plot colorbar (only relevant for 'scatter' and 'hexbin' plots)
position : float
Specify relative alignments for bar plot layout.
From 0 (left/bottom-end) to 1 (right/top-end). Default is 0.5 (center)
table : boolean, Series or DataFrame, default False
If True, draw a table using the data in the DataFrame and the data will
be transposed to meet matplotlib's default layout.
If a Series or DataFrame is passed, use passed data to draw a table.
yerr : DataFrame, Series, array-like, dict and str
See :ref:`Plotting with Error Bars <visualization.errorbars>` for
detail.
xerr : same types as yerr.
label : label argument to provide to plot
secondary_y : boolean or sequence of ints, default False
If True then y-axis will be on the right
mark_right : boolean, default True
When using a secondary_y axis, automatically mark the column
labels with "(right)" in the legend
sort_columns: bool, default is False
When True, will sort values on plots.
**kwds : keywords
Options to pass to matplotlib plotting method
Returns
-------
axes : :class:`matplotlib.axes.Axes` or numpy.ndarray of them
Notes
-----
- See matplotlib documentation online for more on this subject
- If `kind` = 'bar' or 'barh', you can specify relative alignments
for bar plot layout by `position` keyword.
From 0 (left/bottom-end) to 1 (right/top-end). Default is 0.5 (center)
"""
return _plot(
data,
kind=kind,
x=x,
y=y,
ax=ax,
figsize=figsize,
use_index=use_index,
title=title,
grid=grid,
legend=legend,
subplots=subplots,
style=style,
logx=logx,
logy=logy,
loglog=loglog,
xticks=xticks,
yticks=yticks,
xlim=xlim,
ylim=ylim,
rot=rot,
fontsize=fontsize,
colormap=colormap,
table=table,
yerr=yerr,
xerr=xerr,
sharex=sharex,
sharey=sharey,
secondary_y=secondary_y,
layout=layout,
sort_columns=sort_columns,
**kwds,
)
def _plot(data, x=None, y=None, subplots=False, ax=None, kind="line", **kwds):
from pyspark.pandas import DataFrame
# function copied from pandas.plotting._core
# and adapted to handle pandas-on-Spark DataFrame and Series
kind = kind.lower().strip()
kind = {"density": "kde"}.get(kind, kind)
if kind in _all_kinds:
klass = _plot_klass[kind]
else:
raise ValueError("%r is not a valid plot kind" % kind)
# scatter and hexbin are inherited from PlanePlot which require x and y
if kind in ("scatter", "hexbin"):
plot_obj = klass(data, x, y, subplots=subplots, ax=ax, kind=kind, **kwds)
else:
# check data type and do preprocess before applying plot
if isinstance(data, DataFrame):
if x is not None:
data = data.set_index(x)
# TODO: check if value of y is plottable
if y is not None:
data = data[y]
plot_obj = klass(data, subplots=subplots, ax=ax, kind=kind, **kwds)
plot_obj.generate()
plot_obj.draw()
return plot_obj.result
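# Editor's sketch (illustrative, not part of the module): these entry points are
# normally reached through the `.plot` accessor on pandas-on-Spark objects with
# the matplotlib plotting backend selected, rather than called directly.
def _example_plot_accessor():
    import pyspark.pandas as ps
    ps.set_option("plotting.backend", "matplotlib")
    psdf = ps.DataFrame({"a": [1, 2, 3, 4], "b": [4, 3, 2, 1]})
    line_ax = psdf.plot(kind="line")       # DataFrame -> plot_frame -> PandasOnSparkLinePlot
    hist_ax = psdf["a"].plot(kind="hist")  # Series -> plot_series -> PandasOnSparkHistPlot
    return line_ax, hist_ax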
| hvanhovell/spark | python/pyspark/pandas/plot/matplotlib.py | Python | apache-2.0 | 30,172 |
from sketch_components.utils import js_represent
class Prop(object):
def __init__(self, value, is_literal=False):
self.value = value
self.is_literal = is_literal
def __repr__(self):
if self.is_literal:
return "{%s}" % str(self.value)
return "{%s}" % self.represent(self.value)
@classmethod
def represent(cls, value):
return js_represent(value)
def __str__(self):
return repr(self)
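# Editor's sketch (illustrative): rendering a prop value. The literal case only
# relies on the formatting in __repr__; the non-literal output depends on what
# js_represent() produces for the given value.
def _example_prop():
    literal = Prop('this.props.onClick', is_literal=True)
    assert str(literal) == '{this.props.onClick}'
    return repr(Prop(42))  # value run through js_represent, e.g. '{42}'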
| ibhubs/sketch-components | sketch_components/engines/react/base/props.py | Python | mit | 466 |
# Copyright 2013-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import re
from wlauto import AndroidUiAutoBenchmark, Parameter, Alias
from wlauto.exceptions import ConfigError
class Andebench(AndroidUiAutoBenchmark):
name = 'andebench'
description = """
AndEBench is an industry standard Android benchmark provided by The
Embedded Microprocessor Benchmark Consortium (EEMBC).
http://www.eembc.org/andebench/about.php
From the website:
- Initial focus on CPU and Dalvik interpreter performance
- Internal algorithms concentrate on integer operations
- Compares the difference between native and Java performance
- Implements flexible multicore performance analysis
- Results displayed in Iterations per second
- Detailed log file for comprehensive engineering analysis
"""
package = 'com.eembc.coremark'
activity = 'com.eembc.coremark.splash'
summary_metrics = ['AndEMark Java', 'AndEMark Native']
parameters = [
Parameter('number_of_threads', kind=int,
description='Number of threads that will be spawned by AndEBench.'),
Parameter('single_threaded', kind=bool,
description="""
If ``true``, AndEBench will run with a single thread. Note: this must
not be specified if ``number_of_threads`` has been specified.
"""),
]
aliases = [
Alias('andebenchst', number_of_threads=1),
]
regex = re.compile(r'\s*(?P<key>(AndEMark Native|AndEMark Java))\s*:'
r'\s*(?P<value>\d+)')
def validate(self):
if (self.number_of_threads is not None) and (self.single_threaded is not None): # pylint: disable=E1101
raise ConfigError('Can\'t specify both number_of_threads and single_threaded parameters.')
def setup(self, context):
if self.number_of_threads is None: # pylint: disable=access-member-before-definition
if self.single_threaded: # pylint: disable=E1101
self.number_of_threads = 1 # pylint: disable=attribute-defined-outside-init
else:
self.number_of_threads = self.device.number_of_cores # pylint: disable=W0201
self.logger.debug('Using {} threads'.format(self.number_of_threads))
self.uiauto_params['number_of_threads'] = self.number_of_threads
# The parent setup is called after this point because it consumes uiauto_params
super(Andebench, self).setup(context)
def update_result(self, context):
super(Andebench, self).update_result(context)
results = {}
with open(self.logcat_log) as fh:
for line in fh:
match = self.regex.search(line)
if match:
data = match.groupdict()
results[data['key']] = data['value']
for key, value in results.iteritems():
context.result.add_metric(key, value)
| freedomtan/workload-automation | wlauto/workloads/andebench/__init__.py | Python | apache-2.0 | 3,485 |
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import copy
from functools import wraps # noqa
import json
import os
from ceilometerclient.v2 import client as ceilometer_client
from cinderclient import client as cinder_client
from django.conf import settings
from django.contrib.messages.storage import default_storage # noqa
from django.core.handlers import wsgi
from django.core import urlresolvers
from django.test.client import RequestFactory # noqa
from django.test import utils as django_test_utils
from django.utils.importlib import import_module # noqa
from django.utils import unittest
import glanceclient
from heatclient import client as heat_client
import httplib2
from keystoneclient.v2_0 import client as keystone_client
import mock
import mox
from neutronclient.v2_0 import client as neutron_client
from novaclient.v2 import client as nova_client
from openstack_auth import user
from openstack_auth import utils
from saharaclient import client as sahara_client
from swiftclient import client as swift_client
from troveclient import client as trove_client
from horizon import base
from horizon import conf
from horizon.test import helpers as horizon_helpers
from openstack_dashboard import api
from openstack_dashboard import context_processors
from openstack_dashboard.test.test_data import utils as test_utils
# Makes output of failing mox tests much easier to read.
wsgi.WSGIRequest.__repr__ = lambda self: "<class 'django.http.HttpRequest'>"
def create_stubs(stubs_to_create={}):
"""decorator to simplify setting up multiple stubs at once via mox
:param stubs_to_create: methods to stub in one or more modules
:type stubs_to_create: dict
The keys are python paths to the module containing the methods to mock.
To mock a method in openstack_dashboard/api/nova.py, the key is::
api.nova
The values are either a tuple or a list of methods to mock in the module
indicated by the key.
For example::
('server_list',)
-or-
('flavor_list', 'server_list',)
-or-
['flavor_list', 'server_list']
Additionally, multiple modules can be mocked at once::
{
api.nova: ('flavor_list', 'server_list'),
api.glance: ('image_list_detailed',),
}
"""
if not isinstance(stubs_to_create, dict):
raise TypeError("create_stubs must be passed a dict, but a %s was "
"given." % type(stubs_to_create).__name__)
def inner_stub_out(fn):
@wraps(fn)
def instance_stub_out(self, *args, **kwargs):
for key in stubs_to_create:
if not (isinstance(stubs_to_create[key], tuple) or
isinstance(stubs_to_create[key], list)):
raise TypeError("The values of the create_stub "
"dict must be lists or tuples, but "
"is a %s."
% type(stubs_to_create[key]).__name__)
for value in stubs_to_create[key]:
self.mox.StubOutWithMock(key, value)
return fn(self, *args, **kwargs)
return instance_stub_out
return inner_stub_out
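# Illustrative usage on a test method (module and method names mirror the
# examples in the docstring above):
#
#   @create_stubs({api.nova: ('server_list',), api.glance: ('image_list_detailed',)})
#   def test_index(self):
#       ...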
class RequestFactoryWithMessages(RequestFactory):
def get(self, *args, **kwargs):
req = super(RequestFactoryWithMessages, self).get(*args, **kwargs)
req.user = utils.get_user(req)
req.session = []
req._messages = default_storage(req)
return req
def post(self, *args, **kwargs):
req = super(RequestFactoryWithMessages, self).post(*args, **kwargs)
req.user = utils.get_user(req)
req.session = []
req._messages = default_storage(req)
return req
@unittest.skipIf(os.environ.get('SKIP_UNITTESTS', False),
"The SKIP_UNITTESTS env variable is set.")
class TestCase(horizon_helpers.TestCase):
"""Specialized base test case class for Horizon.
It gives access to numerous additional features:
* A full suite of test data through various attached objects and
managers (e.g. ``self.servers``, ``self.user``, etc.). See the
docs for
:class:`~openstack_dashboard.test.test_data.utils.TestData`
for more information.
* The ``mox`` mocking framework via ``self.mox``.
* A set of request context data via ``self.context``.
* A ``RequestFactory`` class which supports Django's ``contrib.messages``
framework via ``self.factory``.
* A ready-to-go request object via ``self.request``.
* The ability to override specific time data controls for easier testing.
* Several handy additional assertion methods.
"""
def setUp(self):
def fake_conn_request(*args, **kwargs):
raise Exception("An external URI request tried to escape through "
"an httplib2 client. Args: %s, kwargs: %s"
% (args, kwargs))
self._real_conn_request = httplib2.Http._conn_request
httplib2.Http._conn_request = fake_conn_request
self._real_context_processor = context_processors.openstack
context_processors.openstack = lambda request: self.context
self.patchers = {}
self.add_panel_mocks()
super(TestCase, self).setUp()
def _setup_test_data(self):
super(TestCase, self)._setup_test_data()
test_utils.load_test_data(self)
self.context = {'authorized_tenants': self.tenants.list()}
def _setup_factory(self):
# For some magical reason we need a copy of this here.
self.factory = RequestFactoryWithMessages()
def _setup_user(self):
self._real_get_user = utils.get_user
tenants = self.context['authorized_tenants']
self.setActiveUser(id=self.user.id,
token=self.token,
username=self.user.name,
domain_id=self.domain.id,
tenant_id=self.tenant.id,
service_catalog=self.service_catalog,
authorized_tenants=tenants)
def _setup_request(self):
super(TestCase, self)._setup_request()
self.request.session['token'] = self.token.id
def add_panel_mocks(self):
"""Global mocks on panels that get called on all views."""
self.patchers['aggregates'] = mock.patch(
'openstack_dashboard.dashboards.admin'
'.aggregates.panel.Aggregates.can_access',
mock.Mock(return_value=True))
self.patchers['aggregates'].start()
def tearDown(self):
httplib2.Http._conn_request = self._real_conn_request
context_processors.openstack = self._real_context_processor
utils.get_user = self._real_get_user
mock.patch.stopall()
super(TestCase, self).tearDown()
def setActiveUser(self, id=None, token=None, username=None, tenant_id=None,
service_catalog=None, tenant_name=None, roles=None,
authorized_tenants=None, enabled=True, domain_id=None):
def get_user(request):
return user.User(id=id,
token=token,
user=username,
domain_id=domain_id,
tenant_id=tenant_id,
service_catalog=service_catalog,
roles=roles,
enabled=enabled,
authorized_tenants=authorized_tenants,
endpoint=settings.OPENSTACK_KEYSTONE_URL)
utils.get_user = get_user
def assertRedirectsNoFollow(self, response, expected_url):
"""Check for redirect.
Asserts that the given response issued a 302 redirect without
processing the view which is redirected to.
"""
assert (response.status_code / 100 == 3), \
"The response did not return a redirect."
self.assertEqual(response._headers.get('location', None),
('Location', settings.TESTSERVER + expected_url))
self.assertEqual(response.status_code, 302)
def assertNoFormErrors(self, response, context_name="form"):
"""Checks for no form errors.
Asserts that the response either does not contain a form in its
context, or that if it does, that form has no errors.
"""
context = getattr(response, "context", {})
if not context or context_name not in context:
return True
errors = response.context[context_name]._errors
assert len(errors) == 0, \
"Unexpected errors were found on the form: %s" % errors
def assertFormErrors(self, response, count=0, message=None,
context_name="form"):
"""Check for form errors.
Asserts that the response contains a form in its context and that
the form has errors. If count is given, the number of errors found
must match count exactly.
"""
context = getattr(response, "context", {})
assert (context and context_name in context), \
"The response did not contain a form."
errors = response.context[context_name]._errors
if count:
assert len(errors) == count, \
"%d errors were found on the form, %d expected" % \
(len(errors), count)
if message and message not in unicode(errors):
self.fail("Expected message not found, instead found: %s"
% ["%s: %s" % (key, [e for e in field_errors]) for
(key, field_errors) in errors.items()])
else:
assert len(errors) > 0, "No errors were found on the form"
def assertStatusCode(self, response, expected_code):
"""Validates an expected status code.
Matches camel case of other assert functions
"""
if response.status_code == expected_code:
return
self.fail('status code %r != %r: %s' % (response.status_code,
expected_code,
response.content))
def assertItemsCollectionEqual(self, response, items_list):
self.assertEqual(response.content,
'{"items": ' + json.dumps(items_list) + "}")
@staticmethod
def mock_rest_request(**args):
mock_args = {
'user.is_authenticated.return_value': True,
'is_ajax.return_value': True,
'policy.check.return_value': True,
'body': ''
}
mock_args.update(args)
return mock.Mock(**mock_args)
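# Illustrative usage in a REST API view test (the payload is made up; any
# keyword argument overrides or extends the defaults above):
#   request = self.mock_rest_request(body='{"name": "demo"}')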
class BaseAdminViewTests(TestCase):
"""Sets an active user with the "admin" role.
For testing admin-only views and functionality.
"""
def setActiveUser(self, *args, **kwargs):
if "roles" not in kwargs:
kwargs['roles'] = [self.roles.admin._info]
super(BaseAdminViewTests, self).setActiveUser(*args, **kwargs)
def setSessionValues(self, **kwargs):
settings.SESSION_ENGINE = 'django.contrib.sessions.backends.file'
engine = import_module(settings.SESSION_ENGINE)
store = engine.SessionStore()
for key in kwargs:
store[key] = kwargs[key]
self.request.session[key] = kwargs[key]
store.save()
self.session = store
self.client.cookies[settings.SESSION_COOKIE_NAME] = store.session_key
class APITestCase(TestCase):
"""Testing APIs.
For use with tests which deal with the underlying clients rather than
stubbing out the openstack_dashboard.api.* methods.
"""
def setUp(self):
super(APITestCase, self).setUp()
utils.patch_middleware_get_user()
def fake_keystoneclient(request, admin=False):
"""Returns the stub keystoneclient.
Only necessary because the function takes too many arguments to
conveniently be a lambda.
"""
return self.stub_keystoneclient()
# Store the original clients
self._original_glanceclient = api.glance.glanceclient
self._original_keystoneclient = api.keystone.keystoneclient
self._original_novaclient = api.nova.novaclient
self._original_neutronclient = api.neutron.neutronclient
self._original_cinderclient = api.cinder.cinderclient
self._original_heatclient = api.heat.heatclient
self._original_ceilometerclient = api.ceilometer.ceilometerclient
self._original_troveclient = api.trove.troveclient
self._original_saharaclient = api.sahara.client
# Replace the clients with our stubs.
api.glance.glanceclient = lambda request: self.stub_glanceclient()
api.keystone.keystoneclient = fake_keystoneclient
api.nova.novaclient = lambda request: self.stub_novaclient()
api.neutron.neutronclient = lambda request: self.stub_neutronclient()
api.cinder.cinderclient = lambda request: self.stub_cinderclient()
api.heat.heatclient = (lambda request, password=None:
self.stub_heatclient())
api.ceilometer.ceilometerclient = (lambda request:
self.stub_ceilometerclient())
api.trove.troveclient = lambda request: self.stub_troveclient()
api.sahara.client = lambda request: self.stub_saharaclient()
def tearDown(self):
super(APITestCase, self).tearDown()
api.glance.glanceclient = self._original_glanceclient
api.nova.novaclient = self._original_novaclient
api.keystone.keystoneclient = self._original_keystoneclient
api.neutron.neutronclient = self._original_neutronclient
api.cinder.cinderclient = self._original_cinderclient
api.heat.heatclient = self._original_heatclient
api.ceilometer.ceilometerclient = self._original_ceilometerclient
api.trove.troveclient = self._original_troveclient
api.sahara.client = self._original_saharaclient
def stub_novaclient(self):
if not hasattr(self, "novaclient"):
self.mox.StubOutWithMock(nova_client, 'Client')
self.novaclient = self.mox.CreateMock(nova_client.Client)
return self.novaclient
def stub_cinderclient(self):
if not hasattr(self, "cinderclient"):
self.mox.StubOutWithMock(cinder_client, 'Client')
self.cinderclient = self.mox.CreateMock(cinder_client.Client)
return self.cinderclient
def stub_keystoneclient(self):
if not hasattr(self, "keystoneclient"):
self.mox.StubOutWithMock(keystone_client, 'Client')
# NOTE(saschpe): Mock properties, MockObject.__init__ ignores them:
keystone_client.Client.auth_token = 'foo'
keystone_client.Client.service_catalog = None
keystone_client.Client.tenant_id = '1'
keystone_client.Client.tenant_name = 'tenant_1'
keystone_client.Client.management_url = ""
keystone_client.Client.__dir__ = lambda: []
self.keystoneclient = self.mox.CreateMock(keystone_client.Client)
return self.keystoneclient
def stub_glanceclient(self):
if not hasattr(self, "glanceclient"):
self.mox.StubOutWithMock(glanceclient, 'Client')
self.glanceclient = self.mox.CreateMock(glanceclient.Client)
return self.glanceclient
def stub_neutronclient(self):
if not hasattr(self, "neutronclient"):
self.mox.StubOutWithMock(neutron_client, 'Client')
self.neutronclient = self.mox.CreateMock(neutron_client.Client)
return self.neutronclient
def stub_swiftclient(self, expected_calls=1):
if not hasattr(self, "swiftclient"):
self.mox.StubOutWithMock(swift_client, 'Connection')
self.swiftclient = self.mox.CreateMock(swift_client.Connection)
while expected_calls:
swift_client.Connection(None,
mox.IgnoreArg(),
None,
preauthtoken=mox.IgnoreArg(),
preauthurl=mox.IgnoreArg(),
cacert=None,
insecure=False,
auth_version="2.0") \
.AndReturn(self.swiftclient)
expected_calls -= 1
return self.swiftclient
def stub_heatclient(self):
if not hasattr(self, "heatclient"):
self.mox.StubOutWithMock(heat_client, 'Client')
self.heatclient = self.mox.CreateMock(heat_client.Client)
return self.heatclient
def stub_ceilometerclient(self):
if not hasattr(self, "ceilometerclient"):
self.mox.StubOutWithMock(ceilometer_client, 'Client')
self.ceilometerclient = self.mox.\
CreateMock(ceilometer_client.Client)
return self.ceilometerclient
def stub_troveclient(self):
if not hasattr(self, "troveclient"):
self.mox.StubOutWithMock(trove_client, 'Client')
self.troveclient = self.mox.CreateMock(trove_client.Client)
return self.troveclient
def stub_saharaclient(self):
if not hasattr(self, "saharaclient"):
self.mox.StubOutWithMock(sahara_client, 'Client')
self.saharaclient = self.mox.CreateMock(sahara_client.Client)
return self.saharaclient
@unittest.skipUnless(os.environ.get('WITH_SELENIUM', False),
"The WITH_SELENIUM env variable is not set.")
class SeleniumTestCase(horizon_helpers.SeleniumTestCase):
def setUp(self):
super(SeleniumTestCase, self).setUp()
test_utils.load_test_data(self)
self.mox = mox.Mox()
self._real_get_user = utils.get_user
self.setActiveUser(id=self.user.id,
token=self.token,
username=self.user.name,
tenant_id=self.tenant.id,
service_catalog=self.service_catalog,
authorized_tenants=self.tenants.list())
self.patchers = {}
self.patchers['aggregates'] = mock.patch(
'openstack_dashboard.dashboards.admin'
'.aggregates.panel.Aggregates.can_access',
mock.Mock(return_value=True))
self.patchers['aggregates'].start()
os.environ["HORIZON_TEST_RUN"] = "True"
def tearDown(self):
self.mox.UnsetStubs()
utils.get_user = self._real_get_user
mock.patch.stopall()
self.mox.VerifyAll()
del os.environ["HORIZON_TEST_RUN"]
def setActiveUser(self, id=None, token=None, username=None, tenant_id=None,
service_catalog=None, tenant_name=None, roles=None,
authorized_tenants=None, enabled=True):
def get_user(request):
return user.User(id=id,
token=token,
user=username,
tenant_id=tenant_id,
service_catalog=service_catalog,
roles=roles,
enabled=enabled,
authorized_tenants=authorized_tenants,
endpoint=settings.OPENSTACK_KEYSTONE_URL)
utils.get_user = get_user
class SeleniumAdminTestCase(SeleniumTestCase):
"""Version of AdminTestCase for Selenium.
Sets an active user with the "admin" role for testing admin-only views and
functionality.
"""
def setActiveUser(self, *args, **kwargs):
if "roles" not in kwargs:
kwargs['roles'] = [self.roles.admin._info]
super(SeleniumAdminTestCase, self).setActiveUser(*args, **kwargs)
def my_custom_sort(flavor):
sort_order = {
'm1.secret': 0,
'm1.tiny': 1,
'm1.massive': 2,
'm1.metadata': 3,
}
return sort_order[flavor.name]
class PluginTestCase(TestCase):
"""Test case for testing plugin system of Horizon.
For use with tests which deal with the pluggable dashboard and panel
configuration, it takes care of backing up and restoring the Horizon
configuration.
"""
def setUp(self):
super(PluginTestCase, self).setUp()
self.old_horizon_config = conf.HORIZON_CONFIG
conf.HORIZON_CONFIG = conf.LazySettings()
base.Horizon._urls()
# Store our original dashboards
self._discovered_dashboards = base.Horizon._registry.keys()
# Gather up and store our original panels for each dashboard
self._discovered_panels = {}
for dash in self._discovered_dashboards:
panels = base.Horizon._registry[dash]._registry.keys()
self._discovered_panels[dash] = panels
def tearDown(self):
super(PluginTestCase, self).tearDown()
conf.HORIZON_CONFIG = self.old_horizon_config
# Destroy our singleton and re-create it.
base.HorizonSite._instance = None
del base.Horizon
base.Horizon = base.HorizonSite()
# Reload the convenience references to Horizon stored in __init__
reload(import_module("horizon"))
# Re-register our original dashboards and panels.
# This is necessary because autodiscovery only works on the first
# import, and calling reload introduces innumerable additional
# problems. Manual re-registration is the only good way for testing.
for dash in self._discovered_dashboards:
base.Horizon.register(dash)
for panel in self._discovered_panels[dash]:
dash.register(panel)
self._reload_urls()
def _reload_urls(self):
"""Cleans up URLs.
Clears out the URL caches, reloads the root urls module, and
re-triggers the autodiscovery mechanism for Horizon. Allows URLs
to be re-calculated after registering new dashboards. Useful
only for testing and should never be used on a live site.
"""
urlresolvers.clear_url_caches()
reload(import_module(settings.ROOT_URLCONF))
base.Horizon._urls()
class update_settings(django_test_utils.override_settings):
"""override_settings variant that can override single items inside a dict.

Django's original override_settings replaces a dict setting completely.
However, the OpenStack Dashboard settings contain many dictionary
configurations, and there are test cases where we want to override only
one item in a dictionary while keeping the other items unchanged.
This version of override_settings allows that when keep_dict is True.
If keep_dict=False is specified, the original behavior of Django's
override_settings is used.
"""
def __init__(self, keep_dict=True, **kwargs):
if keep_dict:
for key, new_value in kwargs.items():
value = getattr(settings, key, None)
if (isinstance(new_value, collections.Mapping) and
isinstance(value, collections.Mapping)):
copied = copy.copy(value)
copied.update(new_value)
kwargs[key] = copied
super(update_settings, self).__init__(**kwargs)
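# Illustrative usage (the setting name and keys are examples only): with the
# default keep_dict=True, only the given keys are replaced and the rest of the
# dict-valued setting is preserved.
#
#   @update_settings(OPENSTACK_API_VERSIONS={'identity': 3})
#   class MyViewTests(TestCase):
#       ...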
| yjxtogo/horizon | openstack_dashboard/test/helpers.py | Python | apache-2.0 | 24,322 |
"""Wallet-Cashier URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from .views import *
urlpatterns = [
url(r'^$', Dashboard.as_view(), name='dashboard'),
url(r'^statistics/$', statistics_data, name='statistics'),
url(r'^contrast/$', contrast_data, name='contrast'),
url(r'^wallet/$', GetWallet.as_view(), name='wallet'),
url(r'^wallet/modify/$', modify_wallet, name='modify_wallet'),
url(r'^wallet/get/(?P<wallet_id>[0-9]+)/$', get_wallet, name='get_wallet'),
url(r'^income/$', Earn.as_view(), name='income'),
url(r'^income/add/$', add_income, name='add_income'),
url(r'^expense/$', Expend.as_view(), name='expense'),
url(r'^expense/add/$', add_expense, name='add_expense'),
url(r'^detail/$', Detail.as_view(), name='detail'),
]
| TrumanZCM/ChurchFinance | cashier/urls.py | Python | apache-2.0 | 1,401 |
# -*- coding: utf-8 -*-
# Testing facet-sphere interaction.
# A facet is rotated around the Z axis. The test passes if the sphere at position (0,0) does not move (no moment is transferred from the facet to that sphere), while the sphere at the facet's edge moves with the facet (this sphere has its rotational DOFs blocked in order to suppress rolling).
## PhysicalParameters
Density=2400
frictionAngle=radians(35)
tc = 0.001
en = 0.3
es = 0.3
## Import wall's geometry
params=utils.getViscoelasticFromSpheresInteraction(tc,en,es)
facetMat=O.materials.append(ViscElMat(frictionAngle=frictionAngle,**params))
sphereMat=O.materials.append(ViscElMat(density=Density,frictionAngle=frictionAngle,**params))
facetId=O.bodies.append(utils.facet( [ (-1,0,0), (1,1,0), (1,-1,0)], material=facetMat,color=(1,0,0)))
sphIds=O.bodies.append([
utils.sphere( (0,0,0.1),0.1, material=sphereMat,color=(0,1,0)),
utils.sphere( (0.9,0,0.1),0.1, material=sphereMat,color=(0,1,0))
])
O.bodies[sphIds[1]].state.blockedDOFs='XYZ'
## Timestep
O.dt=.1*tc
## Engines
O.engines=[
ForceResetter(),
InsertionSortCollider([Bo1_Sphere_Aabb(),Bo1_Facet_Aabb()]),
InteractionLoop(
[Ig2_Sphere_Sphere_ScGeom(), Ig2_Facet_Sphere_ScGeom()],
[Ip2_ViscElMat_ViscElMat_ViscElPhys()],
[Law2_ScGeom_ViscElPhys_Basic()],
),
GravityEngine(gravity=[0,0,-9.81]),
NewtonIntegrator(damping=0),
RotationEngine(ids=[facetId],rotationAxis=[0,0,1],rotateAroundZero=True,angularVelocity=0.1)
]
from woo import qt
qt.View()
O.saveTmp()
#O.run()
| sjl767/woo | scripts/test-OLD/facet-sphere-ViscElBasic.py | Python | gpl-2.0 | 1,562 |
#coding=utf-8
import urllib
from HTMLParser import HTMLParser
# create a subclass and override the handler methods
class PriceHTMLParser(HTMLParser):
def handle_starttag(self, tag, attrs):
print "Encountered a start tag:", tag
for attr in attrs:
print "    attr:", attr
def handle_endtag(self, tag):
print "Encountered an end tag :", tag
def getHtml(url):
page = urllib.urlopen(url)
html = page.read()
return html
html = getHtml("http://sh.lianjia.com/ershoufang/rs%E9%BB%84%E6%B5%A6%E8%8A%B1%E5%9B%AD")
parser = PriceHTMLParser()
parser.feed(html)
| jackycheng/fang-analytics | price.py | Python | mit | 604 |
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
sample_load = '''
---
mesos:
box: fp/centos7
master:
instances: 3
memory: 256
ansible_groups:
- zookeeper
- mesos-master
slave:
instances: 2
memory: 256
ansible_groups:
- mesos-slave
registry:
instances: 1
memory: "{{ 1024 + 256 }}"
aliases:
- myregistry.vagrant
ansible_groups:
- docker-registry
'''
sample_load_witherrors = '''
---
users:
tj:
name: tj
age: 23
email: '[email protected]'
bob:
name: 'bob'
age: 27
ted: { name: ted, age: 32, email: [email protected] }
country:
name: Österreich
website: http://en.wikipedia.org/wiki/Austria
space:
description: space, the final frontier
brackets:
square: Square [brackets] can go in the middle of strings
squiggle: Squiggle {brackets} can also go in the middle of strings!
extrasquare: [Scratch that] brackets can go at the beginning as long as they close and have text after.
extrasquiggle: {Scratch that} squigs can go at the beginning also!
'''
| fabriziopandini/vagrant-playbook | vagrantplaybook/tests/playbook/sample/load.py | Python | mit | 1,150 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('browser', '0002_auto_20151118_1403'),
]
operations = [
migrations.AddField(
model_name='jobs',
name='job_name',
field=models.CharField(default='test', max_length=200),
preserve_default=False,
),
]
| MRCIEU/melodi | browser/migrations/0003_jobs_job_name.py | Python | mit | 453 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-12-15 19:36
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import osf.models.base
import osf.utils.datetime_aware_jsonfield
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('osf', '0028_merge'),
]
operations = [
migrations.CreateModel(
name='NodeSettings',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('_id', models.CharField(db_index=True, default=osf.models.base.generate_object_id, max_length=24, unique=True)),
('deleted', models.BooleanField(default=False)),
('folder_id', models.TextField(blank=True, null=True)),
('folder_name', models.TextField(blank=True, null=True)),
('folder_path', models.TextField(blank=True, null=True)),
('external_account', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='addons_figshare_node_settings', to='osf.ExternalAccount')),
('owner', models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='addons_figshare_node_settings', to='osf.AbstractNode')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='UserSettings',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('_id', models.CharField(db_index=True, default=osf.models.base.generate_object_id, max_length=24, unique=True)),
('deleted', models.BooleanField(default=False)),
('oauth_grants', osf.utils.datetime_aware_jsonfield.DateTimeAwareJSONField(blank=True, default=dict)),
('owner', models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='addons_figshare_user_settings', to=settings.AUTH_USER_MODEL)),
],
options={
'abstract': False,
},
),
migrations.AddField(
model_name='nodesettings',
name='user_settings',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='addons_figshare.UserSettings'),
),
]
| monikagrabowska/osf.io | addons/figshare/migrations/0001_initial.py | Python | apache-2.0 | 2,651 |
# -*- coding: utf-8 -*-
import logging
import os
from lxml import etree
from pytrainer.lib.xmlUtils import XMLParser
from pytrainer.gui.dialogs import fileChooserDialog, guiFlush
from pytrainer.core.activity import Activity
from sqlalchemy.orm import exc
class garminFIT():
def __init__(self, parent = None, validate=False):
self.parent = parent
self.pytrainer_main = parent.pytrainer_main
self.tmpdir = self.pytrainer_main.profile.tmpdir
self.data_path = os.path.dirname(__file__)
self.validate = validate
self.sport = self.getConfValue("Force_sport_to")
def getConfValue(self, confVar):
info = XMLParser(self.data_path+"/conf.xml")
code = info.getValue("pytrainer-plugin","plugincode")
plugindir = self.pytrainer_main.profile.plugindir
if not os.path.isfile(plugindir+"/"+code+"/conf.xml"):
value = None
else:
info = XMLParser(plugindir+"/"+code+"/conf.xml")
value = info.getValue("pytrainer-plugin",confVar)
return value
def run(self):
logging.debug(">>")
# able to select multiple files....
selectedFiles = fileChooserDialog(title="Choose a FIT file (or files) to import", multiple=True).getFiles()
guiFlush()
importfiles = []
if not selectedFiles: #Nothing selected
return importfiles
for filename in selectedFiles: #Multiple files
if self.valid_input_file(filename): #TODO could consolidate tree generation here
tree = etree.ElementTree(file=filename)
#Possibly multiple entries in file
activities = self.getActivities(tree)
for activity in activities:
if not self.inDatabase(activity):
sport = self.getSport(activity)
gpxfile = "%s/garmin-fit-%d.gpx" % (self.tmpdir, len(importfiles))
self.createGPXfile(gpxfile, activity)
importfiles.append((gpxfile, sport))
else:
logging.debug("File:%s activity %d already in database. Skipping import." % (filename, activities.index(activity)) )
else:
logging.info("File %s failed validation" % (filename))
logging.debug("<<")
return importfiles
def valid_input_file(self, filename):
""" Function to validate input file if requested"""
if not self.validate: #not asked to validate
logging.debug("Not validating %s" % (filename) )
return True
else:
xslfile = os.path.realpath(self.pytrainer_main.data_path)+ "/schemas/GarminTrainingCenterDatabase_v2.xsd"
from pytrainer.lib.xmlValidation import xmlValidator
validator = xmlValidator()
return validator.validateXSL(filename, xslfile)
def getActivities(self, tree):
'''Function to return all activities in Garmin training center version 2 file
'''
root = tree.getroot()
activities = root.findall(".//{http://www.garmin.com/xmlschemas/TrainingCenterDatabase/v2}Activity")
return activities
def inDatabase(self, activity):
#comparing date and start time (sport may have been changed in DB after import)
time = self.detailsFromTCX(activity)
try:
self.pytrainer_main.ddbb.session.query(Activity).filter(Activity.date_time_utc == time).one()
return True
except exc.NoResultFound:
return False
def getSport(self, activity):
#return sport from file or overide if present
if self.sport:
return self.sport
#sportElement = activity.find(".//{http://www.garmin.com/xmlschemas/TrainingCenterDatabase/v2}Activity")
try:
sport = activity.get("Sport")
except:
sport = "import"
return sport
def detailsFromTCX(self, activity):
timeElement = activity.find(".//{http://www.garmin.com/xmlschemas/TrainingCenterDatabase/v2}Id")
if timeElement is None:
return None
else:
return timeElement.text
def createGPXfile(self, gpxfile, activity):
""" Function to transform a Garmin Training Center v2 Track to a valid GPX+ file
"""
xslt_doc = etree.parse(self.data_path+"/translate.xsl")
transform = etree.XSLT(xslt_doc)
#xml_doc = etree.parse(filename)
xml_doc = activity
result_tree = transform(xml_doc)
result_tree.write(gpxfile, xml_declaration=True, encoding='UTF-8')
| pytrainer/pytrainer | plugins/garmin-fit/garmin-fit.py | Python | gpl-2.0 | 3,975 |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
script: bivariate-analysis.py
This script follows univariate-analysis-2.py.
In this script we show how to load/compute bivariate analysis, where
cross correlations are computed between couples of observables (of similar
type)
"""
from __future__ import print_function
from builtins import input
import argparse
import time
from tqdm import tqdm
import matplotlib.pyplot as plt
import pandas as pd
from tunacell import Experiment, Observable, FilterSet
from tunacell.base.observable import FunctionalObservable
from tunacell.base.observable import set_observable_list
from tunacell.filters.cells import FilterCellIDparity
from tunacell.stats.api import (compute_univariate, load_univariate,
compute_stationary, load_stationary, NoValidTimes,
compute_bivariate, load_bivariate,
compute_stationary_bivariate, load_stationary_bivariate)
from tunacell.stats.single import UnivariateIOError, StationaryUnivariateIOError
from tunacell.stats.two import BivariateIOError, StationaryBivariateIOError
from tunacell.stats.utils import Regions, CompuParams
from tunacell.plotting.dynamics import plot_stationary
from tunacell.plotting.statics import scatter_plot
# close all open plots
plt.close('all')
# Arguments
argparser = argparse.ArgumentParser()
argparser.add_argument('-e', '--experiment', type=str,
help='Path to experiment root folder',
default='~/tmptunacell/simutest')
argparser.add_argument('-i', '--interactive',
help='Ask user to press Enter between parts',
action='store_true')
argparser.add_argument('--time', type=float,
help='Time per figure when non-interactive mode is on',
default=3)
args = argparser.parse_args()
single_plot_timing = args.time
msg = ('==============tunacell=tutorial==============\n'
'== ==\n'
'== Bivariate analysis ==\n'
'== ==\n'
'== This tutorial shows more details about ==\n'
'== the bivariate analysis (statistics of a ==\n'
'== couple of observables): ==\n'
'== * import/export of univariate results ==\n'
'== * computation of bivariate statistics ==\n'
'== * at stationary (cross-correlations) ==\n'
'== (refer to comments in code to get more ==\n'
'== details) ==\n'
'== ==\n'
'==============tunacell=tutorial==============\n')
print(msg)
print()
# =============================================================================
# We start with the same settings as in univariate-analysis-2.py.
# We define the same observables and load their univariate analysis, that
# is needed for further computing
# =============================================================================
# define the Parser instance, no filter applied
path_to_exp = args.experiment
exp = Experiment(path_to_exp)
# define a condition
even = FilterCellIDparity('even')
condition = FilterSet(label='evenID', filtercell=even)
# Reference values
md = exp.metadata
params = md['ornstein_uhlenbeck_params']
ref_mean = params['target']
ref_var = params['noise']/(2 * params['spring'])
ref_decayrate = params['spring']
# TIME-LAPSE OBSERVABLES (time-series per cell)
# exact growth rate (model)
ou = Observable(name='exact-growth-rate', raw='ou')
# local estimate of growth rate by using the differentiation of size measurement
# (the raw column 'exp_ou_int' plays the role of cell size in our simulations)
gr = Observable(name='approx-growth-rate', raw='exp_ou_int',
differentiate=True, scale='log',
local_fit=True, time_window=15.)
# dynamic, functional observable: twice the growth rate
ou2 = FunctionalObservable(name='double-growth-rate', f=lambda x : 2 * x, observables=[ou, ])
# time-aligned upon root cell division for size analysis
# fixing tref allows to align timeseries to a common origin; the 'root' option
# means that it will be aligned to each colony root cell division time
size = Observable(name='size', raw='exp_ou_int', tref='root')
continuous_obs = [ou, gr, ou2, size]
# SOME CELL-CYCLE TYPE OBSERVABLES (one value per cell)
# cell-cycle average growth rate
average_gr = Observable(name='average-growth-rate', raw='ou',
differentiate=False, scale='linear',
local_fit=False, mode='average', timing='g')
# size at cell division
division_size = Observable(name='division-size', raw='exp_ou_int',
differentiate=False, scale='log',
local_fit=False, mode='division', timing='g')
# increase in cell size timed at division time
increase = Observable(name='added-size', raw='exp_ou_int',
mode='net-increase-additive', timing='d')
cycle_obs = [average_gr, division_size, increase]
msg = 'Loading univariate results...'
dashes = len(msg) * '*'
print(msg + '\n' + dashes)
univariates_store = {}
for obs in continuous_obs + cycle_obs:
print('* {} ...'.format(obs.name))
try:
univ = load_univariate(exp, obs, cset=[condition, ])
except UnivariateIOError:
univ = compute_univariate(exp, obs, cset=[condition, ])
univ.export_text() # save as text files
# store univariate object in a dic indexed by observable
univariates_store[obs] = univ
# some options for plotting functions
trefs = [40., 80., 150.]
grefs = [1, 2]
if obs in [ou, gr]:
kwargs = {'mean_ref': ref_mean,
'var_ref': ref_var}
kwargs2 = {'show_exp_decay': ref_decayrate,
'trefs': trefs}
elif obs in [ou2, ]:
kwargs = {'mean_ref': 2 * ref_mean,
'var_ref': 4 * ref_var}
kwargs2 = {'show_exp_decay': ref_decayrate,
'trefs': trefs}
elif obs in [size, increase]:
kwargs = {}
kwargs2 = {'trefs': trefs}
elif obs in [average_gr, ]:
kwargs = {'mean_ref': ref_mean}
kwargs2 = {'trefs': grefs}
else:
kwargs = {}
kwargs2 = {'trefs': grefs}
# print('Ok')
regions = Regions(exp)
# regions.reset() # eliminate all regions except 'ALL'
steady_region = regions.get('ALL')
# and we need to use some computation options (more on that elsewhere)
# define computation options
options = CompuParams() # leaving to default is safe
# =============================================================================
# We proceed first to dynamic bivariate analysis, which computes matrices
# of covariances.
# To do so, we need to define couples of observables of the same type, i.e.
# either couple of dynamic observables, either couple of cell-cycle observables
# with both absolute timing or both generation timing.
# =============================================================================
couples = [(ou, gr), (average_gr, division_size)]
# note that ordering with a couple matters
msg = 'Computation of bivariate statistics...'
dashes = len(msg) * '*'
print(msg + '\n' + dashes)
for o1, o2 in couples:
print('* Couple {} - {} ...'.format(o1.name, o2.name))
u1 = univariates_store[o1]
u2 = univariates_store[o2]
try:
biv = load_bivariate(u1, u2)
except BivariateIOError:
biv = compute_bivariate(u1, u2)
biv.export_text()
# print('Ok')
# export master result as dataframe and look at random rows
print('Looking at some examples computed for master...')
df = biv.master.as_dataframe()
if len(df[df['counts'] > 0]) > 10:
excerpt = df[df['counts'] > 0].sample(10).sort_index()
else:
excerpt = df[df['counts'] > 0]
print('{}'.format(excerpt))
print()
# =============================================================================
# Now we move to the more informative cross correlation function at stationarity
# =============================================================================
msg = 'Cross-correlation at stationarity'
dashes = len(msg) * '*'
print(msg + '\n' + dashes)
figs = []
for o1, o2 in couples:
print('* Couple {} - {} ...'.format(o1.name, o2.name))
u1 = univariates_store[o1]
u2 = univariates_store[o2]
try:
biv = load_stationary_bivariate(u1, u2, steady_region, options)
except StationaryBivariateIOError:
biv = compute_stationary_bivariate(u1, u2, steady_region, options)
biv.export_text()
# print('Ok')
# export master result as dataframe and look at random rows
print('Looking at some examples computed for master...')
df = biv.master.as_dataframe()
if len(df[df['counts'] > 0]) > 10:
excerpt = df[df['counts'] > 0].sample(10).sort_index()
else:
excerpt = df[df['counts'] > 0]
print('{}'.format(excerpt))
print()
if o1 == ou:
kwargs = {'show_exp_decay': ref_decayrate}
else:
kwargs = {}
fig = plot_stationary(biv, save=True, **kwargs)
fig.show()
figs.append(fig)
if args.interactive:
ans = input('Press Enter to close these figures and proceed')
else:
for seconds in tqdm(range(10*len(figs)), desc='waiting'):
time.sleep(single_plot_timing/10)
plt.close('all')
# =============================================================================
# We can also analyse the bivariate analysis at stationarity with
# a scatter plot and associated empirical distributions.
# For instance to study the dependency between division_size and cell cycle
# growth rate:
# =============================================================================
msg = 'Scatter plot of division size vs cell cycle growth rate'
dashes = len(msg) * '*'
print(msg + '\n' + dashes)
biv = load_stationary_bivariate(univariates_store[average_gr],
univariates_store[division_size],
steady_region, options)
fig, ax0, ax1, ax2, hs = scatter_plot(biv, xsize=6, ysize=6,
use_xname=None,
use_yname=None,
groupby=None,
color_index=2,
xunits=r'min$^{{-1}}$',
yunits=r'$\mu m$')
labels = [h.get_label() for h in hs]
ax1.legend(handles=hs, labels=labels, loc='upper left', bbox_to_anchor=(1, 1))
fig.show()
if args.interactive:
ans = input('Press Enter to close these figures and terminate script')
else:
for seconds in tqdm(range(10), desc='waiting'):
time.sleep(single_plot_timing/10)
plt.close('all')
| LeBarbouze/tunacell | scripts/bivariate-analysis.py | Python | mit | 10,874 |
#!C:\Users\yinanf\Dropbox\nctreehole\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'distribute==0.6.34','console_scripts','easy_install-2.7'
__requires__ = 'distribute==0.6.34'
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.exit(
load_entry_point('distribute==0.6.34', 'console_scripts', 'easy_install-2.7')()
)
| yinanfang/nctreehole | nctreehole/venv/Scripts/easy_install-2.7-script.py | Python | gpl-3.0 | 384 |
"""
:Copyright: 2006-2021 Jochen Kupperschmidt
:License: Revised BSD (see `LICENSE` file for details)
"""
from decimal import Decimal
from byceps.database import generate_uuid
from byceps.services.shop.article.transfer.models import (
Article,
ArticleID,
ArticleNumber,
ArticleType,
)
from byceps.services.shop.cart.models import Cart
from byceps.services.shop.shop.transfer.models import ShopID
def test_cart_empty_repr():
cart = Cart()
assert repr(cart) == '<Cart(0 items)>'
def test_cart_filled_repr():
article1 = create_article(
ArticleNumber('a-001'), 'Article #1', Decimal('19.99'), Decimal('0.19')
)
article2 = create_article(
ArticleNumber('a-002'), 'Article #2', Decimal('24.99'), Decimal('0.19')
)
cart = Cart()
cart.add_item(article1, 5)
cart.add_item(article2, 3)
assert repr(cart) == '<Cart(2 items)>'
# helpers
def create_article(
item_number: ArticleNumber,
description: str,
price: Decimal,
tax_rate: Decimal,
) -> Article:
return Article(
id=ArticleID(generate_uuid()),
shop_id=ShopID('any-shop'),
item_number=item_number,
type_=ArticleType.other,
description=description,
price=price,
tax_rate=tax_rate,
available_from=None,
available_until=None,
total_quantity=99,
quantity=1,
max_quantity_per_order=10,
not_directly_orderable=False,
separate_order_required=False,
shipping_required=False,
)
| homeworkprod/byceps | tests/unit/services/shop/cart/test_cart_repr.py | Python | bsd-3-clause | 1,558 |
#!/usr/bin/env python
from MAPI import *
from MAPI.Util import *
import sys
def check_input():
if len(sys.argv) < 2:
sys.exit('Usage: %s username' % sys.argv[0])
def reset_settings():
s = OpenECSession(sys.argv[1], '', 'file:///var/run/zarafa')
st = GetDefaultStore(s)
PR_EC_WEBACCESS_SETTINGS = PROP_TAG(PT_STRING8, PR_EC_BASE+0x70)
settings = st.OpenProperty(PR_EC_WEBACCESS_SETTINGS, IID_IStream, 0, MAPI_MODIFY|MAPI_CREATE)
settings.SetSize(0)
settings.Seek(0, STREAM_SEEK_END)
writesettings = settings.Write('a:1:{s:8:"settings";a:1:{s:6:"global";a:3:{s:18:"hierarchylistwidth";s:19:"0";s:13:"maillistwidth";s:3:"375";s:13:"sizeinpercent";s:4:"true";}}}')
if writesettings:
print "Settings for user '%s' were reset." % sys.argv[1]
else:
print "Settings for user '%s' failed to be reset." % sys.argv[1]
settings.Commit(0)
if __name__ == '__main__':
check_input()
reset_settings()
| robertwbrandt/zarafa | zarafa-tools/webaccess/reset_webaccess_settings.py | Python | gpl-2.0 | 1,039 |
import MDAnalysis
from MDAnalysis.analysis import rms
from MDAnalysis.analysis.align import *
from collections import defaultdict
from timeit import default_timer as timer
class RMSF_measurements(object):
"""Measures the RMSF of ligand atoms, averaged over one or more trajectories."""
def __init__(self,topology_data_object, topology, trajectory,ligand_name,start_frame_num=None,end_frame_num=None,skip=None):
self.ligand_rmsf = defaultdict(int)
self.topology_data = topology_data_object
self.trajectory = trajectory
self.topology = topology
self.start = start_frame_num
self.end = end_frame_num
self.skip = skip
self.measure_ligand_rmsf(ligand_name)
self.min_value = min(self.ligand_rmsf.values())
self.max_value = max(self.ligand_rmsf.values())
def measure_ligand_rmsf(self,ligand_name):
i=0
rmsf_list={}
start = timer()
for traj in self.trajectory:
self.topology_data.universe.load_new(traj)
reference = MDAnalysis.Universe(self.topology)
#align = MDAnalysis.analysis.align.AlignTraj(self.topology_data.universe, reference, filename='test.xtc',select='protein',start=self.start[i],stop = self.end[i],step = self.skip[i],verbose=True)
align = MDAnalysis.analysis.align.AlignTraj(self.topology_data.universe, reference, filename='test.xtc',select='protein',verbose=True)
align.run()
aligned_universe = MDAnalysis.Universe(self.topology,"test.xtc")
ligand_noH = aligned_universe.select_atoms(ligand_name+" and not name H*")
R = MDAnalysis.analysis.rms.RMSF(ligand_noH)
R.run()
rmsf_list[i] = R.rmsf.tolist()
i+=1
for index,atom in enumerate(self.topology_data.universe.ligand_noH.atoms):
for traj in rmsf_list:
self.ligand_rmsf[atom.name] += rmsf_list[traj][index]
self.ligand_rmsf = {k:v/len(self.trajectory) for k,v in self.ligand_rmsf.items()}
print "RMSF: "+str(timer()-start)
| ldomic/lintools | lintools/analysis/rmsf.py | Python | gpl-3.0 | 2,077 |
# -*- coding: utf-8 -*-
#+---------------------------------------------------------------------------+
#| 01001110 01100101 01110100 01111010 01101111 01100010 |
#| |
#| Netzob : Inferring communication protocols |
#+---------------------------------------------------------------------------+
#| Copyright (C) 2011-2017 Georges Bossert and Frédéric Guihéry |
#| This program is free software: you can redistribute it and/or modify |
#| it under the terms of the GNU General Public License as published by |
#| the Free Software Foundation, either version 3 of the License, or |
#| (at your option) any later version. |
#| |
#| This program is distributed in the hope that it will be useful, |
#| but WITHOUT ANY WARRANTY; without even the implied warranty of |
#| MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
#| GNU General Public License for more details. |
#| |
#| You should have received a copy of the GNU General Public License |
#| along with this program. If not, see <http://www.gnu.org/licenses/>. |
#+---------------------------------------------------------------------------+
#| @url : http://www.netzob.org |
#| @contact : [email protected] |
#| @sponsors : Amossys, http://www.amossys.fr |
#| Supélec, http://www.rennes.supelec.fr/ren/rd/cidre/ |
#| ANSSI, https://www.ssi.gouv.fr |
#+---------------------------------------------------------------------------+
#+---------------------------------------------------------------------------+
#| Standard library imports
#+---------------------------------------------------------------------------+
import binascii
#+---------------------------------------------------------------------------+
#| Local application imports
#+---------------------------------------------------------------------------+
from netzob.Common.Utils.Decorators import typeCheck
from netzob.Model.Vocabulary.Messages.RawMessage import RawMessage
class L2NetworkMessage(RawMessage):
"""Definition of a layer 2 network message.
>>> msg = L2NetworkMessage(b"090002300202f000")
>>> print(msg.data)
b'090002300202f000'
>>> msg = L2NetworkMessage(b"090002300202f000", date=1352293417.28, l2SourceAddress="00:02:7b:00:bf:33", l2DestinationAddress="00:02:3f:a8:bf:21")
>>> print(msg.source)
00:02:7b:00:bf:33
>>> print(msg.destination)
00:02:3f:a8:bf:21
>>> print(msg)
\033[0;32m[1352293417.28 \033[0;m\033[1;32m00:02:7b:00:bf:33\033[1;m\033[0;32m->\033[0;m\033[1;32m00:02:3f:a8:bf:21\033[1;m\033[0;32m]\033[0;m '090002300202f000'
"""
def __init__(self,
data,
date=None,
l2Protocol=None,
l2SourceAddress=None,
l2DestinationAddress=None):
super(L2NetworkMessage, self).__init__(
data,
date=date,
source=l2SourceAddress,
destination=l2DestinationAddress,
messageType="Network")
self.l2Protocol = str(l2Protocol)
self.l2SourceAddress = str(l2SourceAddress)
self.l2DestinationAddress = str(l2DestinationAddress)
@property
def l2Protocol(self):
"""The protocol of the second layer
:type: str
"""
return self.__l2Protocol
@l2Protocol.setter
@typeCheck(str)
def l2Protocol(self, l2Protocol):
self.__l2Protocol = l2Protocol
@property
def l2SourceAddress(self):
"""The source address of the second layer
:type: str
"""
return self.__l2SourceAddress
@l2SourceAddress.setter
@typeCheck(str)
def l2SourceAddress(self, l2SourceAddress):
self.__l2SourceAddress = l2SourceAddress
@property
def l2DestinationAddress(self):
"""The destination address of the second layer
:type: str
"""
return self.__l2DestinationAddress
@l2DestinationAddress.setter
@typeCheck(str)
def l2DestinationAddress(self, l2DestinationAddress):
self.__l2DestinationAddress = l2DestinationAddress
| lootr/netzob | netzob/src/netzob/Model/Vocabulary/Messages/L2NetworkMessage.py | Python | gpl-3.0 | 4,639 |
# Copyright: Luis Pedro Coelho <[email protected]>, 2012-2018
# License: MIT
import numpy as np
def read_roi(fileobj):
'''
points = read_roi(fileobj)
Read ImageJ's ROI format
Parameters
----------
fileobj: should be a file-like object
Returns
-------
points: a list of points
'''
# This is based on:
# http://rsbweb.nih.gov/ij/developer/source/ij/io/RoiDecoder.java.html
# http://rsbweb.nih.gov/ij/developer/source/ij/io/RoiEncoder.java.html
SPLINE_FIT = 1
DOUBLE_HEADED = 2
OUTLINE = 4
OVERLAY_LABELS = 8
OVERLAY_NAMES = 16
OVERLAY_BACKGROUNDS = 32
OVERLAY_BOLD = 64
SUB_PIXEL_RESOLUTION = 128
DRAW_OFFSET = 256
pos = [4]
def get8():
pos[0] += 1
s = fileobj.read(1)
if not s:
raise IOError('readroi: Unexpected EOF')
return ord(s)
def get16():
b0 = get8()
b1 = get8()
return (b0 << 8) | b1
def get32():
s0 = get16()
s1 = get16()
return (s0 << 16) | s1
def getfloat():
v = np.int32(get32())
return v.view(np.float32)
magic = fileobj.read(4)
if magic != b'Iout':
raise IOError('Magic number not found')
version = get16()
# It seems that the roi type field occupies 2 Bytes, but only one is used
roi_type = get8()
# Discard second Byte:
get8()
if not (0 <= roi_type < 11):
raise ValueError('roireader: ROI type %s not supported' % roi_type)
if roi_type != 7:
raise ValueError('roireader: ROI type %s not supported (!= 7)' % roi_type)
top = get16()
left = get16()
bottom = get16()
right = get16()
n_coordinates = get16()
x1 = getfloat()
y1 = getfloat()
x2 = getfloat()
y2 = getfloat()
stroke_width = get16()
shape_roi_size = get32()
stroke_color = get32()
fill_color = get32()
subtype = get16()
if subtype != 0:
raise ValueError('roireader: ROI subtype {} not supported (!= 0)'.format(subtype))
options = get16()
arrow_style = get8()
arrow_head_size = get8()
rect_arc_size = get16()
position = get32()
header2offset = get32()
if options & SUB_PIXEL_RESOLUTION:
getc = getfloat
points = np.empty((n_coordinates, 2), dtype=np.float32)
else:
getc = get16
points = np.empty((n_coordinates, 2), dtype=np.int16)
points[:,1] = [getc() for i in range(n_coordinates)]
points[:,0] = [getc() for i in range(n_coordinates)]
points[:,1] += left
points[:,0] += top
points -= 1
return points
def read_roi_zip(fname):
'''
Reads all ROIs in a ZIP file
Parameters
----------
fname : str
Input filename
Returns
-------
rois: list of ROIs
Each ROI is a vector of 2D points
See Also
--------
read_roi: function, reads a single ROI
'''
import zipfile
with zipfile.ZipFile(fname) as zf:
return [read_roi(zf.open(n))
for n in zf.namelist()]
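# Illustrative usage (file names are hypothetical):
#   rois = read_roi_zip('RoiSet.zip')   # list of (n_points, 2) arrays
#   with open('cell.roi', 'rb') as f:
#       points = read_roi(f)            # (n_points, 2) array of (y, x) coordinates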
| luispedro/imread | imread/ijrois.py | Python | mit | 3,061 |
from typing import Dict, List, Optional
from idgames.game import Game
INT_TO_GAME: Dict[int, Game] = {
0: Game.DOOM,
1: Game.DOOM2,
2: Game.TNT,
3: Game.PLUTONIA,
4: Game.HERETIC,
5: Game.HEXEN,
6: Game.STRIFE,
7: Game.CHEX,
8: Game.HACX,
}
GAME_TO_INT: Dict[Game, int] = {
Game.DOOM: 0,
Game.DOOM2: 1,
Game.TNT: 2,
Game.PLUTONIA: 3,
Game.HERETIC: 4,
Game.HEXEN: 5,
Game.STRIFE: 6,
Game.CHEX: 7,
Game.HACX: 8,
}
class Entry:
def __init__(self, path: str, file_modified: int, entry_updated: int):
self.path: str = path
self.file_modified: int = file_modified
self.entry_updated: int = entry_updated
self.id: Optional[int] = None
self.title: Optional[str] = None
self.game: Optional[Game] = None
self.authors: List[str] = []
def __repr__(self):
return '{}, {}: {}'.format(self.id, self.path, self.title)
def to_row(self) -> Dict[str, any]:
game = None
if self.game in GAME_TO_INT:
game = GAME_TO_INT.get(self.game)
return {
'path': self.path,
'file_modified': self.file_modified,
'entry_updated': self.entry_updated,
'title': self.title,
'game': game
}
@staticmethod
def from_row(row: Dict):
game: Game = Game.UNKNOWN
if row['game'] in INT_TO_GAME:
game = INT_TO_GAME.get(row['game'])
entry = Entry(
row['path'],
row['file_modified'],
row['entry_updated']
)
entry.id = row['id']
entry.title = row['title']
entry.game = game
return entry
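# Illustrative round trip (values are made up). Note that to_row() omits 'id',
# so one must be supplied before calling from_row():
#   row = entry.to_row()
#   restored = Entry.from_row(dict(row, id=1))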
| GitExl/DoomIdgamesArchive | idgames-extract/src/idgames/entry.py | Python | bsd-2-clause | 1,722 |
# -*- coding: utf-8 -*-
from django.contrib import admin
from models import FileMapping
# Register your models here.
admin.site.register(FileMapping)
| tiankangkan/paper_plane | treasure/admin.py | Python | gpl-3.0 | 155 |
# -*- encoding: utf-8 -*-
from __future__ import unicode_literals
import sys, os
if sys.version_info[0] < 3:
from Queue import Empty
else:
from queue import Empty
from multiprocessing import Process, Queue
class ExceptionItem(object):
def __init__(self, exception):
self.exception = exception
class ParallelGeneratorException(Exception):
pass
class GeneratorDied(ParallelGeneratorException):
pass
class ParallelGenerator(object):
def __init__(self,
orig_gen,
max_lookahead=None,
get_timeout=10):
"""
Creates a parallel generator from a normal one.
The elements will be prefetched up to max_lookahead
ahead of the consumer. If max_lookahead is None,
everything will be fetched.
The get_timeout parameter is the number of seconds
after which we check that the subprocess is still
alive, when waiting for an element to be generated.
Any exception raised in the generator will
be forwarded to this parallel generator.
"""
if max_lookahead:
self.queue = Queue(max_lookahead)
else:
self.queue = Queue()
def wrapped():
try:
for item in orig_gen:
self.queue.put(item)
raise StopIteration()
except Exception as e:
self.queue.put(ExceptionItem(e))
self.get_timeout = get_timeout
self.ppid = None # pid of the parent process
self.process = Process(target=wrapped)
self.process_started = False
def finish_if_possible(self):
"""
We can only terminate the child process from the parent process
"""
if self.ppid == os.getpid() and self.process:# and self.process.is_alive():
self.process.terminate()
self.process = None
self.queue = None
self.ppid = None
def __enter__(self):
"""
Starts the process
"""
self.ppid = os.getpid()
self.process.start()
self.process_started = True
return self
def __exit__(self, exc_type, exc_val, exc_tb):
"""
Kills the process
"""
assert self.process_started and self.ppid == None or self.ppid == os.getpid()
self.finish_if_possible()
def __next__(self):
return self.next()
def __iter__(self):
return self
def __del__(self):
self.finish_if_possible()
def next(self):
if not self.process_started:
raise ParallelGeneratorException(
"""The generator has not been started.
Please use "with ParallelGenerator(..) as g:"
""")
try:
item_received = False
while not item_received:
try:
item = self.queue.get(timeout=self.get_timeout)
item_received = True
except Empty:
# check that the process is still alive
if not self.process.is_alive():
raise GeneratorDied(
"The generator died unexpectedly.")
if type(item) == ExceptionItem:
raise item.exception
return item
except Exception:
self.finish_if_possible()
raise
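# Illustrative usage sketch (assumption, not part of the original module):
# prefetch items from a slow generator in a child process, keeping at most
# 10 items buffered ahead of the consumer.
if __name__ == '__main__':
    def _slow_numbers():
        for i in range(5):
            yield i * i
    with ParallelGenerator(_slow_numbers(), max_lookahead=10) as parallel:
        for value in parallel:
            print(value)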
| wetneb/multiprocessing_generator | multiprocessing_generator/__init__.py | Python | mit | 3,447 |
# Copyright 2015 Palo Alto Networks, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import logging
import yaml
import filelock
import os
from . import basepoller
LOG = logging.getLogger(__name__)
class YamlFT(basepoller.BasePollerFT):
def __init__(self, name, chassis, config):
self.file_monitor_mtime = None
super(YamlFT, self).__init__(name, chassis, config)
def configure(self):
super(YamlFT, self).configure()
self.path = self.config.get('path', None)
if self.path is None:
self.path = os.path.join(
os.environ['MM_CONFIG_DIR'],
'%s_indicators.yml' % self.name
)
self.lock_path = self.path+'.lock'
def _flush(self):
self.file_monitor_mtime = None
super(YamlFT, self)._flush()
def _process_item(self, item):
indicator = item.pop('indicator', None)
if indicator is None:
return [[None, None]]
item['sources'] = [self.name]
return [[indicator, item]]
def _load_yaml(self):
with filelock.FileLock(self.lock_path).acquire(timeout=10):
with open(self.path, 'r') as f:
result = yaml.safe_load(f)
if type(result) != list:
raise RuntimeError(
'%s - %s should be a list of indicators' %
(self.name, self.path)
)
return result
def _build_iterator(self, now):
if self.path is None:
LOG.warning('%s - no path configured', self.name)
raise RuntimeError('%s - no path configured' % self.name)
try:
mtime = os.stat(self.path).st_mtime
except OSError as e:
if e.errno == 2: # no such file
return None
LOG.exception('%s - error checking mtime of %s',
self.name, self.path)
raise RuntimeError(
'%s - error checking indicators list' % self.name
)
if mtime == self.file_monitor_mtime:
return None
self.file_monitor_mtime = mtime
try:
return self._load_yaml()
except:
LOG.exception('%s - exception loading indicators list', self.name)
raise
@staticmethod
def gc(name, config=None):
basepoller.BasePollerFT.gc(name, config=config)
path = None
if config is not None:
path = config.get('path', None)
if path is None:
path = os.path.join(
os.environ['MM_CONFIG_DIR'],
'{}_indicators.yml'.format(name)
)
lock_path = '{}.lock'.format(path)
try:
os.remove(path)
except:
pass
try:
os.remove(lock_path)
except:
pass
class YamlIPv4FT(YamlFT):
def _process_item(self, item):
item['type'] = 'IPv4'
return super(YamlIPv4FT, self)._process_item(item)
class YamlURLFT(YamlFT):
def _process_item(self, item):
item['type'] = 'URL'
return super(YamlURLFT, self)._process_item(item)
class YamlDomainFT(YamlFT):
def _process_item(self, item):
item['type'] = 'domain'
return super(YamlDomainFT, self)._process_item(item)
class YamlIPv6FT(YamlFT):
def _process_item(self, item):
item['type'] = 'IPv6'
return super(YamlIPv6FT, self)._process_item(item)
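# Illustrative sketch (assumption, not part of the original module) of the
# YAML layout _load_yaml() expects: a top-level list in which every entry has
# an 'indicator' value plus arbitrary extra attributes, e.g.
#   - indicator: 198.51.100.1
#     type: IPv4
#   - indicator: example.invalid
#     type: domain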
| PaloAltoNetworks/minemeld-core | minemeld/ft/local.py | Python | apache-2.0 | 4,022 |
from django.db.models import Q
import snotes20.models as models
import osf
def find_or_create_osf_tag(short):
try:
return models.OSFTag.objects.get(Q(short=short) | Q(name=short))
except models.OSFTag.DoesNotExist:
tag = models.OSFTag(name=short)
tag.save()
return tag
def add_osf_note(state, line, parent=None):
if isinstance(line, osf.ParseError):
error = models.DocumentStateError(
state=state,
line=line.line,
message=line.message
)
error.save()
else:
note = models.OSFNote(
timestamp=line.time,
title=line.text,
url=line.link,
order=line._line
)
if parent is None:
note.state = state
else:
note.parent = parent
note.save()
note.tags.add(*[find_or_create_osf_tag(tag) for tag in line.tags])
for nnote in line.notes:
add_osf_note(state, nnote, note)
def prep(text):
header, p_lines = osf.parse_lines(text.split('\n'))
o_lines = osf.objectify_lines(p_lines)
return {
'header': header,
'p_lines': p_lines,
'o_lines': o_lines
}
def handle(prepped):
state = models.OSFDocumentState()
state.save()
for line in prepped['o_lines']:
add_osf_note(state, line)
return state
| shownotes/snotes20-restapi | snotes20/contenttypes/osf.py | Python | agpl-3.0 | 1,396 |
#encoding:utf-8
subreddit = 'thehatedone'
t_channel = '@r_thehatedone'
def send_post(submission, r2t):
return r2t.send_simple(submission)
| Fillll/reddit2telegram | reddit2telegram/channels/~inactive/r_thehatedone/app.py | Python | mit | 145 |
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import time
class TimeProfile:
"""Class for simple profiling of action, with logging of cost."""
def __init__(self, description='operation'):
self._starttime = None
self._endtime = None
self._description = description
self.Start()
def Start(self):
self._starttime = time.time()
self._endtime = None
def GetDelta(self):
"""Returns the rounded delta.
Also stops the timer if Stop() has not already been called.
"""
if self._endtime is None:
self.Stop(log=False)
delta = self._endtime - self._starttime
delta = round(delta, 2) if delta < 10 else round(delta, 1)
return delta
def LogResult(self):
"""Logs the result."""
logging.info('%s seconds to perform %s', self.GetDelta(), self._description)
def Stop(self, log=True):
"""Stop profiling.
Args:
log: Log the delta (defaults to true).
"""
self._endtime = time.time()
if log:
self.LogResult()
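# Illustrative usage sketch (assumption, not part of the original module):
# time an operation and log how long it took.
if __name__ == '__main__':
  logging.basicConfig(level=logging.INFO)
  profile = TimeProfile('sleeping briefly')
  time.sleep(0.25)
  profile.Stop()  # logs e.g. "0.25 seconds to perform sleeping briefly"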
| ric2b/Vivaldi-browser | chromium/build/android/pylib/utils/time_profile.py | Python | bsd-3-clause | 1,141 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import cmd
from .plugins import Plugins
class Console(cmd.Cmd):
prompt = "WebHardening> "
_colors = {'red': '#FF0000',
'white': '#FFFFFF',
'black': '#000000'}
def __init__ (self):
"""Constructor"""
cmd.Cmd.__init__(self)
self._plugins = Plugins()
def do_hello (self, name):
"""Says hello to someone"""
print "Hello %s!" % name
def do_version(self, text):
print "WebHardening v0.1"
def do_start(self, text):
print "Start."
def do_load(self, file_name):
print "Load profile."
def do_plugins(self, plugins):
if plugins in self._plugins.get_list_plugins():
print "%s: %s" % (plugins, plugins)
else:
print "I don’t know: %s" % plugins
def complete_plugins(self, text, line, begidx, endix):
return [i for i in self._plugins.get_list_plugins() if i.startswith(text)]
def do_get_color (self, color):
"""Prints out the hex representation of a color"""
if color in self._colors:
print "%s: %s" % (color, self._colors[color])
else:
print "I don’t know: %s" % color
def complete_get_color (self, text, line, begidx, endix):
"""Complete function for get_color"""
return [i for i in self._colors if i.startswith(text)]
def do_quit (self, s):
print "Bye, bye…"
return True
def help_version(self):
print "Show WebHardening version information."
def help_quit (self):
print "Quits the console"
def help_start(self):
print "Start the WebHardening"
def help_load(self):
print "Load profile."
do_EOF = do_quit
help_EOF = help_quit
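# Illustrative usage sketch (assumption, not part of the original module):
# an application would start the interactive loop with
#     Console().cmdloop()
# cmd.Cmd then dispatches "plugins <name>" to do_plugins(), "get_color red"
# to do_get_color(), and "quit" to do_quit().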
| elcodigok/WebHardening | core/api/console.py | Python | gpl-3.0 | 1,818 |
import pathlib
import pytest
import meshio
from . import helpers
test_set = [
# helpers.empty_mesh,
helpers.tet_mesh
]
@pytest.mark.parametrize("mesh", test_set)
def test(mesh, tmp_path):
helpers.write_read(
tmp_path,
meshio.tetgen.write,
meshio.tetgen.read,
mesh,
1.0e-15,
extension=".node",
)
@pytest.mark.parametrize(
"filename, point_ref_sum, cell_ref_sum", [("mesh.ele", 12, 373)]
)
def test_point_cell_refs(filename, point_ref_sum, cell_ref_sum):
this_dir = pathlib.Path(__file__).resolve().parent
filename = this_dir / "meshes" / "tetgen" / filename
mesh = meshio.read(filename)
assert mesh.point_data["tetgen:ref"].sum() == point_ref_sum
assert mesh.cell_data["tetgen:ref"][0].sum() == cell_ref_sum
| nschloe/meshio | tests/test_tetgen.py | Python | mit | 804 |
import click
from globus_cli.login_manager import LoginManager
from globus_cli.parsing import IdentityType, command
from globus_cli.termio import FORMAT_TEXT_TABLE, formatted_print, is_verbose
from globus_cli.utils import CLIStubResponse
@command(
"get-identities",
short_help="Lookup Globus Auth Identities",
adoc_examples="""Resolve a user ID (outputs the user's username)
[source,bash]
----
$ globus get-identities c699d42e-d274-11e5-bf75-1fc5bf53bb24
----
Resolve a username (outputs the user's ID)
[source,bash]
----
$ globus get-identities [email protected]
----
Resolve multiple usernames and or IDs with tabular output
[source,bash]
----
$ globus get-identities --verbose [email protected] [email protected] \
84942ca8-17c4-4080-9036-2f58e0093869
----
""",
)
@click.argument(
"values", type=IdentityType(allow_b32_usernames=True), required=True, nargs=-1
)
@click.option("--provision", hidden=True, is_flag=True)
@LoginManager.requires_login(LoginManager.AUTH_RS)
def get_identities_command(*, login_manager: LoginManager, values, provision):
"""
Lookup Globus Auth Identities given one or more uuids
and/or usernames.
Default output resolves each UUID to a username and each username to a UUID,
with one output per line in the same order as the inputs.
If a particular input had no corresponding identity in Globus Auth,
"NO_SUCH_IDENTITY" is printed instead.
If more fields are desired, --verbose will give tabular output, but does not
guarantee order and ignores inputs with no corresponding Globus Auth identity.
"""
auth_client = login_manager.get_auth_client()
# since API doesn't accept mixed ids and usernames,
# split input values into separate lists
ids = [v.value for v in values if v.idtype == "identity"]
usernames = [v.value for v in values if v.idtype == "username"]
# make two calls to get_identities with ids and usernames
# then combine the calls into one response
results = []
if len(ids):
results += auth_client.get_identities(ids=ids, provision=provision)[
"identities"
]
if len(usernames):
results += auth_client.get_identities(usernames=usernames, provision=provision)[
"identities"
]
res = CLIStubResponse({"identities": results})
def _custom_text_format(identities):
"""
Non-verbose text output is customized
"""
def resolve_identity(value):
"""
helper to deal with variable inputs and uncertain response order
"""
for identity in identities:
if identity["id"] == value:
return identity["username"]
if identity["username"] == value:
return identity["id"]
return "NO_SUCH_IDENTITY"
# standard output is one resolved identity per line in the same order
# as the inputs. A resolved identity is either a username if given a
# UUID vice versa, or "NO_SUCH_IDENTITY" if the identity could not be
# found
for val in values:
click.echo(resolve_identity(val.value))
formatted_print(
res,
response_key="identities",
fields=[
("ID", "id"),
("Username", "username"),
("Full Name", "name"),
("Organization", "organization"),
("Email Address", "email"),
],
# verbose output is a table. Order not guaranteed, may contain
# duplicates
text_format=(FORMAT_TEXT_TABLE if is_verbose() else _custom_text_format),
)
| globus/globus-cli | src/globus_cli/commands/get_identities.py | Python | apache-2.0 | 3,647 |
from __future__ import division
import json
import os.path
from nflgame import OrderedDict
import nflgame.seq
import nflgame.statmap
_player_json_file = os.path.join(os.path.dirname(__file__), 'players.json')
def _create_players(jsonf=None):
"""
Creates a dict of Player objects from the players.json file, keyed
by GSIS ids.
"""
if jsonf is None:
jsonf = _player_json_file
try:
data = json.loads(open(jsonf).read())
except IOError:
return {}
players = {}
for playerid in data:
players[playerid] = Player(data[playerid])
return players
class Player (object):
"""
Player instances represent meta information about a single player.
This information includes name, team, position, status, height,
    weight, college, jersey number, birth date, years pro, etc.
Player information is populated from NFL.com profile pages.
"""
def __init__(self, data):
self.player_id = data['gsis_id']
self.gsis_name = data.get('gsis_name', '')
self.full_name = data.get('full_name', '')
self.first_name = data.get('first_name', '')
self.last_name = data.get('last_name', '')
self.team = data.get('team', '')
self.position = data.get('position', '')
self.profile_id = data.get('profile_id', 0)
self.profile_url = data.get('profile_url', '')
self.uniform_number = data.get('number', 0)
self.birthdate = data.get('birthdate', '')
self.college = data.get('college', '')
self.height = data.get('height', '')
self.weight = data.get('weight', '')
self.years_pro = data.get('years_pro', 0)
self.status = data.get('status', '')
# API backwards compatibility.
self.gsis_id = self.player_id
self.playerid = self.player_id
self.name = self.full_name
self.number = self.uniform_number
def stats(self, year, week=None):
games = nflgame.games(year, week)
players = list(nflgame.combine(games).filter(playerid=self.playerid))
if len(players) == 0:
return GamePlayerStats(self.player_id, self.gsis_name,
None, self.team)
return players[0]
def plays(self, year, week=None):
plays = []
games = nflgame.games(year, week)
for g in games:
plays += filter(lambda p: p.has_player(self.playerid),
list(g.drives.plays()))
return nflgame.seq.GenPlays(plays)
def __str__(self):
return '%s (%s, %s)' % (self.name, self.position, self.team)
class PlayerDefense (Player):
def __init__(self, team):
self.playerid = None
self.name = team
self.team = team
self.position = 'DEF'
def stats(self, year, week=None):
assert False, 'Cannot be called on a defense.'
def plays(self, year, week=None):
assert False, 'Cannot be called on a defense.'
def __str__(self):
return '%s Defense' % self.team
class PlayerStats (object):
"""
Player represents a single player and all of his statistical categories.
Every player has 'playerid', 'name' and 'home' fields.
Additionally, depending upon which statistical categories that player
was involved in for the game, he'll have properties such as 'passing_tds',
'rushing_yds', 'defense_int' and 'kicking_fgm'.
    In order to know whether a particular player belongs to a statistical category,
you may use the filtering methods of a player sequence or alternatively,
use the has_cat method with arguments like 'passing', 'rushing', 'kicking',
etc. (A player sequence in this case would be an instance of
GenPlayerStats.)
You may also inspect whether a player has a certain property by using
the special __dict__ attribute. For example::
if 'passing_yds' in player.__dict__:
# Do something with player.passing_yds
"""
def __init__(self, playerid, name, home, team):
"""
Create a new Player instance with the player id (from NFL.com's
GameCenter), the player's name (e.g., "T.Brady") and whether the
player is playing in a home game or not.
"""
self.playerid = playerid
self.name = name
self.home = home
self.team = team
self._stats = OrderedDict()
self.player = None
if self.playerid in nflgame.players:
self.player = nflgame.players[self.playerid]
def has_cat(self, cat):
for f in self._stats:
if f.startswith(cat):
return True
return False
@property
def guess_position(self):
"""
Guesses the position of this player based on the statistical
categories present in this object when player meta is not
present.
Note that if this resorts to a guess, then it will be more
effective on aggregate data rather than data from just a
single play. (e.g., if a QB runs the ball, and that's the
only data available, the position returned will be RB.)
When a position is guessed, only the following positions will
be returned: QB, RB, WR, DEF, K and P.
"""
# Look for the player meta first. Duh.
if self.player is not None:
return self.player.position
stats = [
(self.passing_att, 'QB'),
(self.rushing_att, 'RB'),
(self.receiving_tar, 'WR'),
(self.defense_tkl, 'DEF'),
(self.defense_ast, 'DEF'),
(self.kicking_tot, 'K'),
(self.kicking_fga, 'K'),
(self.punting_tot, 'P'),
]
return sorted(stats, reverse=True)[0][1]
@property
def tds(self):
"""
Returns the total number of touchdowns credited to this player across
all statistical categories.
"""
n = 0
for f, v in self.__dict__.iteritems():
if f.endswith('tds'):
n += v
return n
@property
def twopta(self):
"""
Returns the total number of two point conversion attempts for
the passing, rushing and receiving categories.
"""
return (self.passing_twopta
+ self.rushing_twopta
+ self.receiving_twopta)
@property
def twoptm(self):
"""
Returns the total number of two point conversions for
the passing, rushing and receiving categories.
"""
return (self.passing_twoptm
+ self.rushing_twoptm
+ self.receiving_twoptm)
@property
def twoptmissed(self):
"""
Returns the total number of two point conversion failures for
the passing, rushing and receiving categories.
"""
return (self.passing_twoptmissed
+ self.rushing_twoptmissed
+ self.receiving_twoptmissed)
@property
def stats(self):
"""
Returns a dict of all stats for the player.
"""
return self._stats
def formatted_stats(self):
"""
Returns a roughly-formatted string of all statistics for this player.
"""
s = []
for stat, val in self._stats.iteritems():
s.append('%s: %s' % (stat, val))
return ', '.join(s)
def _add_stats(self, stats):
for k, v in stats.iteritems():
self.__dict__[k] = self.__dict__.get(k, 0) + v
self._stats[k] = self.__dict__[k]
def _overwrite_stats(self, stats):
for k, v in stats.iteritems():
self.__dict__[k] = v
self._stats[k] = self.__dict__[k]
def __str__(self):
"""
Simply returns the player's name, e.g., "T.Brady".
"""
return self.name
def __add__(self, other):
"""
Adds two players together. Only two player objects that correspond
to the same human (i.e., GameCenter identifier) can be added together.
If two different players are added together, an assertion will
be raised.
The effect of adding two player objects simply corresponds to the
sums of all statistical values.
Note that as soon as two players have been added, the 'home' property
becomes undefined if the two operands have different values of 'home'.
"""
assert self.playerid == other.playerid
assert type(self) == type(other)
if self.home != other.home:
home = None
else:
home = self.home
new_player = self.__class__(self.playerid, self.name, home, self.team)
new_player._add_stats(self._stats)
new_player._add_stats(other._stats)
return new_player
def __sub__(self, other):
assert self.playerid == other.playerid
assert type(self) == type(other)
new_player = GamePlayerStats(self.playerid,
self.name, self.home, self.team)
new_player._add_stats(self._stats)
for bk, bv in other._stats.iteritems():
if bk not in new_player._stats: # stat was taken away? ignore.
continue
new_player._stats[bk] -= bv
if new_player._stats[bk] == 0:
del new_player._stats[bk]
else:
new_player.__dict__[bk] = new_player._stats[bk]
anydiffs = False
for k, v in new_player._stats.iteritems():
if v > 0:
anydiffs = True
break
if not anydiffs:
return None
return new_player
def __getattr__(self, name):
# If name has one of the categories as a prefix, then return
# a default value of zero
for cat in nflgame.statmap.categories:
if name.startswith(cat):
return 0
raise AttributeError
def passer_rating(self):
"""
Calculate and return the passer rating using the NFL formula. Passer
rating is calculated using a player's passing attempts, completions,
yards, touchdowns, and interceptions. Passer rating in the NFL is on a
scale from 0 to 158.3.
"""
l = [((self.passing_cmp / self.passing_att) - .3) * 5]
l.append(((self.passing_yds / self.passing_att) - 3) * .25)
l.append((self.tds / self.passing_att) * 20)
l.append(2.375 - (self.passing_ints / self.passing_att * 25))
m = []
for a in l:
if a < 0:
a = 0
m.append(a)
elif a > 2.375:
a = 2.375
m.append(a)
else:
m.append(a)
rating = round((sum(m) / 6) * 100, 1)
return rating
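# Worked example of the rating formula above (hypothetical numbers, not from
# any real game): 20 completions on 30 attempts, 250 yards, 2 touchdowns
# credited to the player, 1 interception.
#   a = ((20/30) - .3) * 5   = 1.833
#   b = ((250/30) - 3) * .25 = 1.333
#   c = (2/30) * 20          = 1.333
#   d = 2.375 - (1/30 * 25)  = 1.542
# Each term is clamped to the range [0, 2.375], then
#   rating = round(((a + b + c + d) / 6) * 100, 1) = 100.7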
class GamePlayerStats (PlayerStats):
def __init__(self, playerid, name, home, team):
super(GamePlayerStats, self).__init__(playerid, name, home, team)
self.games = 1
def __add__(self, other):
new_player = super(GamePlayerStats, self).__add__(other)
new_player.games = self.games + other.games
return new_player
class PlayPlayerStats (PlayerStats):
pass
| icebluesun/nflgame | nflgame/player.py | Python | unlicense | 11,285 |
'''Simple require framework to ensure certain packages are loaded
'''
from __future__ import absolute_import
from __future__ import with_statement
import sys
import re
import os
from contextlib import contextmanager
from .filepath import FilePath, DirPath
from .ctxsingleton import CtxSingleton
from .symbol import Symbol
#from .load import loadfile #at bottom
class RequireError(Exception):
pass
class RequireState(CtxSingleton):
def _cxs_setup_top(self):
self.search_paths = ['.']
self.loaded_packages = set()
state = RequireState()
@contextmanager
def files_search_path(filepath=None):
if filepath is None:
yield None
else:
directory = FilePath(filepath).abspath().parent()
with state.top(search_paths = [directory] + state.top.search_paths):
yield None
def require(*names):
from .load import loadfile
for name in names:
name = normalize_name(name)
if name in state.loaded_packages:
return
basepath = get_name_path(name)
state.loaded_packages.add(name)
try:
loadfile(basepath, state.search_paths)
except Exception:
tp,value,tb = sys.exc_info()
try:
state.loaded_packages.remove(name)
except Exception:
pass
raise tp,value,tb
def normalize_name(name):
if isinstance(name, Symbol):
name = name.print_form
parts = re.split(r'[/.]+', name.strip())
return '.'.join(parts)
def get_name_path(name):
return name.replace('.', '/')
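# Worked examples (hypothetical names, not part of the original module):
#   normalize_name('foo/bar.baz')  -> 'foo.bar.baz'
#   get_name_path('foo.bar.baz')   -> 'foo/bar/baz'
# so require('foo/bar.baz') searches the configured paths for 'foo/bar/baz'.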
| matthagy/Jamenson | jamenson/runtime/require.py | Python | apache-2.0 | 1,592 |
import unittest
from unittest import mock
import tethys_apps
from tethys_apps.apps import TethysAppsConfig
class TestApps(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_TethysAppsConfig(self):
self.assertEqual('tethys_apps', TethysAppsConfig.name)
self.assertEqual('Tethys Apps', TethysAppsConfig.verbose_name)
@mock.patch('tethys_apps.apps.SingletonHarvester')
def test_ready(self, mock_singleton_harvester):
tethys_app_config_obj = TethysAppsConfig('tethys_apps', tethys_apps)
tethys_app_config_obj.ready()
mock_singleton_harvester().harvest.assert_called()
| CI-WATER/tethys | tests/unit_tests/test_tethys_apps/test_apps.py | Python | bsd-2-clause | 670 |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Send sample query to prediction engine
"""
import predictionio
engine_client = predictionio.EngineClient(url="http://localhost:8000")
print engine_client.send_query({"items": ["i1", "i3"], "num": 4})
| pferrel/PredictionIO | examples/scala-parallel-similarproduct/no-set-user/data/send_query.py | Python | apache-2.0 | 990 |
#!/usr/bin/python3
"""
Copyright (c) 2016-2018 - o2r project
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from gevent import monkey
monkey.patch_all()
import argparse
import ast
import hashlib
import json
import logging
import traceback
import urllib.parse
import uuid
import bagit
import requests
import warnings
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=PendingDeprecationWarning)
from bottle import *
from pymongo import MongoClient, errors
import inspect
from repos import *
from repos.helpers import *
# Bottle
app = Bottle()
logging.getLogger('bagit').setLevel(logging.CRITICAL)
@app.hook('before_request')
def strip_path():
# remove trailing slashes
try:
request.environ['PATH_INFO'] = request.environ['PATH_INFO'].rstrip('/')
except Exception as exc:
status_note(['! error: ', xstr(exc.args[0])], d=is_debug)
@app.route('/api/v1/shipment/<name>', method='GET')
def shipment_get_one(name):
data = db['shipments'].find_one({'id': name})
if data is not None:
response.status = 200
response.content_type = 'application/json'
if '_id' in data:
data.pop('_id', None)
data.pop('dl_filepath', None)
return json.dumps(data)
else:
status_note(['user requested non-existing shipment ', name], d=is_debug)
response.status = 404
response.content_type = 'application/json'
        return json.dumps({'error': 'a shipment with that id does not exist'})
@app.route('/api/v1/shipment', method='GET')
def shipment_get_all():
try:
cid = request.query.compendium_id
find_args = {}
if cid:
find_args.update({'compendium_id': cid})
answer_list = []
for key in db['shipments'].find(find_args):
answer_list.append(key['id'])
response.content_type = 'application/json'
response.status = 200
return json.dumps(answer_list)
except Exception as exc:
status_note(str(exc), d=is_debug)
response.status = 400
response.content_type = 'application/json'
return json.dumps({'error': 'bad request'})
@app.route('/api/v1/shipment/<shipmentid>/status', method='GET')
def shipment_get_status(shipmentid):
try:
data = db['shipments'].find_one({'id': shipmentid})
if data is not None:
if 'status' in data:
response.status = 200
response.content_type = 'application/json'
return {'id': shipmentid, 'status': str(data['status'])}
else:
response.status = 400
response.content_type = 'application/json'
return {'error': 'shipment not found'}
except:
raise
@app.route('/api/v1/shipment/<shipmentid>/files', method='GET')
def shipment_get_file_id(shipmentid):
try:
global REPO_TARGET
global REPO_TOKEN
current_depot = db_find_depotid_from_shipment(shipmentid)
db_fill_repo_target_and_list(shipmentid)
headers = {"Content-Type": "application/json"}
r = requests.get(''.join((REPO_TARGET.get_host(), '/deposit/depositions/', current_depot, '?access_token=', REPO_TOKEN)), headers=headers)
if 'files' in r.json():
response.status = 200
response.content_type = 'application/json'
return json_dumps({'files': r.json()['files']})
else:
response.status = 400
response.content_type = 'application/json'
return {'error': 'no files object in repository response'}
except:
raise
@app.route('/api/v1/shipment/<shipmentid>/dl', method='GET')
def shipment_get_dl_file(shipmentid):
try:
global REPO_TARGET
global REPO_LIST
if REPO_LIST is not None:
# allows for multiple DL sources:
for repo in REPO_LIST:
REPO_TARGET = repo
if hasattr(REPO_TARGET, 'get_id'):
# default for now:
if REPO_TARGET.get_id() == 'download':
break
else:
REPO_TARGET = None
if REPO_TARGET is None:
status_note('! no repository with download feature configured', d=is_debug)
response.status = 501
response.content_type = 'application/json'
return json.dumps({'error': 'no repository with download feature configured'})
else:
response.status = 202
response.headers['Content-Type'] = 'application/zip'
response.headers['Content-Disposition'] = ''.join(('attachment; filename=', shipmentid, '.zip'))
p = os.path.normpath(db_find_dl_filepath_from_shipment(shipmentid))
status_note(str(generate_zipstream(p)), d=is_debug)
return generate_zipstream(p)
except Exception as exc:
status_note(['! error: ', xstr(exc.args[0])], d=is_debug)
response.status = 400
response.content_type = 'application/json'
return json.dumps({'error': 'bad request'})
@app.route('/api/v1/shipment/<shipmentid>/publishment', method='PUT')
def shipment_put_publishment(shipmentid):
try:
#! once published, cannot delete in most repos
global REPO_TARGET
global REPO_TOKEN
current_depot = db_find_depotid_from_shipment(shipmentid)
# get a return of the response of the publish request from the corresponding repo class
a = REPO_TARGET.publish(current_depot, REPO_TOKEN)
if not a:
status_note('! error, failed to call publish', d=is_debug)
response.status = 500
response.content_type = 'application/json'
r = {'id': shipmentid, 'status': 'error'}
return json.dumps(r)
else:
if a == 200 or a == 202: # note that some repos will return a 202 CREATED
r = {'id': shipmentid, 'status': 'published'}
# update shipment data in database
data = db['shipments'].find_one({'id': shipmentid})
if data is not None:
if 'status' in data:
data['status'] = 'published'
db['shipments'].update_one({'_id': data['_id']}, {'$set': data}, upsert=True)
status_note(['updated shipment object ', xstr(data['_id'])], d=is_debug)
else:
r = {'id': shipmentid, 'status': 'error'}
response.status = 200
response.content_type = 'application/json'
return json.dumps(r)
except:
raise
@app.route('/api/v1/shipment/<shipmentid>/publishment', method='GET')
def shipment_get_publishment(shipmentid):
try:
global REPO_TARGET
global REPO_TOKEN
db_fill_repo_target_and_list(shipmentid)
current_depot = db_find_depotid_from_shipment(shipmentid)
db_fill_repo_target_and_list(shipmentid)
REPO_TARGET.get_list_of_files_from_depot(current_depot, REPO_TOKEN)
except:
raise
@app.route('/api/v1/shipment/<shipmentid>/files/<fileid>', method='DELETE')
def shipment_del_file_id(shipmentid, fileid):
# delete specific file in a depot of a shipment
try:
global REPO_TARGET
global REPO_TOKEN
current_depot = db_find_depotid_from_shipment(shipmentid)
db_fill_repo_target_and_list(shipmentid)
if hasattr(REPO_TARGET, 'del_from_depot'):
if REPO_TARGET.del_from_depot(current_depot, fileid, REPO_TOKEN) == 204:
response.status = 204
return '', 204
except:
raise
@app.route('/api/v1/shipment', method='POST')
def shipment_post_new():
try:
status_note('# # # New shipment request # # #')
global env_compendium_files
# First check if user level is high enough:
try:
# prefer this if provided via request (for non-browser use and testing)
cookie = request.forms.get('cookie')
if cookie is None:
cookie = request.get_cookie(env_cookie_name)
except:
cookie = request.get_cookie(env_cookie_name)
if cookie is None:
status_note(['cookie <', env_cookie_name, '> cannot be found!'], d=is_debug)
response.status = 400
response.content_type = 'application/json'
return json.dumps({'error': 'bad request: authentication cookie is missing'})
cookie = urllib.parse.unquote(cookie)
user_entitled = session_user_entitled(cookie, env_user_level_min)
status_note(['validating session with cookie <', cookie, '> and minimum level ', str(env_user_level_min), '. found user <', str(user_entitled), '>'], d=is_debug)
if user_entitled:
# get shipment id
new_id = request.forms.get('_id')
if new_id is None:
# create new shipment id because request did not include one
new_id = uuid.uuid4()
new_md = request.forms.get('md')
if new_md is None:
new_md = {}
else:
try:
new_md = ast.literal_eval(new_md)
except:
new_md = {}
# shipment_data is the administrative metadata about the current shipment for the shipments collection in DB
shipment_data = {'id': str(new_id),
'compendium_id': request.forms.get('compendium_id'),
'deposition_id': request.forms.get('deposition_id'),
'deposition_url': request.forms.get('deposition_url'),
'update_packaging': request.forms.get('update_packaging'),
'recipient': request.forms.get('recipient'),
'last_modified': str(datetime.now()),
'user': user_entitled,
'status': 'to be shipped',
'md': new_md
}
# compendium_data is the administrative metadata about the current compendium in the compendia collection in DB
compendium_data = {}
this_compendium_mongo_doc_id = ''
try:
compendium_data = db['compendia'].find_one({'id': shipment_data['compendium_id']})
# make mongo db _id referencable
this_compendium_mongo_doc_id = compendium_data.get('_id')
except errors.PyMongoError:
status_note(['! error: no compendium data for <', shipment_data['compendium_id'], '>'], d=is_debug)
# create object for administrative metadata in the shipment collection in DB
this_shipment_mongo_doc = db.shipments.insert_one(shipment_data)
status_note(['created shipment object ', xstr(this_shipment_mongo_doc.inserted_id)], d=is_debug)
status = 200
if shipment_data['recipient'] not in REPO_LIST_availables_as_IDstr:
# that recipient is not available, hence cancel new shipment
status_note("! error: recipient not available in configured repos", d=is_debug)
shipment_data['status'] = 'error'
status = 400
else:
# Set REPO TARGET object from REPO LIST:
global REPO_TARGET
global REPO_TOKEN
# Refresh recipients and make the lists available:
db_fill_repo_target_and_list(str(new_id))
if shipment_data['deposition_id'] is None or shipment_data['deposition_id'] == {}:
# No depot yet, go create one
if compendium_data is None:
status_note('! Invalid compendium id', d=is_debug)
shipment_data['status'] = 'error'
status = 400
else:
# Check if candidate exists
if 'candidate' not in compendium_data:
status_note('no <candidate> element in db doc for that compendium', d=is_debug)
shipment_data['status'] = 'error'
status = 403
else:
if compendium_data['candidate'] is True:
status_note('ERC candidate may not be shipped.')
shipment_data['status'] = 'error'
status = 403
else:
# Aquire path to files via env var and id:
compendium_files = os.path.normpath(os.path.join(env_compendium_files, shipment_data['compendium_id']))
# Determine state of that compendium: Is is a bag or not, zipped, valid, etc:
compendium_state = files_scan_path(compendium_files)
if not compendium_state == 0:
# Case path does not exist:
if compendium_state == 1:
# Case: Is a bagit bag:
try:
bag = bagit.Bag(compendium_files)
bag.validate()
status_note(['valid bagit bag at <', str(shipment_data['compendium_id']), '>'], d=is_debug)
except bagit.BagValidationError as e:
status_note(['! invalid bagit bag at <', str(shipment_data['compendium_id']), '>'], d=is_debug)
details = []
for d in e.details:
details.append(str(d))
status_note(xstr(d))
# Exit point for invalid not to be repaired bags
if not strtobool(shipment_data['update_packaging']):
shipment_data['status'] = 'error'
compendium_data['bag'] = False
# update shipment data in database
db.shipments.update_one({'_id': this_shipment_mongo_doc.inserted_id}, {'$set': shipment_data}, upsert=True)
response.status = 400
response.content_type = 'application/json'
return json.dumps({'error': str(details)})
else:
status_note('updating bagit bag...')
# Open bag object and update:
try:
bag = bagit.Bag(compendium_files)
bag.save(manifests=True)
# Validate a second time to ensure successful update:
try:
bag.validate()
status_note(['Valid updated bagit bag at <', str(shipment_data['compendium_id']), '>'], d=is_debug)
except bagit.BagValidationError:
status_note('! error while validating updated bag')
shipment_data['status'] = 'error'
compendium_data['bag'] = False
# update shipment data in database
db.shipments.update_one({'_id': this_shipment_mongo_doc.inserted_id}, {'$set': shipment_data}, upsert=True)
response.status = 400
response.content_type = 'application/json'
return json.dumps({'error': 'unable to validate updated bag'})
except Exception as e:
status_note(['! error while bagging: ', str(e)], d=is_debug)
elif compendium_state == 2:
# Case: dir is no bagit bag, needs to become a bag first
try:
bag = bagit.make_bag(compendium_files)
bag.save()
compendium_data['metadata']["o2r"]['codefiles'] = list(map(addDataPath, compendium_data['metadata']["o2r"]['codefiles']))
compendium_data['metadata']["o2r"]['inputfiles'] = list(map(addDataPath, compendium_data['metadata']["o2r"]['inputfiles']))
compendium_data['metadata']["o2r"]['mainfile_candidates'] = list(map(addDataPath, compendium_data['metadata']["o2r"]['mainfile_candidates']))
compendium_data['metadata']["o2r"]['displayfile_candidates'] = list(map(addDataPath, compendium_data['metadata']["o2r"]['displayfile_candidates']))
compendium_data['metadata']["o2r"]['mainfile'] = addDataPath(compendium_data['metadata']["o2r"]["mainfile"])
compendium_data['metadata']["o2r"]['displayfile'] = addDataPath(compendium_data['metadata']["o2r"]["displayfile"])
status_note('New bagit bag written')
except Exception as e:
status_note(['! error while bagging: ', xstr(e)], d=is_debug)
#elif compendium_state == 3: # would be dealing with zip files...
else:
status_note(['! error, invalid path to compendium: ', compendium_files], d=is_debug)
shipment_data['status'] = 'error'
compendium_data['bag'] = False
# Update shipment data in database
db.shipments.update_one({'_id': this_shipment_mongo_doc.inserted_id}, {'$set': shipment_data}, upsert=True)
response.status = 400
response.content_type = 'application/json'
return json.dumps({'error': 'invalid path to compendium'})
# Continue with zipping and upload
compendium_data['bag'] = True
compendium_data['compendium'] = True
# update compendium data in DB
db.compendia.update_one({'_id': this_compendium_mongo_doc_id}, {'$set': compendium_data}, upsert=True)
# update shipment data in DB
db.shipments.update_one({'_id': this_shipment_mongo_doc.inserted_id}, {'$set': shipment_data}, upsert=True)
status_note(['updated shipment object ', xstr(this_shipment_mongo_doc.inserted_id)], d=is_debug)
# Ship to the selected repository
file_name = '.'.join((str(shipment_data['compendium_id']), 'zip'))
if not hasattr(REPO_TARGET, 'create_depot'):
# fetch DL link if available
if hasattr(REPO_TARGET, 'get_dl'):
shipment_data['dl_filepath'] = REPO_TARGET.get_dl(file_name, compendium_files)
status_note('started download stream...', d=False)
shipment_data['status'] = 'shipped'
db.shipments.update_one({'_id': this_shipment_mongo_doc.inserted_id}, {'$set': shipment_data},
upsert=True)
return shipment_get_dl_file(shipment_data['id'])
else:
status_note('! error, the selected recipient repo class has no method to create a new depot', d=is_debug)
response.status = 500
response.content_type = 'application/json'
return json.dumps(
{'error': 'recipient repo class misses a method to create a new file depot'})
else:
# the selected repo class does have a function 'create_depot', now use it:
shipment_data['deposition_id'] = REPO_TARGET.create_depot(REPO_TOKEN)
# zip all files in dir and submit as zip:
REPO_TARGET.add_zip_to_depot(shipment_data['deposition_id'], file_name, compendium_files, REPO_TOKEN, env_max_dir_size_mb)
# Add metadata that are in compendium in db:
if 'metadata' in compendium_data and 'deposition_id' in shipment_data:
REPO_TARGET.add_metadata(shipment_data['deposition_id'], compendium_data['metadata'], REPO_TOKEN)
shipment_data['status'] = 'shipped'
# shipment is complete, update administrative metadata for shipment and compendium in DB one last time
db.shipments.update_one({'_id': this_shipment_mongo_doc.inserted_id}, {'$set': shipment_data}, upsert=True)
db.compendia.update_one({'_id': this_compendium_mongo_doc_id}, {'$set': compendium_data}, upsert=True)
# build and send response
response.status = status
response.content_type = 'application/json'
# preview object for logger:
d = {'id': shipment_data['id'],
'recipient': shipment_data['recipient'],
'deposition_id': shipment_data['deposition_id'],
'status': shipment_data['status']
}
return json.dumps(d)
else:
response.status = 403
response.content_type = 'application/json'
return json.dumps({'error': 'insufficient permissions (not logged in?)'})
except requests.exceptions.RequestException as rexc:
raise
status_note(['! error: ', xstr(rexc)], d=is_debug)
response.status = 400
response.content_type = 'application/json'
return json.dumps({'error': 'bad request'})
except Exception as exc:
raise
status_note(['! error: ', xstr(exc.args[0])], d=is_debug)
        message = ''.join(('bad request: ', xstr(exc.args[0])))
response.status = 500
response.content_type = 'application/json'
return json.dumps({'error': message})
@app.route('/api/v1/recipient', method='GET')
def recipient_get_repo_list():
try:
global REPO_LIST
output = {'recipients': []}
for repo in REPO_LIST:
try:
output['recipients'].append({'id': xstr(repo.get_id()), 'label': repo.get_label()})
except AttributeError:
                status_note(['! error: repository class ', xstr(repo), ' @ ', xstr(type(repo).__name__), ' is unlabeled or has no function to return its label.'], d=is_debug)
response.status = 200
response.content_type = 'application/json'
return json.dumps(output)
except Exception as exc:
status_note(['! error: ', xstr(exc)], d=is_debug)
raise
#http errors
#@app.error(404)
#def error404(error):
# response.content_type = 'application/json'
# return json.dumps(str(error))
#@app.error(500)
#def error500(error):
# response.content_type = 'application/json'
# return json.dumps(str(error))
# Session
def session_get_cookie(val, secret):
try:
# Create session cookie string for session ID.
signature = hmac.new(str.encode(secret), msg=str.encode(val), digestmod=hashlib.sha256).digest()
signature_enc = base64.b64encode(signature)
cookie = ''.join(('s:', val, '.', signature_enc.decode()))
cookie = re.sub(r'\=+$', '', cookie) # remove trailing = characters
return cookie
except Exception as exc:
#raise
status_note(['! error: ', exc.args[0]])
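# For example (hypothetical values), session_get_cookie('abc123', 'secret')
# returns a string of the form 's:abc123.<base64 HMAC-SHA256 signature>',
# with any trailing '=' padding stripped from the signature.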
def session_get_user(cookie, my_db):
session_id = cookie.split('.')[0].split('s:')[1]
if not session_id:
status_note(['no session found for cookie <', xstr(cookie), '>'])
return None
if hmac.compare_digest(cookie, session_get_cookie(session_id, env_session_secret)):
sessions = my_db['sessions']
try:
session = sessions.find_one({'_id': session_id})
session_user = session['session']['passport']['user']
user_doc = my_db['users'].find_one({'orcid': session_user})
return user_doc['orcid']
except Exception as exc:
# raise
status_note(['! error: ', str(exc.args[0])])
else:
return None
def addDataPath(path):
return(os.path.join('data', path))
def session_user_entitled(cookie, min_lvl):
if cookie:
user_orcid = session_get_user(cookie, db)
if not user_orcid:
status_note(['no orcid found for cookie <', xstr(cookie), '>'])
return None
this_user = db['users'].find_one({'orcid': user_orcid})
status_note(['found user <', xstr(this_user), '> for orcid ', user_orcid])
if this_user:
if this_user['level'] >= min_lvl:
return this_user['orcid']
else:
return None
else:
return None
else:
return None
def db_fill_repo_target_and_list(shipmentid):
global REPO_TARGET
global REPO_TOKEN
global REPO_LIST
global TOKEN_LIST
if shipmentid is not None:
data = db['shipments'].find_one({'id': shipmentid})
if data is not None:
if 'recipient' in data:
# check if in repo list
for repo in REPO_LIST:
if data['recipient'].lower() == repo.get_id():
REPO_TARGET = repo
try:
REPO_TOKEN = TOKEN_LIST[repo.get_id()]
except:
status_note([' ! missing token for', repo.get_id()], d=is_debug)
else:
status_note(' ! no recipient specified in db dataset', d=is_debug)
else:
status_note(' ! no shipment specified in db dataset', d=is_debug)
else:
status_note(' ! error retrieving shipment id and recipient', d=is_debug)
def db_find_depotid_from_shipment(shipmentid):
data = db['shipments'].find_one({'id': shipmentid})
if data is not None:
if 'deposition_id' in data:
return str(data['deposition_id'])
else:
return None
def db_find_dl_filepath_from_shipment(shipmentid):
data = db['shipments'].find_one({'id': shipmentid})
if data is not None:
if 'dl_filepath' in data:
return str(data['dl_filepath'])
else:
return None
def register_repos():
# dynamically instantiate repositories that are in 'repo' folder
# 'configured' means both repoclass and token of that repo are available
global REPO_LIST
global TOKEN_LIST
global REPO_LIST_availables_as_IDstr
if TOKEN_LIST is None:
status_note('! no repository tokens available, unable to proceed')
sys.exit(1)
else:
try:
shortlist = []
for name, obj in inspect.getmembers(sys.modules[__name__]):
if name.startswith('repo'):
for n, class_obj in inspect.getmembers(obj):
if n.startswith('RepoClass') and class_obj not in shortlist:
shortlist.append(class_obj)
# unique list without import cross references
for class_obj in shortlist:
i = class_obj()
for listed_token in TOKEN_LIST:
if listed_token == i.get_id():
# see if function to verify the token exists in repo class:
if hasattr(i, 'verify_token'):
# only add to list, if valid token:
if i.verify_token(TOKEN_LIST[listed_token]):
# add instantiated class module for each repo
REPO_LIST.append(class_obj())
# add name id of that repo to a list for checking recipients available later
REPO_LIST_availables_as_IDstr.append(i.get_id())
if len(REPO_LIST) > 0:
status_note([str(len(REPO_LIST)), ' repositories configured'])
else:
status_note('! no repositories configured')
except:
raise
def save_get_from_config(element, config_dict):
try:
if config_dict is None:
return None
else:
if element in config_dict:
return config_dict[element]
else:
return None
    except Exception as exc:
status_note(['! error, ', xstr(exc)], d=is_debug)
return None
# Main
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='shipper arguments')
parser.add_argument('-d', '--debug', help='enable debug mode', required=False, action='store_true', default=False)
parser.add_argument('-t', '--token', type=json.loads, help='access tokens', required=False)
# args parsed:
args = vars(parser.parse_args())
status_note(['args: ', xstr(args)])
global is_debug
is_debug = args['debug']
try:
if not os.path.isfile('config.json'):
status_note('configuration file missing. unable to proceed', d=is_debug)
exit(1)
else:
with open('config.json') as data_file:
config = json.load(data_file)
env_mongo_host = os.environ.get('SHIPPER_MONGODB', save_get_from_config('mongodb_host', config))
env_mongo_db_name = os.environ.get('SHIPPER_MONGO_NAME', save_get_from_config('mongodb_db', config))
env_bottle_host = os.environ.get('SHIPPER_BOTTLE_HOST', save_get_from_config('bottle_host', config))
env_bottle_port = os.environ.get('SHIPPER_BOTTLE_PORT', save_get_from_config('bottle_port', config))
TOKEN_LIST = []
rt = os.environ.get('SHIPPER_REPO_TOKENS', save_get_from_config('repository_tokens', config))
if type(rt) is str:
try:
TOKEN_LIST = json.loads(os.environ.get('SHIPPER_REPO_TOKENS', save_get_from_config('repository_tokens', config)))
except:
TOKEN_LIST = None
elif type(rt) is dict:
TOKEN_LIST = rt
# overwrite if token is given via:
if args is not None:
if 'token' in args:
if args['token'] is not None:
if args['token'] == {}:
status_note('token argument is empty. unable to proceed', d=is_debug)
sys.exit(1)
else:
TOKEN_LIST = args['token']
# Get environment variables
env_file_base_path = os.environ.get('SHIPPER_BASE_PATH', save_get_from_config('base_path', config))
env_max_dir_size_mb = os.environ.get('SHIPPER_MAX_DIR_SIZE', save_get_from_config('max_size_mb', config))
env_session_secret = os.environ.get('SHIPPER_SECRET', save_get_from_config('session_secret', config))
env_user_level_min = os.environ.get('SHIPPER_USERLEVEL_MIN', save_get_from_config('userlevel_min', config))
env_cookie_name = os.environ.get('SHIPPER_COOKIE_NAME', save_get_from_config('cookie_name', config))
env_compendium_files = os.path.join(env_file_base_path, 'compendium')
env_user_id = None
status_note(['loaded environment vars and db config:',
'\n\tMongoDB: ', env_mongo_host, env_mongo_db_name,
'\n\tbottle: ', env_bottle_host, ':', env_bottle_port,
'\n\ttokens: ', TOKEN_LIST], d=is_debug)
REPO_TARGET = None # generic repository object
REPO_LIST = []
REPO_LIST_availables_as_IDstr = []
# load repo classes from /repo and register
register_repos()
REPO_TOKEN = '' # generic secret token from remote api
except OSError as oexc:
status_note(['! error, unable to process environmental vars. unable to proceed.', xstr(oexc)], d=is_debug)
sys.exit(1)
except Exception as exc:
status_note(['! error, unable to configure shipper. unable to proceed.', xstr(exc)], d=is_debug)
sys.exit(1)
# connect to db
try:
status_note(['connecting to ', env_mongo_host], d=is_debug)
client = MongoClient(env_mongo_host, serverSelectionTimeoutMS=12000)
db = client[env_mongo_db_name]
status_note(['connected. MongoDB server version: ', client.server_info()['version']], d=is_debug)
except errors.ServerSelectionTimeoutError as texc:
status_note(['! error: mongodb timeout error: ', xstr(texc)])
sys.exit(1)
except Exception as exc:
status_note(['! error: mongodb connection error: ', xstr(exc)])
status_note(traceback.format_exc(), d=is_debug)
sys.exit(1)
# start service
try:
# shipper logo
status_note(base64.b64decode('IA0KLi0tLS0tLS0tLS0tLS0tLg0KfCAgICAgXy5fICBfICAgIGAuLF9fX19fXw0KfCAgICAobzJyKChfKCAgICAgIF9fXyhfKCkNCnwgIFwnLS06LS0tOi0uICAgLCcNCictLS0tLS0tLS0tLS0tLSc=').decode('utf-8'))
time.sleep(0.1)
# start bottle-gevent
run(app=app, host=env_bottle_host, port=env_bottle_port, server='gevent', debug=True)
except Exception as exc:
status_note(['! error, bottle server could not be started: ', traceback.format_exc()], d=is_debug)
sys.exit(1)
| o2r-project/o2r-shipper | shipper.py | Python | apache-2.0 | 35,917 |
from .peer import (
pypeer_eye_masking,
pypeer_zscore,
pypeer_ravel_data,
motion_scrub,
prep_for_pypeer
)
__all__ = [
'pypeer_eye_masking',
'pypeer_zscore',
'pypeer_ravel_data',
'motion_scrub',
'prep_for_pypeer'
] | FCP-INDI/C-PAC | CPAC/pypeer/__init__.py | Python | bsd-3-clause | 254 |
"""
General functions for HTML manipulation, backported from Py3.
Note that this uses Python 2.7 code with the corresponding Python 3
module names and locations.
"""
from __future__ import unicode_literals
_escape_map = {ord('&'): '&', ord('<'): '<', ord('>'): '>'}
_escape_map_full = {ord('&'): '&', ord('<'): '<', ord('>'): '>',
ord('"'): '"', ord('\''): '''}
# NB: this is a candidate for a bytes/string polymorphic interface
def escape(s, quote=True):
"""
Replace special characters "&", "<" and ">" to HTML-safe sequences.
If the optional flag quote is true (the default), the quotation mark
characters, both double quote (") and single quote (') characters are also
translated.
"""
assert not isinstance(s, bytes), 'Pass a unicode string'
if quote:
return s.translate(_escape_map_full)
return s.translate(_escape_map)
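# Illustrative usage sketch (assumption, not part of the original module):
if __name__ == '__main__':
    print(escape('<a href="x">Tom & Jerry</a>'))
    # -> &lt;a href=&quot;x&quot;&gt;Tom &amp; Jerry&lt;/a&gt;
    print(escape('<b>bold</b>', quote=False))  # quote chars left untouched
    # -> &lt;b&gt;bold&lt;/b&gt;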
| thonkify/thonkify | src/lib/future/backports/html/__init__.py | Python | mit | 924 |
from helper import unittest, PillowTestCase, hopper
import io
from PIL import Image
# sample ppm stream
TEST_ICO_FILE = "Tests/images/hopper.ico"
TEST_DATA = open(TEST_ICO_FILE, "rb").read()
class TestFileIco(PillowTestCase):
def test_sanity(self):
im = Image.open(TEST_ICO_FILE)
im.load()
self.assertEqual(im.mode, "RGBA")
self.assertEqual(im.size, (16, 16))
self.assertEqual(im.format, "ICO")
def test_save_to_bytes(self):
output = io.BytesIO()
im = hopper()
im.save(output, "ico", sizes=[(32, 32), (64, 64)])
# the default image
output.seek(0)
reloaded = Image.open(output)
        self.assertEqual(reloaded.info['sizes'], set([(32, 32), (64, 64)]))
self.assertEqual(im.mode, reloaded.mode)
self.assertEqual((64, 64), reloaded.size)
self.assertEqual(reloaded.format, "ICO")
self.assert_image_equal(reloaded, hopper().resize((64,64), Image.LANCZOS))
# the other one
output.seek(0)
reloaded = Image.open(output)
reloaded.size = (32,32)
self.assertEqual(im.mode, reloaded.mode)
self.assertEqual((32, 32), reloaded.size)
self.assertEqual(reloaded.format, "ICO")
self.assert_image_equal(reloaded, hopper().resize((32,32), Image.LANCZOS))
if __name__ == '__main__':
unittest.main()
# End of file
| 1upon0/rfid-auth-system | GUI/printer/Pillow-2.7.0/Tests/test_file_ico.py | Python | apache-2.0 | 1,402 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2.
# support: fastannotate support for hgweb, and filectx
from __future__ import absolute_import
from edenscm.mercurial import context as hgcontext, extensions, hgweb, patch, util
from . import context, revmap
class _lazyfctx(object):
"""delegates to fctx but do not construct fctx when unnecessary"""
def __init__(self, repo, node, path):
self._node = node
self._path = path
self._repo = repo
def node(self):
return self._node
def path(self):
return self._path
@util.propertycache
def _fctx(self):
return context.resolvefctx(self._repo, self._node, self._path)
def __getattr__(self, name):
return getattr(self._fctx, name)
def _convertoutputs(repo, annotated, contents):
"""convert fastannotate outputs to vanilla annotate format"""
# fastannotate returns: [(nodeid, linenum, path)], [linecontent]
# convert to what fctx.annotate returns: [((fctx, linenum), linecontent)]
results = []
fctxmap = {}
annotateline = getattr(hgcontext, "annotateline", None)
for i, (hsh, linenum, path) in enumerate(annotated):
if (hsh, path) not in fctxmap:
fctxmap[(hsh, path)] = _lazyfctx(repo, hsh, path)
# linenum: the user wants 1-based, we have 0-based.
lineno = linenum + 1
fctx = fctxmap[(hsh, path)]
line = contents[i]
if annotateline is None:
results.append(((fctx, lineno), line))
else:
# 2e32c6a31cc7 introduced annotateline
results.append((annotateline(fctx=fctx, lineno=lineno), line))
return results
def _getmaster(fctx):
"""(fctx) -> str"""
return fctx._repo.ui.config("fastannotate", "mainbranch") or "default"
def _doannotate(fctx, follow=True, diffopts=None):
"""like the vanilla fctx.annotate, but do it via fastannotate, and make
the output format compatible with the vanilla fctx.annotate.
may raise Exception, and always return line numbers.
"""
master = _getmaster(fctx)
annotated = contents = None
with context.fctxannotatecontext(fctx, follow, diffopts) as ac:
try:
annotated, contents = ac.annotate(
fctx.rev(), master=master, showpath=True, showlines=True
)
except Exception:
ac.rebuild() # try rebuild once
fctx._repo.ui.debug(
"fastannotate: %s: rebuilding broken cache\n" % fctx._path
)
try:
annotated, contents = ac.annotate(
fctx.rev(), master=master, showpath=True, showlines=True
)
except Exception:
raise
assert annotated and contents
return _convertoutputs(fctx._repo, annotated, contents)
def _hgwebannotate(orig, fctx, ui):
diffopts = patch.difffeatureopts(
ui, untrusted=True, section="annotate", whitespace=True
)
return _doannotate(fctx, diffopts=diffopts)
def _fctxannotate(
orig, self, follow=False, linenumber=False, skiprevs=None, diffopts=None
):
if skiprevs:
# skiprevs is not supported yet
return orig(self, follow, linenumber, skiprevs=skiprevs, diffopts=diffopts)
try:
return _doannotate(self, follow, diffopts)
except Exception as ex:
self._repo.ui.debug(
"fastannotate: falling back to the vanilla " "annotate: %r\n" % ex
)
return orig(self, follow, linenumber, skiprevs=skiprevs, diffopts=diffopts)
def _remotefctxannotate(
orig,
self,
follow=False,
linenumber=None,
skiprevs=None,
diffopts=None,
prefetchskip=None,
):
# skipset: a set-like used to test if a fctx needs to be downloaded
skipset = None
with context.fctxannotatecontext(self, follow, diffopts) as ac:
skipset = revmap.revmap(ac.revmappath)
return orig(
self,
follow,
linenumber,
skiprevs=skiprevs,
diffopts=diffopts,
prefetchskip=skipset,
)
def replacehgwebannotate():
extensions.wrapfunction(hgweb.webutil, "annotate", _hgwebannotate)
def replacefctxannotate():
extensions.wrapfunction(hgcontext.basefilectx, "annotate", _fctxannotate)
def replaceremotefctxannotate():
try:
r = extensions.find("remotefilelog")
except KeyError:
return
else:
extensions.wrapfunction(
r.remotefilectx.remotefilectx, "annotate", _remotefctxannotate
)
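# Usage sketch (assumption: these replace* helpers are wired up from the
# extension's setup code elsewhere in fastannotate, e.g. an extsetup hook;
# that wiring is not part of this file):
#
#   def extsetup(ui):
#       replacehgwebannotate()
#       replacefctxannotate()
#       replaceremotefctxannotate()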
| facebookexperimental/eden | eden/hg-server/edenscm/hgext/fastannotate/support.py | Python | gpl-2.0 | 4,656 |
def WebIDLTest(parser, harness):
parser.parse("""
[Global]
interface Foo : Bar {
getter any(DOMString name);
};
interface Bar {};
""")
results = parser.finish()
harness.ok(results[0].isOnGlobalProtoChain(),
"[Global] interface should be on global's proto chain")
harness.ok(results[1].isOnGlobalProtoChain(),
"[Global] interface should be on global's proto chain")
parser = parser.reset()
threw = False
try:
parser.parse("""
[Global]
interface Foo {
getter any(DOMString name);
setter void(DOMString name, any arg);
};
""")
results = parser.finish()
except:
threw = True
harness.ok(threw,
"Should have thrown for [Global] used on an interface with a "
"named setter")
parser = parser.reset()
threw = False
try:
parser.parse("""
[Global]
interface Foo {
getter any(DOMString name);
deleter void(DOMString name);
};
""")
results = parser.finish()
except:
threw = True
harness.ok(threw,
"Should have thrown for [Global] used on an interface with a "
"named deleter")
parser = parser.reset()
threw = False
try:
parser.parse("""
[Global, OverrideBuiltins]
interface Foo {
};
""")
results = parser.finish()
except:
threw = True
harness.ok(threw,
"Should have thrown for [Global] used on an interface with a "
"[OverrideBuiltins]")
parser = parser.reset()
threw = False
try:
parser.parse("""
[Global]
interface Foo : Bar {
};
[OverrideBuiltins]
interface Bar {
};
""")
results = parser.finish()
except:
threw = True
harness.ok(threw,
"Should have thrown for [Global] used on an interface with an "
"[OverrideBuiltins] ancestor")
parser = parser.reset()
threw = False
try:
parser.parse("""
[Global]
interface Foo {
};
interface Bar : Foo {
};
""")
results = parser.finish()
except:
threw = True
harness.ok(threw,
"Should have thrown for [Global] used on an interface with a "
"descendant")
| danlrobertson/servo | components/script/dom/bindings/codegen/parser/tests/test_global_extended_attr.py | Python | mpl-2.0 | 2,534 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""A stupid message compiler for C++."""
import os.path
import re
import struct
def truncateLong(x):
"""Returns the uint32 part of a long x."""
assert x > 0
unsigned_bytes = struct.pack("<Q", x)
return struct.unpack("<I", unsigned_bytes[:4])[0]
def reinterpretAsInt32(x):
"""Reinterprets an unsigned long as an int32."""
assert x > 0
unsigned_bytes = struct.pack("<Q", x)
return struct.unpack("<i", unsigned_bytes[:4])[0]
class Type(object):
INT32 = 0
INT64 = 1
BOOL = 2
STRING = 3
STRUCT = 4
LIST = 5
def __init__(self, code):
assert Type.INT32 <= code and code <= Type.LIST
self.typecode = code
def typeCode(self):
return self.typecode
INT32_MAX = 2147483647
INT32_MIN = -2147483648
INT32 = Type(Type.INT32)
INT64 = Type(Type.INT64)
BOOL = Type(Type.BOOL)
STRING = Type(Type.STRING)
STRUCT = Type(Type.STRUCT)
class List(Type):
def __init__(self, subtype):
Type.__init__(self, Type.LIST)
self.subtype = subtype
def typeCode(self):
return truncateLong(self.typecode * 33) ^ self.subtype.typeCode()
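# Illustrative composition of the list type code (editorial):
#   List(INT32).typeCode() == truncateLong(Type.LIST * 33) ^ INT32.typeCode()
#                          == 165 ^ 0 == 165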
class MessageDefinition(object):
def __init__(self, name, comment):
self.typecode = Type.STRUCT
self.name = name
self.fields = []
self.comment = comment
def addField(self, type_structure, name, default, comment):
# Validate the default value
if type_structure.typecode == Type.INT32:
assert isinstance(default, int)
assert INT32_MIN <= default and default <= INT32_MAX
elif type_structure.typecode == Type.INT64:
assert isinstance(default, int)
elif type_structure.typecode == Type.BOOL:
assert isinstance(default, bool)
elif type_structure.typecode == Type.STRING:
assert default is None or isinstance(default, str)
elif type_structure.typecode == Type.STRUCT:
assert default is None
elif type_structure.typecode == Type.LIST:
assert default is None or len(default) >= 0
self.fields.append((type_structure, name, default, comment))
def typeCode(self):
"""Generate a "unique" ID using a bad hash function."""
# TODO: Do something smarter than this.
def DJBHash(hashval, value):
return truncateLong((hashval * 33) ^ value)
def DJBStringHash(hashval, string):
for c in string:
hashval = DJBHash(hashval, ord(c))
return hashval
hashval = 5381
hashval = DJBStringHash(hashval, self.name)
for (type_structure, name, default, comment) in self.fields:
hashval = DJBHash(hashval, type_structure.typeCode())
hashval = DJBStringHash(hashval, name)
return hashval
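# Illustrative use of the hash (hypothetical message, not in this file):
#   m = MessageDefinition("Ping", "Trivial example message.")
#   m.addField(INT32, "id", 0, "sequence number")
#   m.typeCode()  # DJB-style hash over "Ping", INT32's code, and "id"
# Renaming a field or changing its type yields a different code, which is how
# mismatched message definitions can be detected by readers and writers.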
class CPPOutput(object):
def __init__(self, filename, namespace):
self.filename = filename
self.namespace = namespace
self.buffer = """/* AUTOMATICALLY GENERATED: DO NOT EDIT */
#ifndef %(guard)s
#define %(guard)s
#include <cassert>
#include <string>
#include <vector>
#include "base/assert.h"
#include "io/message.h"
#include "serialization.h"
namespace io {
class FIFOBuffer;
}
namespace %(namespace)s {\n\n""" % {'guard': self.includeGuard(), 'namespace': namespace}
self.indent_ = 0
self.loop_count_ = 0
self.defined_types = {}
def indent(self):
self.indent_ += 4
def unindent(self):
self.indent_ -= 4
assert self.indent_ >= 0
def out(self, text):
return " " * self.indent_ + text + "\n"
def includeGuard(self):
return self.filename.upper().replace(".", "_").replace("/", "_")
def typeString(self, type_structure):
if type_structure.typecode == Type.INT32:
return "int32_t"
if type_structure.typecode == Type.INT64:
return "int64_t"
if type_structure.typecode == Type.BOOL:
return "bool"
if type_structure.typecode == Type.STRING:
return "std::string"
if type_structure.typecode == Type.STRUCT:
return type_structure.name
if type_structure.typecode == Type.LIST:
# Space before closing > to avoid >> for List(List(foo))
return "std::vector<" + self.typeString(type_structure.subtype) + " >"
raise ValueError, "Unknown type: " + str(type_structure)
def typeValueString(self, type_structure, value):
if type_structure.typecode == Type.INT32:
return str(value)
if type_structure.typecode == Type.INT64:
return str(value)
if type_structure.typecode == Type.BOOL:
if value: return "true"
return "false"
if type_structure.typecode == Type.STRING:
return "std::string"
raise ValueError, "Unknown type: " + type_structure
def serializeList(self, type_structure, name, serializeMethod):
o = "{\n"
self.indent()
o += self.out("int32_t _size_ = static_cast<int32_t>(%s.size());" % (name))
o += self.out(
"assert(0 <= _size_ && static_cast<size_t>(_size_) == %s.size());" % (name))
o += self.out(self.serialize(INT32, "_size_"));
# Ensure the loop index variable is unique for nested loops
index_name = "_i%d_" % (self.loop_count_)
self.loop_count_ += 1
o += self.out("for (int %(index)s = 0; %(index)s < _size_; ++%(index)s) {" %
{ "index": index_name })
self.indent()
o += self.out(serializeMethod(type_structure.subtype, name + "[" + index_name + "]"));
self.unindent()
o += self.out("}")
self.unindent()
o += self.out("}")
# Trim trailing \n
return o[:-1]
def serialize(self, type_structure, name):
if type_structure.typecode == Type.LIST:
return self.serializeList(type_structure, name, self.serialize)
elif type_structure.typecode == Type.STRUCT:
return "%s.appendToString(_out_);" % (name)
else:
return "serialization::serialize(%s, _out_);" % (name)
def serializeBuffer(self, type_structure, name):
if type_structure.typecode == Type.LIST:
return self.serializeList(type_structure, name, self.serializeBuffer)
elif type_structure.typecode == Type.STRUCT:
return "%s.serialize(_out_);" % (name)
else:
return "serialization::serialize(%s, _out_);" % (name)
def deserialize(self, type_structure, name):
if type_structure.typecode == Type.LIST:
o = "{\n"
self.indent()
o += self.out("int32_t _size_;")
o += self.out("_start_ = serialization::deserialize(&_size_, _start_, _end_);")
o += self.out("assert(_size_ >= 0);")
o += self.out("%s.resize(_size_);" % (name))
# Ensure the loop index variable is unique for nested loops
index_name = "_i%d_" % (self.loop_count_)
self.loop_count_ += 1
o += self.out("for (int %(index)s = 0; %(index)s < _size_; ++%(index)s) {" %
{ "index": index_name })
self.indent()
o += self.out(self.deserialize(type_structure.subtype, name + "[" + index_name + "]"));
self.unindent()
o += self.out("}")
self.unindent()
o += self.out("}")
# Trim trailing \n
return o[:-1]
elif type_structure.typecode == Type.STRUCT:
return "_start_ = %s.parseFromString(_start_, _end_);" % (name)
else:
return "_start_ = serialization::deserialize(&%s, _start_, _end_);" % (name)
def addMessage(self, message):
self.buffer += self.out("// " + message.comment)
self.buffer += self.out("class %s : public io::Message {" % (message.name))
self.buffer += self.out("public:")
self.indent()
self.buffer += self.out("%s() :" % (message.name))
# Initialize default types
self.indent()
for type_structure, name, default, comment in message.fields:
# Skip field if there is no default value
if default is None: continue
self.buffer += self.out("%s(%s)," % (name, self.typeValueString(type_structure, default)))
# Trim trailing ,
if self.buffer.endswith(",\n"):
self.buffer = self.buffer[:-2]
else:
assert self.buffer.endswith(" :\n")
self.buffer = self.buffer[:-3]
self.buffer += " {}\n"
self.unindent()
# Add field definitions
for type_structure, name, default, comment in message.fields:
self.buffer += "\n"
if len(comment) > 0:
self.buffer += self.out("// " + comment)
self.buffer += self.out("%s %s;" % (self.typeString(type_structure), name))
# Add operator== and !=. TODO: Move this to a .cc?
self.buffer += "\n"
self.buffer += self.out("bool operator==(const %s& other) const {" % message.name)
self.indent()
for type_structure, name, default, comment in message.fields:
self.buffer += self.out("if (%s != other.%s) return false;" % (name, name))
self.buffer += self.out("return true;")
self.unindent()
self.buffer += self.out("}")
self.buffer += self.out("bool operator!=(const %s& other) const { return !(*this == other); }" % (message.name))
# Add appendToString
self.buffer += "\n"
self.buffer += self.out("void appendToString(std::string* _out_) const {")
self.indent()
for type_structure, name, default, comment in message.fields:
self.buffer += self.out(self.serialize(type_structure, name))
self.unindent()
self.buffer += self.out("}")
# Add serialize
self.buffer += "\n"
self.buffer += self.out("virtual void serialize(io::FIFOBuffer* _out_) const {")
self.indent()
for type_structure, name, default, comment in message.fields:
self.buffer += self.out(self.serializeBuffer(type_structure, name))
self.unindent()
self.buffer += self.out("}")
# Add parseFromString
self.buffer += "\n"
self.buffer += self.out("const char* parseFromString(const char* _start_, const char* _end_) {")
self.indent()
for type_structure, name, default, comment in message.fields:
self.buffer += self.out(self.deserialize(type_structure, name))
self.buffer += self.out("return _start_;")
self.unindent()
self.buffer += self.out("}")
# Add parseFromString(const std::string&) helper wrapper
self.buffer += "\n"
self.buffer += self.out("void parseFromString(const std::string& _str_) {")
self.indent()
self.buffer += self.out("const char* end = parseFromString(_str_.data(), _str_.data() + _str_.size());")
self.buffer += self.out("ASSERT(end == _str_.data() + _str_.size());")
self.unindent()
self.buffer += self.out("}")
# Add typeCode
self.buffer += "\n"
self.buffer += self.out("static int32_t typeCode() { return %d; }" % (
reinterpretAsInt32(message.typeCode())))
# All done
self.unindent()
self.buffer += self.out("};") + "\n"
def output(self):
self.buffer += "} // namespace %s\n" % (self.namespace)
self.buffer += "#endif // %s\n" % (self.includeGuard())
return self.buffer
class JavaOutput(object):
def __init__(self, filename, namespace):
self.namespace = namespace
classname = os.path.splitext(os.path.basename(filename))[0]
self.buffer = """/* AUTOMATICALLY GENERATED: DO NOT EDIT */
package %(namespace)s;
public final class %(classname)s {
// Not constructible.
private %(classname)s() {}
""" % {'namespace': namespace, 'classname': classname}
self.indent_ = 4
self.loop_count_ = 0
self.defined_types = {}
def indent(self):
self.indent_ += 4
def unindent(self):
self.indent_ -= 4
assert self.indent_ >= 0
def out(self, text):
return " " * self.indent_ + text + "\n"
def referenceTypeString(self, type_structure):
if type_structure.typecode == Type.INT32:
return "Integer"
if type_structure.typecode == Type.INT64:
return "Long"
if type_structure.typecode == Type.BOOL:
return "Boolean"
else:
return self.typeString(type_structure)
def typeString(self, type_structure):
if type_structure.typecode == Type.INT32:
return "int"
if type_structure.typecode == Type.INT64:
return "long"
if type_structure.typecode == Type.BOOL:
return "boolean"
if type_structure.typecode == Type.STRING:
return "String"
if type_structure.typecode == Type.STRUCT:
return type_structure.name
if type_structure.typecode == Type.LIST:
return "java.util.ArrayList<" + self.referenceTypeString(type_structure.subtype) + ">"
raise ValueError, "Unknown type: " + str(type_structure)
def typeValueString(self, type_structure, value):
if type_structure.typecode == Type.INT32:
if value is None: return "0"
return str(value)
if type_structure.typecode == Type.INT64:
if value is None: return "0l"
return str(value)
if type_structure.typecode == Type.BOOL:
if value: return "true"
return "false"
if type_structure.typecode == Type.STRING:
if value is None: return '""'
return '"' + value + '"'
else:
return "new " + self.typeString(type_structure) + "()"
def serializeList(self, type_structure, name):
o = self.serialize(INT32, name + ".size()") + "\n";
# Ensure the loop variable is unique for nested loops
index_name = "_i%d_" % (self.loop_count_)
self.loop_count_ += 1
o += self.out("for (%(type)s %(index)s : %(name)s) {" %
{ "type": self.typeString(type_structure.subtype), "index": index_name, "name": name })
self.indent()
o += self.out(self.serialize(type_structure.subtype, index_name));
self.unindent()
o += self.out("}")
# Trim trailing \n
return o[:-1]
def serialize(self, type_structure, name):
if type_structure.typecode == Type.LIST:
return self.serializeList(type_structure, name)
elif type_structure.typecode == Type.STRUCT:
return "%s.writeBytes(_out_);" % (name)
else:
return "com.relationalcloud.network.SerializationUtilities.writeBytes(%s, _out_);" % (name)
def deserialize(self, type_structure, name):
if type_structure.typecode == Type.LIST:
# Make a unique size variable
size_name = "_size%d_" % (self.loop_count_)
o = self.deserialize(INT32, "int " + size_name) + "\n"
#~ o = "int %s = com.relationalcloud.network.SerializationUtilities.readInt32(_in_);\n" % (size_name)
o += self.out("assert %s >= 0;" % (size_name))
o += self.out("%s.clear();" % (name))
o += self.out("%s.ensureCapacity(%s);" % (name, size_name))
# Ensure the temp variable is unique for nested loops
temp_name = "_temp%d_" % (self.loop_count_)
self.loop_count_ += 1
o += self.out("while (%s > 0) {" % (size_name))
self.indent()
o += self.out("%s %s = %s;" % (self.typeString(type_structure.subtype), temp_name,
self.typeValueString(type_structure.subtype, None)));
o += self.out(self.deserialize(type_structure.subtype, temp_name))
o += self.out("%s.add(%s);" % (name, temp_name));
o += self.out("%s -= 1;" % (size_name));
self.unindent()
o += self.out("}")
# Trim trailing \n
return o[:-1]
elif type_structure.typecode == Type.STRUCT:
return "if (!%s.readBytes(_in_)) return false;" % (name)
elif type_structure.typecode == Type.STRING:
o = "%s = com.relationalcloud.network.SerializationUtilities.readString(_in_);\n" % (name)
o += self.out("if (%s == null) return false;" % (name))
return o[:-1]
elif type_structure.typecode == Type.INT32:
temp_buffer = "_buffer%d_" % (self.loop_count_)
self.loop_count_ += 1
o = "byte[] %s = com.relationalcloud.network.SerializationUtilities.readBytes(4, _in_);\n" % (temp_buffer)
o += self.out("if (%s == null) return false;" % (temp_buffer));
o += self.out("%s = com.relationalcloud.network.SerializationUtilities.fromLittleEndianInt32(%s);" % (name, temp_buffer));
return o[:-1]
else:
raise ValueError("TODO " + type_structure)
def javaName(self, name):
# Convert C++ style names into Java style names
CPP_NAME_RE = re.compile("_([a-z])")
def stripUpper(matchobj):
return matchobj.group(1).upper()
return CPP_NAME_RE.sub(stripUpper, name)
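    # Illustrative conversions performed by javaName (editorial):
    #   "worker_id"       -> "workerId"
    #   "last_ack_status" -> "lastAckStatus"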
def addMessage(self, message):
self.buffer += "\n"
self.buffer += self.out("/** " + message.comment + "*/")
self.buffer += self.out("public static final class %s {" % (message.name))
self.indent()
# Add field definitions
for type_structure, name, default, comment in message.fields:
self.buffer += "\n"
if len(comment) > 0:
self.buffer += self.out("/** " + comment + " */")
self.buffer += self.out("public %s %s = %s;" % (self.typeString(type_structure), self.javaName(name), self.typeValueString(type_structure, default)))
# Add toBytes
self.buffer += "\n"
self.buffer += self.out("/** Writes the serialized message to the OutputStream. */")
self.buffer += self.out("public void writeBytes(java.io.OutputStream _out_) {")
self.indent()
self.buffer += self.out("try {")
self.indent();
for type_structure, name, default, comment in message.fields:
self.buffer += self.out(self.serialize(type_structure, self.javaName(name)))
self.unindent()
self.buffer += self.out("} catch (java.io.IOException e) {")
self.indent();
self.buffer += self.out("throw new RuntimeException(e);")
self.unindent();
self.buffer += self.out("}")
self.unindent();
self.buffer += self.out("}")
# Add fromBytes
self.buffer += "\n"
self.buffer += self.out("/** Reads the serialized values from the InputStream. */")
self.buffer += self.out("public boolean readBytes(java.io.InputStream _in_) {")
self.indent()
self.buffer += self.out("try {")
self.indent();
for type_structure, name, default, comment in message.fields:
self.buffer += self.out(self.deserialize(type_structure, self.javaName(name)))
self.unindent()
self.buffer += self.out("} catch (java.io.IOException e) {")
self.indent();
self.buffer += self.out("throw new RuntimeException(e);")
self.unindent();
self.buffer += self.out("}")
self.buffer += self.out("return true;")
self.unindent()
self.buffer += self.out("}")
# Add typeCode
self.buffer += "\n"
self.buffer += self.out("/** Unique identifier for this message type. */");
self.buffer += self.out("public static int TYPE_CODE = %d;" % (
reinterpretAsInt32(message.typeCode())))
# All done
self.unindent()
self.buffer += self.out("}")
def output(self):
self.buffer += "}\n"
return self.buffer
def main(messages, namespace):
import sys
if len(sys.argv) != 2:
sys.stderr.write("%s [output .h]\n" % (sys.argv[0]))
sys.exit(1)
output_path = sys.argv[1]
if output_path.endswith('.h'):
out = CPPOutput(output_path, namespace)
elif output_path.endswith('.java'):
out = JavaOutput(output_path, namespace)
else:
sys.stderr.write("unsupported output type (try .h or .java)\n")
sys.exit(1)
for message in messages:
out.addMessage(message)
f = open(output_path, "w")
f.write(out.output())
f.close()
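# Usage sketch (hypothetical message and namespace; real definitions live in
# the callers of this module, not here):
#
#   status = MessageDefinition("Status", "Reports worker status.")
#   status.addField(INT32, "code", 0, "status code")
#   status.addField(List(STRING), "messages", None, "human readable messages")
#   main([status], "dtxn")  # writes Status to the .h or .java path given on argv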
| apavlo/h-store | src/dtxn/stupidcompiler.py | Python | gpl-3.0 | 20,928 |
# -*- coding: utf-8 -*-
import unittest
import os
import tempfile
import numpy
from tables import *
from tables.tests import common
from tables.tests.common import allequal
# To delete the internal attributes automagically
unittest.TestCase.tearDown = common.cleanup
class BasicTestCase(unittest.TestCase):
# Default values
flavor = "numpy"
type = 'int32'
shape = (2, 2)
start = 0
stop = 10
step = 1
length = 1
chunkshape = (5, 5)
compress = 0
complib = "zlib" # Default compression library
shuffle = 0
fletcher32 = 0
reopen = 1 # Tells whether the file has to be reopened on each test or not
def setUp(self):
# Create an instance of an HDF5 Table
self.file = tempfile.mktemp(".h5")
self.fileh = openFile(self.file, "w")
self.rootgroup = self.fileh.root
self.populateFile()
if self.reopen:
# Close the file
self.fileh.close()
def populateFile(self):
group = self.rootgroup
if self.type == "string":
atom = StringAtom(itemsize=self.length)
else:
atom = Atom.from_type(self.type)
title = self.__class__.__name__
filters = Filters(complevel = self.compress,
complib = self.complib,
shuffle = self.shuffle,
fletcher32 = self.fletcher32)
carray = self.fileh.createCArray(group, 'carray1', atom, self.shape,
title, filters=filters,
chunkshape = self.chunkshape)
carray.flavor = self.flavor
# Fill it with data
self.rowshape = list(carray.shape)
self.objsize = self.length * numpy.prod(self.shape)
if self.flavor == "numpy":
if self.type == "string":
object = numpy.ndarray(buffer=b"a"*self.objsize,
shape=self.shape,
dtype="S%s" % carray.atom.itemsize)
else:
object = numpy.arange(self.objsize, dtype=carray.atom.dtype)
object.shape = self.shape
if common.verbose:
print "Object to append -->", repr(object)
carray[...] = object
def tearDown(self):
self.fileh.close()
os.remove(self.file)
common.cleanup(self)
#----------------------------------------
def test00_attributes(self):
if self.reopen:
self.fileh = openFile(self.file, "r")
obj = self.fileh.getNode("/carray1")
self.assertEqual(obj.flavor, self.flavor)
self.assertEqual(obj.shape, self.shape)
self.assertEqual(obj.ndim, len(self.shape))
self.assertEqual(obj.chunkshape, self.chunkshape)
self.assertEqual(obj.nrows, self.shape[0])
self.assertEqual(obj.atom.type, self.type)
def test01_readCArray(self):
"""Checking read() of chunked layout arrays"""
if common.verbose:
print '\n', '-=' * 30
print "Running %s.test01_readCArray..." % self.__class__.__name__
# Create an instance of an HDF5 Table
if self.reopen:
self.fileh = openFile(self.file, "r")
carray = self.fileh.getNode("/carray1")
# Choose a small value for buffer size
carray.nrowsinbuf = 3
if common.verbose:
print "CArray descr:", repr(carray)
print "shape of read array ==>", carray.shape
print "reopening?:", self.reopen
# Build the array to do comparisons
if self.flavor == "numpy":
if self.type == "string":
object_ = numpy.ndarray(buffer=b"a"*self.objsize,
shape=self.shape,
dtype="S%s" % carray.atom.itemsize)
else:
object_ = numpy.arange(self.objsize, dtype=carray.atom.dtype)
object_.shape = self.shape
stop = self.stop
        # stop == None means read only the element designated by start
        # (in read() contexts)
if self.stop == None:
if self.start == -1: # corner case
stop = carray.nrows
else:
stop = self.start + 1
# Protection against number of elements less than existing
#if rowshape[self.extdim] < self.stop or self.stop == 0:
if carray.nrows < stop:
# self.stop == 0 means last row only in read()
# and not in [::] slicing notation
stop = int(carray.nrows)
        # do a copy() in order to ensure that len(object._data)
        # actually reflects its length
object = object_[self.start:stop:self.step].copy()
# Read all the array
try:
data = carray.read(self.start, stop, self.step)
except IndexError:
if self.flavor == "numpy":
data = numpy.empty(shape=self.shape, dtype=self.type)
else:
data = numpy.empty(shape=self.shape, dtype=self.type)
if common.verbose:
if hasattr(object, "shape"):
print "shape should look as:", object.shape
print "Object read ==>", repr(data)
print "Should look like ==>", repr(object)
if hasattr(data, "shape"):
self.assertEqual(len(data.shape), len(self.shape))
else:
# Scalar case
self.assertEqual(len(self.shape), 1)
self.assertEqual(carray.chunkshape, self.chunkshape)
self.assertTrue(allequal(data, object, self.flavor))
def test01_readCArray_out_argument(self):
"""Checking read() of chunked layout arrays"""
# Create an instance of an HDF5 Table
if self.reopen:
self.fileh = openFile(self.file, "r")
carray = self.fileh.getNode("/carray1")
# Choose a small value for buffer size
carray.nrowsinbuf = 3
# Build the array to do comparisons
if self.flavor == "numpy":
if self.type == "string":
object_ = numpy.ndarray(buffer=b"a"*self.objsize,
shape=self.shape,
dtype="S%s" % carray.atom.itemsize)
else:
object_ = numpy.arange(self.objsize, dtype=carray.atom.dtype)
object_.shape = self.shape
stop = self.stop
        # stop == None means read only the element designated by start
        # (in read() contexts)
if self.stop == None:
if self.start == -1: # corner case
stop = carray.nrows
else:
stop = self.start + 1
# Protection against number of elements less than existing
#if rowshape[self.extdim] < self.stop or self.stop == 0:
if carray.nrows < stop:
# self.stop == 0 means last row only in read()
# and not in [::] slicing notation
stop = int(carray.nrows)
        # do a copy() in order to ensure that len(object._data)
        # actually reflects its length
object = object_[self.start:stop:self.step].copy()
# Read all the array
try:
data = numpy.empty(self.shape, dtype=carray.atom.dtype)
data = data[self.start:stop:self.step].copy()
carray.read(self.start, stop, self.step, out=data)
except IndexError:
if self.flavor == "numpy":
data = numpy.empty(shape=self.shape, dtype=self.type)
else:
data = numpy.empty(shape=self.shape, dtype=self.type)
if hasattr(data, "shape"):
self.assertEqual(len(data.shape), len(self.shape))
else:
# Scalar case
self.assertEqual(len(self.shape), 1)
self.assertEqual(carray.chunkshape, self.chunkshape)
self.assertTrue(allequal(data, object, self.flavor))
def test02_getitemCArray(self):
"""Checking chunked layout array __getitem__ special method"""
if common.verbose:
print '\n', '-=' * 30
print "Running %s.test02_getitemCArray..." % self.__class__.__name__
if not hasattr(self, "slices"):
# If there is not a slices attribute, create it
self.slices = (slice(self.start, self.stop, self.step),)
# Create an instance of an HDF5 Table
if self.reopen:
self.fileh = openFile(self.file, "r")
carray = self.fileh.getNode("/carray1")
if common.verbose:
print "CArray descr:", repr(carray)
print "shape of read array ==>", carray.shape
print "reopening?:", self.reopen
# Build the array to do comparisons
if self.type == "string":
object_ = numpy.ndarray(buffer=b"a"*self.objsize,
shape=self.shape,
dtype="S%s" % carray.atom.itemsize)
else:
object_ = numpy.arange(self.objsize, dtype=carray.atom.dtype)
object_.shape = self.shape
        # do a copy() in order to ensure that len(object._data)
        # actually reflects its length
object = object_.__getitem__(self.slices).copy()
# Read data from the array
try:
data = carray.__getitem__(self.slices)
except IndexError:
print "IndexError!"
if self.flavor == "numpy":
data = numpy.empty(shape=self.shape, dtype=self.type)
else:
data = numpy.empty(shape=self.shape, dtype=self.type)
if common.verbose:
print "Object read:\n", repr(data) #, data.info()
print "Should look like:\n", repr(object) #, object.info()
if hasattr(object, "shape"):
print "Original object shape:", self.shape
print "Shape read:", data.shape
print "shape should look as:", object.shape
if not hasattr(data, "shape"):
# Scalar case
self.assertEqual(len(self.shape), 1)
self.assertEqual(carray.chunkshape, self.chunkshape)
self.assertTrue(allequal(data, object, self.flavor))
def test03_setitemCArray(self):
"""Checking chunked layout array __setitem__ special method"""
if self.__class__.__name__ == "Ellipsis6CArrayTestCase":
# see test_earray.py BasicTestCase.test03_setitemEArray
return
if common.verbose:
print '\n', '-=' * 30
print "Running %s.test03_setitemCArray..." % self.__class__.__name__
if not hasattr(self, "slices"):
# If there is not a slices attribute, create it
self.slices = (slice(self.start, self.stop, self.step),)
# Create an instance of an HDF5 Table
if self.reopen:
self.fileh = openFile(self.file, "a")
carray = self.fileh.getNode("/carray1")
if common.verbose:
print "CArray descr:", repr(carray)
print "shape of read array ==>", carray.shape
print "reopening?:", self.reopen
# Build the array to do comparisons
if self.type == "string":
object_ = numpy.ndarray(buffer=b"a"*self.objsize,
shape=self.shape,
dtype="S%s" % carray.atom.itemsize)
else:
object_ = numpy.arange(self.objsize, dtype=carray.atom.dtype)
object_.shape = self.shape
        # do a copy() in order to ensure that len(object._data)
        # actually reflects its length
object = object_.__getitem__(self.slices).copy()
if self.type == "string":
if hasattr(self, "wslice"):
                object[self.wslice] = "xXx"
carray[self.wslice] = "xXx"
elif sum(object[self.slices].shape) != 0 :
object[:] = "xXx"
if object.size > 0:
carray[self.slices] = object
else:
if hasattr(self, "wslice"):
object[self.wslice] = object[self.wslice] * 2 + 3
carray[self.wslice] = carray[self.wslice] * 2 + 3
elif sum(object[self.slices].shape) != 0:
object = object * 2 + 3
if numpy.prod(object.shape) > 0:
carray[self.slices] = carray[self.slices] * 2 + 3
# Cast again object to its original type
object = numpy.array(object, dtype=carray.atom.dtype)
        # Read data from the array
try:
data = carray.__getitem__(self.slices)
except IndexError:
print "IndexError!"
if self.flavor == "numpy":
data = numpy.empty(shape=self.shape, dtype=self.type)
else:
data = numpy.empty(shape=self.shape, dtype=self.type)
if common.verbose:
print "Object read:\n", repr(data) #, data.info()
print "Should look like:\n", repr(object) #, object.info()
if hasattr(object, "shape"):
print "Original object shape:", self.shape
print "Shape read:", data.shape
print "shape should look as:", object.shape
if not hasattr(data, "shape"):
# Scalar case
self.assertEqual(len(self.shape), 1)
self.assertEqual(carray.chunkshape, self.chunkshape)
self.assertTrue(allequal(data, object, self.flavor))
class BasicWriteTestCase(BasicTestCase):
type = 'int32'
shape = (2,)
chunkshape = (5,)
step = 1
wslice = 1 # single element case
class BasicWrite2TestCase(BasicTestCase):
type = 'int32'
shape = (2,)
chunkshape = (5,)
step = 1
wslice = slice(shape[0]-2, shape[0], 2) # range of elements
reopen = 0 # This case does not reopen files
class EmptyCArrayTestCase(BasicTestCase):
type = 'int32'
shape = (2, 2)
chunkshape = (5, 5)
start = 0
stop = 10
step = 1
class EmptyCArray2TestCase(BasicTestCase):
type = 'int32'
shape = (2, 2)
chunkshape = (5, 5)
start = 0
stop = 10
step = 1
reopen = 0 # This case does not reopen files
class SlicesCArrayTestCase(BasicTestCase):
compress = 1
complib = "lzo"
type = 'int32'
shape = (2, 2)
chunkshape = (5, 5)
slices = (slice(1, 2, 1), slice(1, 3, 1))
class EllipsisCArrayTestCase(BasicTestCase):
type = 'int32'
shape = (2, 2)
chunkshape = (5, 5)
#slices = (slice(1,2,1), Ellipsis)
slices = (Ellipsis, slice(1, 2, 1))
class Slices2CArrayTestCase(BasicTestCase):
compress = 1
complib = "lzo"
type = 'int32'
shape = (2, 2, 4)
chunkshape = (5, 5, 5)
slices = (slice(1, 2, 1), slice(None, None, None), slice(1, 4, 2))
class Ellipsis2CArrayTestCase(BasicTestCase):
type = 'int32'
shape = (2, 2, 4)
chunkshape = (5, 5, 5)
slices = (slice(1, 2, 1), Ellipsis, slice(1, 4, 2))
class Slices3CArrayTestCase(BasicTestCase):
    compress = 1 # To show the chunks if DEBUG is on
complib = "lzo"
type = 'int32'
shape = (2, 3, 4, 2)
chunkshape = (5, 5, 5, 5)
slices = (slice(1, 2, 1), slice(0, None, None), slice(1, 4, 2)) # Don't work
#slices = (slice(None, None, None), slice(0, None, None), slice(1,4,1)) # W
#slices = (slice(None, None, None), slice(None, None, None), slice(1,4,2)) # N
#slices = (slice(1,2,1), slice(None, None, None), slice(1,4,2)) # N
# Disable the failing test temporarily with a working test case
slices = (slice(1, 2, 1), slice(1, 4, None), slice(1, 4, 2)) # Y
#slices = (slice(1,2,1), slice(0, 4, None), slice(1,4,1)) # Y
slices = (slice(1, 2, 1), slice(0, 4, None), slice(1, 4, 2)) # N
#slices = (slice(1,2,1), slice(0, 4, None), slice(1,4,2), slice(0,100,1)) # N
class Slices4CArrayTestCase(BasicTestCase):
type = 'int32'
shape = (2, 3, 4, 2, 5, 6)
chunkshape = (5, 5, 5, 5, 5, 5)
slices = (slice(1, 2, 1), slice(0, None, None), slice(1, 4, 2),
slice(0, 4, 2), slice(3, 5, 2), slice(2, 7, 1))
class Ellipsis3CArrayTestCase(BasicTestCase):
type = 'int32'
shape = (2, 3, 4, 2)
chunkshape = (5, 5, 5, 5)
slices = (Ellipsis, slice(0, 4, None), slice(1, 4, 2))
slices = (slice(1, 2, 1), slice(0, 4, None), slice(1, 4, 2), Ellipsis)
class Ellipsis4CArrayTestCase(BasicTestCase):
type = 'int32'
shape = (2, 3, 4, 5)
chunkshape = (5, 5, 5, 5)
slices = (Ellipsis, slice(0, 4, None), slice(1, 4, 2))
slices = (slice(1, 2, 1), Ellipsis, slice(1, 4, 2))
class Ellipsis5CArrayTestCase(BasicTestCase):
type = 'int32'
shape = (2, 3, 4, 5)
chunkshape = (5, 5, 5, 5)
slices = (slice(1, 2, 1), slice(0, 4, None), Ellipsis)
class Ellipsis6CArrayTestCase(BasicTestCase):
type = 'int32'
shape = (2, 3, 4, 5)
chunkshape = (5, 5, 5, 5)
# The next slices gives problems with setting values (test03)
# This is a problem on the test design, not the Array.__setitem__
# code, though. See # see test_earray.py Ellipsis6EArrayTestCase
slices = (slice(1, 2, 1), slice(0, 4, None), 2, Ellipsis)
class Ellipsis7CArrayTestCase(BasicTestCase):
type = 'int32'
shape = (2, 3, 4, 5)
chunkshape = (5, 5, 5, 5)
slices = (slice(1, 2, 1), slice(0, 4, None), slice(2, 3), Ellipsis)
class MD3WriteTestCase(BasicTestCase):
type = 'int32'
shape = (2, 2, 3)
chunkshape = (4, 4, 4)
step = 2
class MD5WriteTestCase(BasicTestCase):
type = 'int32'
shape = (2, 2, 3, 4, 5) # ok
#shape = (1, 1, 2, 1) # Minimum shape that shows problems with HDF5 1.6.1
#shape = (2, 3, 2, 4, 5) # Floating point exception (HDF5 1.6.1)
#shape = (2, 3, 3, 2, 5, 6) # Segmentation fault (HDF5 1.6.1)
chunkshape = (1, 1, 1, 1, 1)
start = 1
stop = 10
step = 10
class MD6WriteTestCase(BasicTestCase):
type = 'int32'
shape = (2, 3, 3, 2, 5, 6)
chunkshape = (1, 1, 1, 1, 5, 6)
start = 1
stop = 10
step = 3
class MD6WriteTestCase__(BasicTestCase):
type = 'int32'
shape = (2, 2)
chunkshape = (1, 1)
start = 1
stop = 3
step = 1
class MD7WriteTestCase(BasicTestCase):
type = 'int32'
shape = (2, 3, 3, 4, 5, 2, 3)
chunkshape = (10, 10, 10, 10, 10, 10, 10)
start = 1
stop = 10
step = 2
class MD10WriteTestCase(BasicTestCase):
type = 'int32'
shape = (1, 2, 3, 4, 5, 5, 4, 3, 2, 2)
chunkshape = (5, 5, 5, 5, 5, 5, 5, 5, 5, 5)
start = -1
stop = -1
step = 10
class ZlibComprTestCase(BasicTestCase):
compress = 1
complib = "zlib"
start = 3
#stop = 0 # means last row
stop = None # means last row from 0.8 on
step = 10
class ZlibShuffleTestCase(BasicTestCase):
shuffle = 1
compress = 1
complib = "zlib"
    # case start > stop, i.e. no rows read
start = 3
stop = 1
step = 10
class BloscComprTestCase(BasicTestCase):
compress = 1 # sss
complib = "blosc"
chunkshape = (10, 10)
start = 3
stop = 10
step = 3
class BloscShuffleTestCase(BasicTestCase):
shape = (20, 30)
compress = 1
shuffle = 1
complib = "blosc"
chunkshape = (100, 100)
start = 3
stop = 10
step = 7
class LZOComprTestCase(BasicTestCase):
compress = 1 # sss
complib = "lzo"
chunkshape = (10, 10)
start = 3
stop = 10
step = 3
class LZOShuffleTestCase(BasicTestCase):
shape = (20, 30)
compress = 1
shuffle = 1
complib = "lzo"
chunkshape = (100, 100)
start = 3
stop = 10
step = 7
class Bzip2ComprTestCase(BasicTestCase):
shape = (20, 30)
compress = 1
complib = "bzip2"
chunkshape = (100, 100)
start = 3
stop = 10
step = 8
class Bzip2ShuffleTestCase(BasicTestCase):
shape = (20, 30)
compress = 1
shuffle = 1
complib = "bzip2"
chunkshape = (100, 100)
start = 3
stop = 10
step = 6
class Fletcher32TestCase(BasicTestCase):
shape = (60, 50)
compress = 0
fletcher32 = 1
chunkshape = (50, 50)
start = 4
stop = 20
step = 7
class AllFiltersTestCase(BasicTestCase):
compress = 1
shuffle = 1
fletcher32 = 1
complib = "zlib"
chunkshape = (20, 20) # sss
start = 2
stop = 99
step = 6
class FloatTypeTestCase(BasicTestCase):
type = 'float64'
shape = (2, 2)
chunkshape = (5, 5)
start = 3
stop = 10
step = 20
class ComplexTypeTestCase(BasicTestCase):
type = 'complex128'
shape = (2, 2)
chunkshape = (5, 5)
start = 3
stop = 10
step = 20
class StringTestCase(BasicTestCase):
type = "string"
length = 20
shape = (2, 2)
#shape = (2,2,20)
chunkshape = (5, 5)
start = 3
stop = 10
step = 20
slices = (slice(0, 1), slice(1, 2))
class String2TestCase(BasicTestCase):
type = "string"
length = 20
shape = (2, 20)
chunkshape = (5, 5)
start = 1
stop = 10
step = 2
class StringComprTestCase(BasicTestCase):
type = "string"
length = 20
shape = (20, 2, 10)
#shape = (20,0,10,20)
compr = 1
    #shuffle = 1 # this shouldn't do anything on chars
chunkshape = (50, 50, 2)
start = -1
stop = 100
step = 20
class Int8TestCase(BasicTestCase):
type = "int8"
shape = (2,2)
compress = 1
shuffle = 1
chunkshape = (50,50)
start = -1
stop = 100
step = 20
class Int16TestCase(BasicTestCase):
type = "int16"
shape = (2,2)
compress = 1
shuffle = 1
chunkshape = (50,50)
start = 1
stop = 100
step = 1
class Int32TestCase(BasicTestCase):
type = "int32"
shape = (2,2)
compress = 1
shuffle = 1
chunkshape = (50,50)
start = -1
stop = 100
step = 20
class Float16TestCase(BasicTestCase):
type = "float16"
shape = (200,)
compress = 1
shuffle = 1
chunkshape = (20,)
start = -1
stop = 100
step = 20
class Float32TestCase(BasicTestCase):
type = "float32"
shape = (200,)
compress = 1
shuffle = 1
chunkshape = (20,)
start = -1
stop = 100
step = 20
class Float64TestCase(BasicTestCase):
type = "float64"
shape = (200,)
compress = 1
shuffle = 1
chunkshape = (20,)
start = -1
stop = 100
step = 20
class Float96TestCase(BasicTestCase):
type = "float96"
shape = (200,)
compress = 1
shuffle = 1
chunkshape = (20,)
start = -1
stop = 100
step = 20
class Float128TestCase(BasicTestCase):
type = "float128"
shape = (200,)
compress = 1
shuffle = 1
chunkshape = (20,)
start = -1
stop = 100
step = 20
class Complex64TestCase(BasicTestCase):
type = "complex64"
shape = (4,)
compress = 1
shuffle = 1
chunkshape = (2,)
start = -1
stop = 100
step = 20
class Complex128TestCase(BasicTestCase):
type = "complex128"
shape = (20,)
compress = 1
shuffle = 1
chunkshape = (2,)
start = -1
stop = 100
step = 20
class ComprTestCase(BasicTestCase):
type = "float64"
compress = 1
shuffle = 1
shape = (200,)
compr = 1
chunkshape = (21,)
start = 51
stop = 100
step = 7
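# Editorial note with an illustrative (non-existent) configuration: each class
# above only overrides BasicTestCase attributes, so a new parameter combination
# is added by subclassing, e.g.:
#
#   class ZlibBigChunkTestCase(BasicTestCase):
#       compress = 1
#       complib = "zlib"
#       chunkshape = (200, 200)
#       start = 0
#       stop = 10
#       step = 2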
# this is a subset of the tests in test_array.py, mostly to verify that errors
# are handled in the same way
class ReadOutArgumentTests(unittest.TestCase):
def setUp(self):
self.file = tempfile.mktemp(".h5")
self.fileh = openFile(self.file, mode='w')
self.size = 1000
self.filters = Filters(complevel=1, complib='blosc')
def tearDown(self):
self.fileh.close()
os.remove(self.file)
def create_array(self):
array = numpy.arange(self.size, dtype='i8')
disk_array = self.fileh.createCArray('/', 'array', Int64Atom(),
(self.size, ),
filters=self.filters)
disk_array[:] = array
return array, disk_array
def test_read_entire_array(self):
array, disk_array = self.create_array()
out_buffer = numpy.empty((self.size, ), 'i8')
disk_array.read(out=out_buffer)
numpy.testing.assert_equal(out_buffer, array)
def test_read_non_contiguous_buffer(self):
array, disk_array = self.create_array()
out_buffer = numpy.empty((self.size, ), 'i8')
out_buffer_slice = out_buffer[0:self.size:2]
# once Python 2.6 support is dropped, this could change
# to assertRaisesRegexp to check exception type and message at once
self.assertRaises(ValueError, disk_array.read, 0, self.size, 2,
out_buffer_slice)
try:
disk_array.read(0, self.size, 2, out_buffer_slice)
except ValueError as exc:
self.assertEqual('output array not C contiguous', str(exc))
def test_buffer_too_small(self):
array, disk_array = self.create_array()
out_buffer = numpy.empty((self.size // 2, ), 'i8')
self.assertRaises(ValueError, disk_array.read, 0, self.size, 1,
out_buffer)
try:
disk_array.read(0, self.size, 1, out_buffer)
except ValueError as exc:
self.assertTrue('output array size invalid, got' in str(exc))
class SizeOnDiskInMemoryPropertyTestCase(unittest.TestCase):
def setUp(self):
self.array_size = (10000, 10)
# set chunkshape so it divides evenly into array_size, to avoid
# partially filled chunks
self.chunkshape = (1000, 10)
# approximate size (in bytes) of non-data portion of hdf5 file
self.hdf_overhead = 6000
self.file = tempfile.mktemp(".h5")
self.fileh = openFile(self.file, mode = "w")
def tearDown(self):
self.fileh.close()
# Then, delete the file
os.remove(self.file)
common.cleanup(self)
def create_array(self, complevel):
filters = Filters(complevel=complevel, complib='blosc')
self.array = self.fileh.createCArray('/', 'somearray', Int16Atom(),
self.array_size,
filters=filters,
chunkshape=self.chunkshape)
def test_no_data(self):
complevel = 0
self.create_array(complevel)
self.assertEqual(self.array.size_on_disk, 0)
self.assertEqual(self.array.size_in_memory, 10000 * 10 * 2)
def test_data_no_compression(self):
complevel = 0
self.create_array(complevel)
self.array[:] = 1
self.assertEqual(self.array.size_on_disk, 10000 * 10 * 2)
self.assertEqual(self.array.size_in_memory, 10000 * 10 * 2)
def test_highly_compressible_data(self):
complevel = 1
self.create_array(complevel)
self.array[:] = 1
self.fileh.flush()
file_size = os.stat(self.file).st_size
self.assertTrue(
abs(self.array.size_on_disk - file_size) <= self.hdf_overhead)
self.assertTrue(self.array.size_on_disk < self.array.size_in_memory)
self.assertEqual(self.array.size_in_memory, 10000 * 10 * 2)
# XXX
def test_random_data(self):
complevel = 1
self.create_array(complevel)
self.array[:] = numpy.random.randint(0, 1e6, self.array_size)
self.fileh.flush()
file_size = os.stat(self.file).st_size
self.assertTrue(
abs(self.array.size_on_disk - file_size) <= self.hdf_overhead)
# XXX: check. The test fails if blosc is not available
if whichLibVersion('blosc') is not None:
self.assertAlmostEqual(self.array.size_on_disk, 10000 * 10 * 2)
else:
self.assertTrue(
abs(self.array.size_on_disk - 10000 * 10 * 2) < 200)
class OffsetStrideTestCase(unittest.TestCase):
mode = "w"
compress = 0
complib = "zlib" # Default compression library
def setUp(self):
# Create an instance of an HDF5 Table
self.file = tempfile.mktemp(".h5")
self.fileh = openFile(self.file, self.mode)
self.rootgroup = self.fileh.root
def tearDown(self):
self.fileh.close()
os.remove(self.file)
common.cleanup(self)
#----------------------------------------
def test01a_String(self):
"""Checking carray with offseted NumPy strings appends"""
root = self.rootgroup
if common.verbose:
print '\n', '-=' * 30
print "Running %s.test01a_String..." % self.__class__.__name__
shape = (3, 2, 2)
        # Create a string atom
carray = self.fileh.createCArray(root, 'strings',
StringAtom(itemsize=3), shape,
"Array of strings",
chunkshape=(1, 2, 2))
a = numpy.array([[["a", "b"], ["123", "45"], ["45", "123"]]], dtype="S3")
carray[0] = a[0, 1:]
a = numpy.array([[["s", "a"], ["ab", "f"], ["s", "abc"], ["abc", "f"]]])
carray[1] = a[0, 2:]
# Read all the data:
data = carray.read()
if common.verbose:
print "Object read:", data
print "Nrows in", carray._v_pathname, ":", carray.nrows
print "Second row in carray ==>", data[1].tolist()
self.assertEqual(carray.nrows, 3)
self.assertEqual(data[0].tolist(), [[b"123", b"45"], [b"45", b"123"]])
self.assertEqual(data[1].tolist(), [[b"s", b"abc"], [b"abc", b"f"]])
self.assertEqual(len(data[0]), 2)
self.assertEqual(len(data[1]), 2)
def test01b_String(self):
"""Checking carray with strided NumPy strings appends"""
root = self.rootgroup
if common.verbose:
print '\n', '-=' * 30
print "Running %s.test01b_String..." % self.__class__.__name__
shape = (3, 2, 2)
        # Create a string atom
carray = self.fileh.createCArray(root, 'strings',
StringAtom(itemsize=3), shape,
"Array of strings",
chunkshape=(1, 2, 2))
a = numpy.array([[["a", "b"], ["123", "45"], ["45", "123"]]], dtype="S3")
carray[0] = a[0, ::2]
a = numpy.array([[["s", "a"], ["ab", "f"], ["s", "abc"], ["abc", "f"]]])
carray[1] = a[0, ::2]
# Read all the rows:
data = carray.read()
if common.verbose:
print "Object read:", data
print "Nrows in", carray._v_pathname, ":", carray.nrows
print "Second row in carray ==>", data[1].tolist()
self.assertEqual(carray.nrows, 3)
self.assertEqual(data[0].tolist(), [[b"a", b"b"], [b"45", b"123"]])
self.assertEqual(data[1].tolist(), [[b"s", b"a"], [b"s", b"abc"]])
self.assertEqual(len(data[0]), 2)
self.assertEqual(len(data[1]), 2)
def test02a_int(self):
"""Checking carray with offseted NumPy ints appends"""
root = self.rootgroup
if common.verbose:
print '\n', '-=' * 30
print "Running %s.test02a_int..." % self.__class__.__name__
shape = (3, 3)
        # Create an int atom
carray = self.fileh.createCArray(root, 'CAtom',
Int32Atom(), shape,
"array of ints",
chunkshape=(1, 3))
a = numpy.array([(0, 0, 0), (1, 0, 3), (1, 1, 1), (0, 0, 0)], dtype='int32')
carray[0:2] = a[2:] # Introduce an offset
a = numpy.array([(1, 1, 1), (-1, 0, 0)], dtype='int32')
carray[2:3] = a[1:] # Introduce an offset
# Read all the rows:
data = carray.read()
if common.verbose:
print "Object read:", data
print "Nrows in", carray._v_pathname, ":", carray.nrows
print "Third row in carray ==>", data[2]
self.assertEqual(carray.nrows, 3)
self.assertTrue(allequal(data[0], numpy.array([1, 1, 1], dtype='int32')))
self.assertTrue(allequal(data[1], numpy.array([0, 0, 0], dtype='int32')))
self.assertTrue(allequal(data[2], numpy.array([-1, 0, 0], dtype='int32')))
def test02b_int(self):
"""Checking carray with strided NumPy ints appends"""
root = self.rootgroup
if common.verbose:
print '\n', '-=' * 30
print "Running %s.test02b_int..." % self.__class__.__name__
shape = (3, 3)
        # Create an int atom
carray = self.fileh.createCArray(root, 'CAtom',
Int32Atom(), shape,
"array of ints",
chunkshape=(1, 3))
a = numpy.array([(0, 0, 0), (1, 0, 3), (1, 1, 1), (3, 3, 3)], dtype='int32')
carray[0:2] = a[::3] # Create an offset
a = numpy.array([(1, 1, 1), (-1, 0, 0)], dtype='int32')
carray[2:3] = a[::2] # Create an offset
# Read all the rows:
data = carray.read()
if common.verbose:
print "Object read:", data
print "Nrows in", carray._v_pathname, ":", carray.nrows
print "Third row in carray ==>", data[2]
self.assertEqual(carray.nrows, 3)
self.assertTrue(allequal(data[0], numpy.array([0, 0, 0], dtype='int32')))
self.assertTrue(allequal(data[1], numpy.array([3, 3, 3], dtype='int32')))
self.assertTrue(allequal(data[2], numpy.array([1, 1, 1], dtype='int32')))
class CopyTestCase(unittest.TestCase):
def test01a_copy(self):
"""Checking CArray.copy() method """
if common.verbose:
print '\n', '-=' * 30
print "Running %s.test01a_copy..." % self.__class__.__name__
# Create an instance of an HDF5 Table
file = tempfile.mktemp(".h5")
fileh = openFile(file, "w")
# Create an CArray
shape = (2, 2)
arr = Int16Atom()
array1 = fileh.createCArray(fileh.root, 'array1', arr, shape,
"title array1", chunkshape=(2, 2))
array1[...] = numpy.array([[456, 2], [3, 457]], dtype='int16')
if self.close:
if common.verbose:
print "(closing file version)"
fileh.close()
fileh = openFile(file, mode = "a")
array1 = fileh.root.array1
# Copy it to another location
array2 = array1.copy('/', 'array2')
if self.close:
if common.verbose:
print "(closing file version)"
fileh.close()
fileh = openFile(file, mode = "r")
array1 = fileh.root.array1
array2 = fileh.root.array2
if common.verbose:
print "array1-->", array1.read()
print "array2-->", array2.read()
#print "dirs-->", dir(array1), dir(array2)
print "attrs array1-->", repr(array1.attrs)
print "attrs array2-->", repr(array2.attrs)
# Check that all the elements are equal
self.assertTrue(allequal(array1.read(), array2.read()))
# Assert other properties in array
self.assertEqual(array1.nrows, array2.nrows)
self.assertEqual(array1.shape, array2.shape)
self.assertEqual(array1.extdim, array2.extdim)
self.assertEqual(array1.flavor, array2.flavor)
self.assertEqual(array1.atom.dtype, array2.atom.dtype)
self.assertEqual(array1.atom.type, array2.atom.type)
self.assertEqual(array1.title, array2.title)
self.assertEqual(str(array1.atom), str(array2.atom))
# The next line is commented out because a copy should not
# keep the same chunkshape anymore.
# F. Alted 2006-11-27
#self.assertEqual(array1.chunkshape, array2.chunkshape)
# Close the file
fileh.close()
os.remove(file)
def test01b_copy(self):
"""Checking CArray.copy() method """
if common.verbose:
print '\n', '-=' * 30
print "Running %s.test01b_copy..." % self.__class__.__name__
# Create an instance of an HDF5 Table
file = tempfile.mktemp(".h5")
fileh = openFile(file, "w")
# Create an CArray
shape = (2, 2)
arr = Int16Atom()
array1 = fileh.createCArray(fileh.root, 'array1', arr, shape,
"title array1", chunkshape=(5, 5))
array1[...] = numpy.array([[456, 2], [3, 457]], dtype='int16')
if self.close:
if common.verbose:
print "(closing file version)"
fileh.close()
fileh = openFile(file, mode = "a")
array1 = fileh.root.array1
# Copy it to another location
array2 = array1.copy('/', 'array2')
if self.close:
if common.verbose:
print "(closing file version)"
fileh.close()
fileh = openFile(file, mode = "r")
array1 = fileh.root.array1
array2 = fileh.root.array2
if common.verbose:
print "array1-->", array1.read()
print "array2-->", array2.read()
#print "dirs-->", dir(array1), dir(array2)
print "attrs array1-->", repr(array1.attrs)
print "attrs array2-->", repr(array2.attrs)
# Check that all the elements are equal
self.assertTrue(allequal(array1.read(), array2.read()))
# Assert other properties in array
self.assertEqual(array1.nrows, array2.nrows)
self.assertEqual(array1.shape, array2.shape)
self.assertEqual(array1.extdim, array2.extdim)
self.assertEqual(array1.flavor, array2.flavor)
self.assertEqual(array1.atom.dtype, array2.atom.dtype)
self.assertEqual(array1.atom.type, array2.atom.type)
self.assertEqual(array1.title, array2.title)
self.assertEqual(str(array1.atom), str(array2.atom))
# By default, the chunkshape should be the same
self.assertEqual(array1.chunkshape, array2.chunkshape)
# Close the file
fileh.close()
os.remove(file)
def test01c_copy(self):
"""Checking CArray.copy() method """
if common.verbose:
print '\n', '-=' * 30
print "Running %s.test01c_copy..." % self.__class__.__name__
# Create an instance of an HDF5 Table
file = tempfile.mktemp(".h5")
fileh = openFile(file, "w")
# Create an CArray
shape = (5, 5)
arr = Int16Atom()
array1 = fileh.createCArray(fileh.root, 'array1', arr, shape,
"title array1", chunkshape=(2, 2))
array1[:2, :2] = numpy.array([[456, 2], [3, 457]], dtype='int16')
if self.close:
if common.verbose:
print "(closing file version)"
fileh.close()
fileh = openFile(file, mode = "a")
array1 = fileh.root.array1
# Copy it to another location
array2 = array1.copy('/', 'array2')
if self.close:
if common.verbose:
print "(closing file version)"
fileh.close()
fileh = openFile(file, mode = "r")
array1 = fileh.root.array1
array2 = fileh.root.array2
if common.verbose:
print "array1-->", array1.read()
print "array2-->", array2.read()
#print "dirs-->", dir(array1), dir(array2)
print "attrs array1-->", repr(array1.attrs)
print "attrs array2-->", repr(array2.attrs)
# Check that all the elements are equal
self.assertTrue(allequal(array1.read(), array2.read()))
# Assert other properties in array
self.assertEqual(array1.nrows, array2.nrows)
self.assertEqual(array1.shape, array2.shape)
self.assertEqual(array1.extdim, array2.extdim)
self.assertEqual(array1.flavor, array2.flavor)
self.assertEqual(array1.atom.dtype, array2.atom.dtype)
self.assertEqual(array1.atom.type, array2.atom.type)
self.assertEqual(array1.title, array2.title)
self.assertEqual(str(array1.atom), str(array2.atom))
# The next line is commented out because a copy should not
# keep the same chunkshape anymore.
# F. Alted 2006-11-27
#self.assertEqual(array1.chunkshape, array2.chunkshape)
# Close the file
fileh.close()
os.remove(file)
def test02_copy(self):
"""Checking CArray.copy() method (where specified)"""
if common.verbose:
print '\n', '-=' * 30
print "Running %s.test02_copy..." % self.__class__.__name__
# Create an instance of an HDF5 Table
file = tempfile.mktemp(".h5")
fileh = openFile(file, "w")
# Create an CArray
shape = (5, 5)
arr = Int16Atom()
array1 = fileh.createCArray(fileh.root, 'array1', arr, shape,
"title array1", chunkshape=(2, 2))
array1[:2, :2] = numpy.array([[456, 2], [3, 457]], dtype='int16')
if self.close:
if common.verbose:
print "(closing file version)"
fileh.close()
fileh = openFile(file, mode = "a")
array1 = fileh.root.array1
# Copy to another location
group1 = fileh.createGroup("/", "group1")
array2 = array1.copy(group1, 'array2')
if self.close:
if common.verbose:
print "(closing file version)"
fileh.close()
fileh = openFile(file, mode = "r")
array1 = fileh.root.array1
array2 = fileh.root.group1.array2
if common.verbose:
print "array1-->", array1.read()
print "array2-->", array2.read()
#print "dirs-->", dir(array1), dir(array2)
print "attrs array1-->", repr(array1.attrs)
print "attrs array2-->", repr(array2.attrs)
# Check that all the elements are equal
self.assertTrue(allequal(array1.read(), array2.read()))
# Assert other properties in array
self.assertEqual(array1.nrows, array2.nrows)
self.assertEqual(array1.shape, array2.shape)
self.assertEqual(array1.extdim, array2.extdim)
self.assertEqual(array1.flavor, array2.flavor)
self.assertEqual(array1.atom.dtype, array2.atom.dtype)
self.assertEqual(array1.atom.type, array2.atom.type)
self.assertEqual(array1.title, array2.title)
self.assertEqual(str(array1.atom), str(array2.atom))
# The next line is commented out because a copy should not
# keep the same chunkshape anymore.
# F. Alted 2006-11-27
#self.assertEqual(array1.chunkshape, array2.chunkshape)
# Close the file
fileh.close()
os.remove(file)
def test03a_copy(self):
"""Checking CArray.copy() method (python flavor)"""
if common.verbose:
print '\n', '-=' * 30
print "Running %s.test03c_copy..." % self.__class__.__name__
# Create an instance of an HDF5 Table
file = tempfile.mktemp(".h5")
fileh = openFile(file, "w")
shape = (2, 2)
arr = Int16Atom()
array1 = fileh.createCArray(fileh.root, 'array1', arr, shape,
"title array1", chunkshape=(2, 2))
array1.flavor = "python"
array1[...] = [[456, 2], [3, 457]]
if self.close:
if common.verbose:
print "(closing file version)"
fileh.close()
fileh = openFile(file, mode = "a")
array1 = fileh.root.array1
# Copy to another location
array2 = array1.copy('/', 'array2')
if self.close:
if common.verbose:
print "(closing file version)"
fileh.close()
fileh = openFile(file, mode = "r")
array1 = fileh.root.array1
array2 = fileh.root.array2
if common.verbose:
print "attrs array1-->", repr(array1.attrs)
print "attrs array2-->", repr(array2.attrs)
# Check that all elements are equal
self.assertEqual(array1.read(), array2.read())
# Assert other properties in array
self.assertEqual(array1.nrows, array2.nrows)
self.assertEqual(array1.shape, array2.shape)
self.assertEqual(array1.extdim, array2.extdim)
self.assertEqual(array1.flavor, array2.flavor) # Very important here!
self.assertEqual(array1.atom.dtype, array2.atom.dtype)
self.assertEqual(array1.atom.type, array2.atom.type)
self.assertEqual(array1.title, array2.title)
self.assertEqual(str(array1.atom), str(array2.atom))
# The next line is commented out because a copy should not
# keep the same chunkshape anymore.
# F. Alted 2006-11-27
#self.assertEqual(array1.chunkshape, array2.chunkshape)
# Close the file
fileh.close()
os.remove(file)
def test03b_copy(self):
"""Checking CArray.copy() method (string python flavor)"""
if common.verbose:
print '\n', '-=' * 30
print "Running %s.test03d_copy..." % self.__class__.__name__
# Create an instance of an HDF5 Table
file = tempfile.mktemp(".h5")
fileh = openFile(file, "w")
shape = (2, 2)
arr = StringAtom(itemsize=4)
array1 = fileh.createCArray(fileh.root, 'array1', arr, shape,
"title array1", chunkshape=(2, 2))
array1.flavor = "python"
array1[...] = [["456", "2"], ["3", "457"]]
if self.close:
if common.verbose:
print "(closing file version)"
fileh.close()
fileh = openFile(file, mode = "a")
array1 = fileh.root.array1
# Copy to another location
array2 = array1.copy('/', 'array2')
if self.close:
if common.verbose:
print "(closing file version)"
fileh.close()
fileh = openFile(file, mode = "r")
array1 = fileh.root.array1
array2 = fileh.root.array2
if common.verbose:
print "type value-->", type(array2[:][0][0])
print "value-->", array2[:]
print "attrs array1-->", repr(array1.attrs)
print "attrs array2-->", repr(array2.attrs)
# Check that all elements are equal
self.assertEqual(array1.read(), array2.read())
# Assert other properties in array
self.assertEqual(array1.nrows, array2.nrows)
self.assertEqual(array1.shape, array2.shape)
self.assertEqual(array1.extdim, array2.extdim)
self.assertEqual(array1.flavor, array2.flavor) # Very important here!
self.assertEqual(array1.atom.dtype, array2.atom.dtype)
self.assertEqual(array1.atom.type, array2.atom.type)
self.assertEqual(array1.title, array2.title)
self.assertEqual(str(array1.atom), str(array2.atom))
# The next line is commented out because a copy should not
# keep the same chunkshape anymore.
# F. Alted 2006-11-27
#self.assertEqual(array1.chunkshape, array2.chunkshape)
# Close the file
fileh.close()
os.remove(file)
def test03c_copy(self):
"""Checking CArray.copy() method (chararray flavor)"""
if common.verbose:
print '\n', '-=' * 30
print "Running %s.test03e_copy..." % self.__class__.__name__
# Create an instance of an HDF5 Table
file = tempfile.mktemp(".h5")
fileh = openFile(file, "w")
shape = (2, 2)
arr = StringAtom(itemsize=4)
array1 = fileh.createCArray(fileh.root, 'array1', arr, shape,
"title array1", chunkshape=(2, 2))
array1[...] = numpy.array([["456", "2"], ["3", "457"]], dtype="S4")
if self.close:
if common.verbose:
print "(closing file version)"
fileh.close()
fileh = openFile(file, mode = "a")
array1 = fileh.root.array1
# Copy to another location
array2 = array1.copy('/', 'array2')
if self.close:
if common.verbose:
print "(closing file version)"
fileh.close()
fileh = openFile(file, mode = "r")
array1 = fileh.root.array1
array2 = fileh.root.array2
if common.verbose:
print "attrs array1-->", repr(array1.attrs)
print "attrs array2-->", repr(array2.attrs)
# Check that all elements are equal
self.assertTrue(allequal(array1.read(), array2.read()))
# Assert other properties in array
self.assertEqual(array1.nrows, array2.nrows)
self.assertEqual(array1.shape, array2.shape)
self.assertEqual(array1.extdim, array2.extdim)
self.assertEqual(array1.flavor, array2.flavor) # Very important here!
self.assertEqual(array1.atom.dtype, array2.atom.dtype)
self.assertEqual(array1.atom.type, array2.atom.type)
self.assertEqual(array1.title, array2.title)
self.assertEqual(str(array1.atom), str(array2.atom))
# The next line is commented out because a copy should not
# keep the same chunkshape anymore.
# F. Alted 2006-11-27
#self.assertEqual(array1.chunkshape, array2.chunkshape)
# Close the file
fileh.close()
os.remove(file)
def test04_copy(self):
"""Checking CArray.copy() method (checking title copying)"""
if common.verbose:
print '\n', '-=' * 30
print "Running %s.test04_copy..." % self.__class__.__name__
# Create an instance of an HDF5 Table
file = tempfile.mktemp(".h5")
fileh = openFile(file, "w")
        # Create a CArray
shape = (2, 2)
atom = Int16Atom()
array1 = fileh.createCArray(fileh.root, 'array1', atom, shape,
"title array1", chunkshape=(2, 2))
array1[...] = numpy.array([[456, 2], [3, 457]], dtype='int16')
# Append some user attrs
array1.attrs.attr1 = "attr1"
array1.attrs.attr2 = 2
if self.close:
if common.verbose:
print "(closing file version)"
fileh.close()
fileh = openFile(file, mode = "a")
array1 = fileh.root.array1
# Copy it to another Array
array2 = array1.copy('/', 'array2', title="title array2")
if self.close:
if common.verbose:
print "(closing file version)"
fileh.close()
fileh = openFile(file, mode = "r")
array1 = fileh.root.array1
array2 = fileh.root.array2
# Assert user attributes
if common.verbose:
print "title of destination array-->", array2.title
self.assertEqual(array2.title, "title array2")
# Close the file
fileh.close()
os.remove(file)
def test05_copy(self):
"""Checking CArray.copy() method (user attributes copied)"""
if common.verbose:
print '\n', '-=' * 30
print "Running %s.test05_copy..." % self.__class__.__name__
# Create an instance of an HDF5 Table
file = tempfile.mktemp(".h5")
fileh = openFile(file, "w")
        # Create a CArray
shape = (2, 2)
atom = Int16Atom()
array1 = fileh.createCArray(fileh.root, 'array1', atom, shape,
"title array1", chunkshape=(2, 2))
array1[...] = numpy.array([[456, 2], [3, 457]], dtype='int16')
# Append some user attrs
array1.attrs.attr1 = "attr1"
array1.attrs.attr2 = 2
if self.close:
if common.verbose:
print "(closing file version)"
fileh.close()
fileh = openFile(file, mode = "a")
array1 = fileh.root.array1
# Copy it to another Array
array2 = array1.copy('/', 'array2', copyuserattrs=1)
if self.close:
if common.verbose:
print "(closing file version)"
fileh.close()
fileh = openFile(file, mode = "r")
array1 = fileh.root.array1
array2 = fileh.root.array2
if common.verbose:
print "attrs array1-->", repr(array1.attrs)
print "attrs array2-->", repr(array2.attrs)
# Assert user attributes
self.assertEqual(array2.attrs.attr1, "attr1")
self.assertEqual(array2.attrs.attr2, 2)
# Close the file
fileh.close()
os.remove(file)
def test05b_copy(self):
"""Checking CArray.copy() method (user attributes not copied)"""
if common.verbose:
print '\n', '-=' * 30
print "Running %s.test05b_copy..." % self.__class__.__name__
# Create an instance of an HDF5 Table
file = tempfile.mktemp(".h5")
fileh = openFile(file, "w")
# Create an Array
shape = (2, 2)
atom = Int16Atom()
array1 = fileh.createCArray(fileh.root, 'array1', atom, shape,
"title array1", chunkshape=(2, 2))
array1[...] = numpy.array([[456, 2], [3, 457]], dtype='int16')
# Append some user attrs
array1.attrs.attr1 = "attr1"
array1.attrs.attr2 = 2
if self.close:
if common.verbose:
print "(closing file version)"
fileh.close()
fileh = openFile(file, mode = "a")
array1 = fileh.root.array1
# Copy it to another Array
array2 = array1.copy('/', 'array2', copyuserattrs=0)
if self.close:
if common.verbose:
print "(closing file version)"
fileh.close()
fileh = openFile(file, mode = "r")
array1 = fileh.root.array1
array2 = fileh.root.array2
if common.verbose:
print "attrs array1-->", repr(array1.attrs)
print "attrs array2-->", repr(array2.attrs)
# Assert user attributes
self.assertEqual(hasattr(array2.attrs, "attr1"), 0)
self.assertEqual(hasattr(array2.attrs, "attr2"), 0)
# Close the file
fileh.close()
os.remove(file)
class CloseCopyTestCase(CopyTestCase):
close = 1
class OpenCopyTestCase(CopyTestCase):
close = 0
class CopyIndexTestCase(unittest.TestCase):
nrowsinbuf = 2
def test01_index(self):
"""Checking CArray.copy() method with indexes"""
if common.verbose:
print '\n', '-=' * 30
print "Running %s.test01_index..." % self.__class__.__name__
# Create an instance of an HDF5 Array
file = tempfile.mktemp(".h5")
fileh = openFile(file, "w")
        # Create a CArray
shape = (100, 2)
atom = Int32Atom()
array1 = fileh.createCArray(fileh.root, 'array1', atom, shape,
"title array1", chunkshape=(2, 2))
r = numpy.arange(200, dtype='int32')
r.shape = shape
array1[...] = r
# Select a different buffer size:
array1.nrowsinbuf = self.nrowsinbuf
# Copy to another array
array2 = array1.copy("/", 'array2',
start=self.start,
stop=self.stop,
step=self.step)
if common.verbose:
print "array1-->", array1.read()
print "array2-->", array2.read()
print "attrs array1-->", repr(array1.attrs)
print "attrs array2-->", repr(array2.attrs)
# Check that all the elements are equal
r2 = r[self.start:self.stop:self.step]
self.assertTrue(allequal(r2, array2.read()))
# Assert the number of rows in array
if common.verbose:
print "nrows in array2-->", array2.nrows
print "and it should be-->", r2.shape[0]
# The next line is commented out because a copy should not
# keep the same chunkshape anymore.
# F. Alted 2006-11-27
#assert array1.chunkshape == array2.chunkshape
self.assertEqual(r2.shape[0], array2.nrows)
# Close the file
fileh.close()
os.remove(file)
def _test02_indexclosef(self):
"""Checking CArray.copy() method with indexes (close file version)"""
if common.verbose:
print '\n', '-=' * 30
print "Running %s.test02_indexclosef..." % self.__class__.__name__
# Create an instance of an HDF5 Array
file = tempfile.mktemp(".h5")
fileh = openFile(file, "w")
        # Create a CArray
shape = (100, 2)
atom = Int32Atom()
array1 = fileh.createCArray(fileh.root, 'array1', atom, shape,
"title array1", chunkshape=(2, 2))
r = numpy.arange(200, dtype='int32')
r.shape = shape
array1[...] = r
# Select a different buffer size:
array1.nrowsinbuf = self.nrowsinbuf
# Copy to another array
array2 = array1.copy("/", 'array2',
start=self.start,
stop=self.stop,
step=self.step)
# Close and reopen the file
fileh.close()
fileh = openFile(file, mode = "r")
array1 = fileh.root.array1
array2 = fileh.root.array2
if common.verbose:
print "array1-->", array1.read()
print "array2-->", array2.read()
print "attrs array1-->", repr(array1.attrs)
print "attrs array2-->", repr(array2.attrs)
# Check that all the elements are equal
r2 = r[self.start:self.stop:self.step]
self.assertEqual(array1.chunkshape, array2.chunkshape)
self.assertTrue(allequal(r2, array2.read()))
# Assert the number of rows in array
if common.verbose:
print "nrows in array2-->", array2.nrows
print "and it should be-->", r2.shape[0]
self.assertEqual(r2.shape[0], array2.nrows)
# Close the file
fileh.close()
os.remove(file)
class CopyIndex1TestCase(CopyIndexTestCase):
nrowsinbuf = 1
start = 0
stop = 7
step = 1
class CopyIndex2TestCase(CopyIndexTestCase):
nrowsinbuf = 2
start = 0
stop = -1
step = 1
class CopyIndex3TestCase(CopyIndexTestCase):
nrowsinbuf = 3
start = 1
stop = 7
step = 1
class CopyIndex4TestCase(CopyIndexTestCase):
nrowsinbuf = 4
start = 0
stop = 6
step = 1
class CopyIndex5TestCase(CopyIndexTestCase):
nrowsinbuf = 2
start = 3
stop = 7
step = 1
class CopyIndex6TestCase(CopyIndexTestCase):
nrowsinbuf = 2
start = 3
stop = 6
step = 2
class CopyIndex7TestCase(CopyIndexTestCase):
start = 0
stop = 7
step = 10
class CopyIndex8TestCase(CopyIndexTestCase):
start = 6
    stop = -1 # Negative values mean starting from the end
step = 1
class CopyIndex9TestCase(CopyIndexTestCase):
start = 3
stop = 4
step = 1
class CopyIndex10TestCase(CopyIndexTestCase):
nrowsinbuf = 1
start = 3
stop = 4
step = 2
class CopyIndex11TestCase(CopyIndexTestCase):
start = -3
stop = -1
step = 2
class CopyIndex12TestCase(CopyIndexTestCase):
start = -1 # Should point to the last element
stop = None # None should mean the last element (including it)
step = 1
# The next test should be run only in **heavy** mode
class Rows64bitsTestCase(unittest.TestCase):
narows = 1000*1000 # each array will have 1 million entries
#narows = 1000 # for testing only
nanumber = 1000*3 # That should account for more than 2**31-1
def setUp(self):
# Create an instance of an HDF5 Table
self.file = tempfile.mktemp(".h5")
fileh = self.fileh = openFile(self.file, "a")
        # Create a CArray
shape = (self.narows*self.nanumber,)
array = fileh.createCArray(fileh.root, 'array',
Int8Atom(), shape,
filters=Filters(complib='lzo',
complevel=1))
# Fill the array
na = numpy.arange(self.narows, dtype='int8')
#~ for i in xrange(self.nanumber):
#~ s = slice(i*self.narows, (i+1)*self.narows)
#~ array[s] = na
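        # Only the first and last blocks are written; the checks below read
        # just the head and tail of the array, so filling every entry is
        # unnecessary.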
s = slice(0, self.narows)
array[s] = na
s = slice((self.nanumber-1)*self.narows, self.nanumber*self.narows)
array[s] = na
def tearDown(self):
self.fileh.close()
os.remove(self.file)
common.cleanup(self)
#----------------------------------------
def test01_basiccheck(self):
"Some basic checks for carrays exceeding 2**31 rows"
fileh = self.fileh
array = fileh.root.array
if self.close:
if common.verbose:
# Check how many entries there are in the array
print "Before closing"
print "Entries:", array.nrows, type(array.nrows)
print "Entries:", array.nrows / (1000*1000), "Millions"
print "Shape:", array.shape
# Close the file
fileh.close()
# Re-open the file
fileh = self.fileh = openFile(self.file)
array = fileh.root.array
if common.verbose:
print "After re-open"
# Check how many entries there are in the array
if common.verbose:
print "Entries:", array.nrows, type(array.nrows)
print "Entries:", array.nrows / (1000*1000), "Millions"
print "Shape:", array.shape
print "Last 10 elements-->", array[-10:]
stop = self.narows%256
if stop > 127:
stop -= 256
start = stop - 10
#print "start, stop-->", start, stop
print "Should look like:", numpy.arange(start, stop, dtype='int8')
nrows = self.narows*self.nanumber
# check nrows
self.assertEqual(array.nrows, nrows)
# Check shape
self.assertEqual(array.shape, (nrows,))
# check the 10 first elements
self.assertTrue(allequal(array[:10], numpy.arange(10, dtype='int8')))
# check the 10 last elements
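        # int8 values wrap modulo 256, so fold the expected tail of the
        # arange into the signed int8 range before comparing.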
stop = self.narows%256
if stop > 127:
stop -= 256
start = stop - 10
self.assertTrue(allequal(array[-10:],
numpy.arange(start, stop, dtype='int8')))
class Rows64bitsTestCase1(Rows64bitsTestCase):
close = 0
class Rows64bitsTestCase2(Rows64bitsTestCase):
close = 1
class BigArrayTestCase(common.TempFileMixin, common.PyTablesTestCase):
shape = (3000000000,) # more than 2**31-1
def setUp(self):
super(BigArrayTestCase, self).setUp()
# This should be fast since disk space isn't actually allocated,
# so this case is OK for non-heavy test runs.
self.h5file.createCArray('/', 'array', Int8Atom(), self.shape)
def test00_shape(self):
"""Check that the shape doesn't overflow."""
# See ticket #147.
self.assertEqual(self.h5file.root.array.shape, self.shape)
try:
self.assertEqual(len(self.h5file.root.array), self.shape[0])
except OverflowError:
# In python 2.4 calling "len(self.h5file.root.array)" raises
# an OverflowError also on 64bit platforms::
# OverflowError: __len__() should return 0 <= outcome < 2**31
import sys
if sys.version_info[:2] > (2, 4):
                # This can't be avoided on 32-bit platforms.
self.assertTrue(self.shape[0] > numpy.iinfo(int).max,
"Array length overflowed but ``int`` "
"is wide enough." )
def test01_shape_reopen(self):
"""Check that the shape doesn't overflow after reopening."""
self._reopen('r')
self.test00_shape()
# Test for default values when creating arrays.
class DfltAtomTestCase(common.TempFileMixin, common.PyTablesTestCase):
def test00_dflt(self):
"Check that Atom.dflt is honored (string version)."
# Create a CArray with default values
self.h5file.createCArray(
'/', 'bar', StringAtom(itemsize=5, dflt=b"abdef"), (10, 10))
if self.reopen:
self._reopen()
# Check the values
values = self.h5file.root.bar[:]
if common.verbose:
print "Read values:", values
self.assertTrue(allequal(values,
numpy.array(["abdef"]*100, "S5").reshape(10, 10)))
def test01_dflt(self):
"Check that Atom.dflt is honored (int version)."
# Create a CArray with default values
self.h5file.createCArray('/', 'bar', IntAtom(dflt=1), (10, 10))
if self.reopen:
self._reopen()
# Check the values
values = self.h5file.root.bar[:]
if common.verbose:
print "Read values:", values
self.assertTrue(allequal(values, numpy.ones((10, 10), "i4")))
def test02_dflt(self):
"Check that Atom.dflt is honored (float version)."
# Create a CArray with default values
self.h5file.createCArray('/', 'bar', FloatAtom(dflt=1.134), (10, 10))
if self.reopen:
self._reopen()
# Check the values
values = self.h5file.root.bar[:]
if common.verbose:
print "Read values:", values
self.assertTrue(allequal(values, numpy.ones((10, 10), "f8")*1.134))
class DfltAtomNoReopen(DfltAtomTestCase):
reopen = False
class DfltAtomReopen(DfltAtomTestCase):
reopen = True
# Test for representation of defaults in atoms. Ticket #212.
class AtomDefaultReprTestCase(common.TempFileMixin, common.PyTablesTestCase):
def test00a_zeros(self):
"Testing default values. Zeros (scalar)."
N = ()
atom = StringAtom(itemsize=3, shape=N, dflt=b"")
ca = self.h5file.createCArray('/', 'test', atom, (1,))
if self.reopen:
self._reopen('a')
ca = self.h5file.root.test
# Check the value
if common.verbose:
print "First row-->", repr(ca[0])
print "Defaults-->", repr(ca.atom.dflt)
self.assertTrue(allequal(ca[0], numpy.zeros(N, 'S3')))
self.assertTrue(allequal(ca.atom.dflt, numpy.zeros(N, 'S3')))
def test00b_zeros(self):
"Testing default values. Zeros (array)."
N = 2
atom = StringAtom(itemsize=3, shape=N, dflt=b"")
ca = self.h5file.createCArray('/', 'test', atom, (1,))
if self.reopen:
self._reopen('a')
ca = self.h5file.root.test
# Check the value
if common.verbose:
print "First row-->", ca[0]
print "Defaults-->", ca.atom.dflt
self.assertTrue(allequal(ca[0], numpy.zeros(N, 'S3')))
self.assertTrue(allequal(ca.atom.dflt, numpy.zeros(N, 'S3')))
def test01a_values(self):
"Testing default values. Ones."
N = 2
atom = Int32Atom(shape=N, dflt=1)
ca = self.h5file.createCArray('/', 'test', atom, (1,))
if self.reopen:
self._reopen('a')
ca = self.h5file.root.test
# Check the value
if common.verbose:
print "First row-->", ca[0]
print "Defaults-->", ca.atom.dflt
self.assertTrue(allequal(ca[0], numpy.ones(N, 'i4')))
self.assertTrue(allequal(ca.atom.dflt, numpy.ones(N, 'i4')))
def test01b_values(self):
"Testing default values. Generic value."
N = 2
generic = 112.32
atom = Float32Atom(shape=N, dflt=generic)
ca = self.h5file.createCArray('/', 'test', atom, (1,))
if self.reopen:
self._reopen('a')
ca = self.h5file.root.test
# Check the value
if common.verbose:
print "First row-->", ca[0]
print "Defaults-->", ca.atom.dflt
self.assertTrue(allequal(ca[0], numpy.ones(N, 'f4')*generic))
self.assertTrue(allequal(ca.atom.dflt, numpy.ones(N, 'f4')*generic))
def test02a_None(self):
"Testing default values. None (scalar)."
N = ()
atom = Int32Atom(shape=N, dflt=None)
ca = self.h5file.createCArray('/', 'test', atom, (1,))
if self.reopen:
self._reopen('a')
ca = self.h5file.root.test
# Check the value
if common.verbose:
print "First row-->", repr(ca[0])
print "Defaults-->", repr(ca.atom.dflt)
self.assertTrue(allequal(ca.atom.dflt, numpy.zeros(N, 'i4')))
def test02b_None(self):
"Testing default values. None (array)."
N = 2
atom = Int32Atom(shape=N, dflt=None)
ca = self.h5file.createCArray('/', 'test', atom, (1,))
if self.reopen:
self._reopen('a')
ca = self.h5file.root.test
# Check the value
if common.verbose:
print "First row-->", ca[0]
print "Defaults-->", ca.atom.dflt
self.assertTrue(allequal(ca.atom.dflt, numpy.zeros(N, 'i4')))
class AtomDefaultReprNoReopen(AtomDefaultReprTestCase):
reopen = False
class AtomDefaultReprReopen(AtomDefaultReprTestCase):
reopen = True
class TruncateTestCase(common.TempFileMixin, common.PyTablesTestCase):
def test(self):
"""Test for unability to truncate Array objects."""
array1 = self.h5file.createArray('/', 'array1', [0, 2])
self.assertRaises(TypeError, array1.truncate, 0)
# Test for dealing with multidimensional atoms
class MDAtomTestCase(common.TempFileMixin, common.PyTablesTestCase):
def test01a_assign(self):
"Assign a row to a (unidimensional) CArray with a MD atom."
        # Create a CArray
ca = self.h5file.createCArray('/', 'test', Int32Atom((2, 2)), (1,))
if self.reopen:
self._reopen('a')
ca = self.h5file.root.test
# Assign one row
ca[0] = [[1, 3], [4, 5]]
self.assertEqual(ca.nrows, 1)
if common.verbose:
print "First row-->", ca[0]
self.assertTrue(allequal(ca[0], numpy.array([[1, 3], [4, 5]], 'i4')))
def test01b_assign(self):
"Assign several rows to a (unidimensional) CArray with a MD atom."
        # Create a CArray
ca = self.h5file.createCArray('/', 'test', Int32Atom((2, 2)), (3,))
if self.reopen:
self._reopen('a')
ca = self.h5file.root.test
# Assign three rows
ca[:] = [[[1]], [[2]], [[3]]] # Simple broadcast
self.assertEqual(ca.nrows, 3)
if common.verbose:
print "Third row-->", ca[2]
self.assertTrue(allequal(ca[2], numpy.array([[3, 3], [3, 3]], 'i4')))
def test02a_assign(self):
"Assign a row to a (multidimensional) CArray with a MD atom."
        # Create a CArray
ca = self.h5file.createCArray('/', 'test', Int32Atom((2,)), (1, 3))
if self.reopen:
self._reopen('a')
ca = self.h5file.root.test
# Assign one row
ca[:] = [[[1, 3], [4, 5], [7, 9]]]
self.assertEqual(ca.nrows, 1)
if common.verbose:
print "First row-->", ca[0]
self.assertTrue(allequal(ca[0], numpy.array([[1, 3], [4, 5], [7, 9]], 'i4')))
def test02b_assign(self):
"Assign several rows to a (multidimensional) CArray with a MD atom."
        # Create a CArray
ca = self.h5file.createCArray('/', 'test', Int32Atom((2,)), (3, 3))
if self.reopen:
self._reopen('a')
ca = self.h5file.root.test
# Assign three rows
ca[:] = [[[1, -3], [4, -5], [-7, 9]],
[[-1, 3], [-4, 5], [7, -8]],
[[-2, 3], [-5, 5], [7, -9]]]
self.assertEqual(ca.nrows, 3)
if common.verbose:
print "Third row-->", ca[2]
self.assertTrue(allequal(ca[2],
numpy.array([[-2, 3], [-5, 5], [7, -9]], 'i4')))
def test03a_MDMDMD(self):
"Complex assign of a MD array in a MD CArray with a MD atom."
        # Create a CArray
ca = self.h5file.createCArray('/', 'test', Int32Atom((2, 4)), (3, 2, 3))
if self.reopen:
self._reopen('a')
ca = self.h5file.root.test
# Assign values
# The shape of the atom should be added at the end of the arrays
a = numpy.arange(2*3*2*4, dtype='i4').reshape((2, 3, 2, 4))
ca[:] = [a*1, a*2, a*3]
self.assertEqual(ca.nrows, 3)
if common.verbose:
print "Third row-->", ca[2]
self.assertTrue(allequal(ca[2], a*3))
def test03b_MDMDMD(self):
"Complex assign of a MD array in a MD CArray with a MD atom (II)."
        # Create a CArray
ca = self.h5file.createCArray('/', 'test', Int32Atom((2, 4)), (2, 3, 3))
if self.reopen:
self._reopen('a')
ca = self.h5file.root.test
# Assign values
# The shape of the atom should be added at the end of the arrays
a = numpy.arange(2*3*3*2*4, dtype='i4').reshape((2, 3, 3, 2, 4))
ca[:] = a
self.assertEqual(ca.nrows, 2)
if common.verbose:
print "Third row-->", ca[:, 2, ...]
self.assertTrue(allequal(ca[:, 2, ...], a[:, 2, ...]))
def test03c_MDMDMD(self):
"Complex assign of a MD array in a MD CArray with a MD atom (III)."
        # Create a CArray
ca = self.h5file.createCArray('/', 'test', Int32Atom((2, 4)), (3, 1, 2))
if self.reopen:
self._reopen('a')
ca = self.h5file.root.test
# Assign values
# The shape of the atom should be added at the end of the arrays
a = numpy.arange(3*1*2*2*4, dtype='i4').reshape((3, 1, 2, 2, 4))
ca[:] = a
self.assertEqual(ca.nrows, 3)
if common.verbose:
print "Second row-->", ca[:,:, 1, ...]
self.assertTrue(allequal(ca[:,:, 1, ...], a[:,:, 1, ...]))
class MDAtomNoReopen(MDAtomTestCase):
reopen = False
class MDAtomReopen(MDAtomTestCase):
reopen = True
# Test for building very large MD atoms without defaults. Ticket #211.
class MDLargeAtomTestCase(common.TempFileMixin, common.PyTablesTestCase):
def test01_create(self):
"Create a CArray with a very large MD atom."
N = 2**16 # 4x larger than maximum object header size (64 KB)
ca = self.h5file.createCArray('/', 'test', Int32Atom(shape=N), (1,))
if self.reopen:
self._reopen('a')
ca = self.h5file.root.test
# Check the value
if common.verbose:
print "First row-->", ca[0]
self.assertTrue(allequal(ca[0], numpy.zeros(N, 'i4')))
class MDLargeAtomNoReopen(MDLargeAtomTestCase):
reopen = False
class MDLargeAtomReopen(MDLargeAtomTestCase):
reopen = True
class AccessClosedTestCase(common.TempFileMixin, common.PyTablesTestCase):
def setUp(self):
super(AccessClosedTestCase, self).setUp()
self.array = self.h5file.createCArray(self.h5file.root, 'array',
Int32Atom(), (10, 10))
self.array[...] = numpy.zeros((10, 10))
def test_read(self):
self.h5file.close()
self.assertRaises(ClosedNodeError, self.array.read)
def test_getitem(self):
self.h5file.close()
self.assertRaises(ClosedNodeError, self.array.__getitem__, 0)
def test_setitem(self):
self.h5file.close()
self.assertRaises(ClosedNodeError, self.array.__setitem__, 0, 0)
#----------------------------------------------------------------------
def suite():
theSuite = unittest.TestSuite()
niter = 1
#common.heavy = 1 # uncomment this only for testing purposes
#theSuite.addTest(unittest.makeSuite(BasicTestCase))
for n in range(niter):
theSuite.addTest(unittest.makeSuite(BasicWriteTestCase))
theSuite.addTest(unittest.makeSuite(BasicWrite2TestCase))
theSuite.addTest(unittest.makeSuite(EmptyCArrayTestCase))
theSuite.addTest(unittest.makeSuite(EmptyCArray2TestCase))
theSuite.addTest(unittest.makeSuite(SlicesCArrayTestCase))
theSuite.addTest(unittest.makeSuite(Slices2CArrayTestCase))
theSuite.addTest(unittest.makeSuite(EllipsisCArrayTestCase))
theSuite.addTest(unittest.makeSuite(Ellipsis2CArrayTestCase))
theSuite.addTest(unittest.makeSuite(Ellipsis3CArrayTestCase))
theSuite.addTest(unittest.makeSuite(ZlibComprTestCase))
theSuite.addTest(unittest.makeSuite(ZlibShuffleTestCase))
theSuite.addTest(unittest.makeSuite(BloscComprTestCase))
theSuite.addTest(unittest.makeSuite(BloscShuffleTestCase))
theSuite.addTest(unittest.makeSuite(LZOComprTestCase))
theSuite.addTest(unittest.makeSuite(LZOShuffleTestCase))
theSuite.addTest(unittest.makeSuite(Bzip2ComprTestCase))
theSuite.addTest(unittest.makeSuite(Bzip2ShuffleTestCase))
theSuite.addTest(unittest.makeSuite(FloatTypeTestCase))
theSuite.addTest(unittest.makeSuite(ComplexTypeTestCase))
theSuite.addTest(unittest.makeSuite(StringTestCase))
theSuite.addTest(unittest.makeSuite(String2TestCase))
theSuite.addTest(unittest.makeSuite(StringComprTestCase))
theSuite.addTest(unittest.makeSuite(Int8TestCase))
theSuite.addTest(unittest.makeSuite(Int16TestCase))
theSuite.addTest(unittest.makeSuite(Int32TestCase))
if hasattr(numpy, 'float16'):
theSuite.addTest(unittest.makeSuite(Float16TestCase))
theSuite.addTest(unittest.makeSuite(Float32TestCase))
theSuite.addTest(unittest.makeSuite(Float64TestCase))
if hasattr(numpy, 'float96'):
theSuite.addTest(unittest.makeSuite(Float96TestCase))
if hasattr(numpy, 'float128'):
theSuite.addTest(unittest.makeSuite(Float128TestCase))
theSuite.addTest(unittest.makeSuite(Complex64TestCase))
theSuite.addTest(unittest.makeSuite(Complex128TestCase))
theSuite.addTest(unittest.makeSuite(ComprTestCase))
theSuite.addTest(unittest.makeSuite(OffsetStrideTestCase))
theSuite.addTest(unittest.makeSuite(Fletcher32TestCase))
theSuite.addTest(unittest.makeSuite(AllFiltersTestCase))
theSuite.addTest(unittest.makeSuite(ReadOutArgumentTests))
theSuite.addTest(unittest.makeSuite(SizeOnDiskInMemoryPropertyTestCase))
theSuite.addTest(unittest.makeSuite(CloseCopyTestCase))
theSuite.addTest(unittest.makeSuite(OpenCopyTestCase))
theSuite.addTest(unittest.makeSuite(CopyIndex1TestCase))
theSuite.addTest(unittest.makeSuite(CopyIndex2TestCase))
theSuite.addTest(unittest.makeSuite(CopyIndex3TestCase))
theSuite.addTest(unittest.makeSuite(CopyIndex4TestCase))
theSuite.addTest(unittest.makeSuite(CopyIndex5TestCase))
theSuite.addTest(unittest.makeSuite(BigArrayTestCase))
theSuite.addTest(unittest.makeSuite(DfltAtomNoReopen))
theSuite.addTest(unittest.makeSuite(DfltAtomReopen))
theSuite.addTest(unittest.makeSuite(AtomDefaultReprNoReopen))
theSuite.addTest(unittest.makeSuite(AtomDefaultReprReopen))
theSuite.addTest(unittest.makeSuite(TruncateTestCase))
theSuite.addTest(unittest.makeSuite(MDAtomNoReopen))
theSuite.addTest(unittest.makeSuite(MDAtomReopen))
theSuite.addTest(unittest.makeSuite(MDLargeAtomNoReopen))
theSuite.addTest(unittest.makeSuite(MDLargeAtomReopen))
theSuite.addTest(unittest.makeSuite(AccessClosedTestCase))
if common.heavy:
theSuite.addTest(unittest.makeSuite(Slices3CArrayTestCase))
theSuite.addTest(unittest.makeSuite(Slices4CArrayTestCase))
theSuite.addTest(unittest.makeSuite(Ellipsis4CArrayTestCase))
theSuite.addTest(unittest.makeSuite(Ellipsis5CArrayTestCase))
theSuite.addTest(unittest.makeSuite(Ellipsis6CArrayTestCase))
theSuite.addTest(unittest.makeSuite(Ellipsis7CArrayTestCase))
theSuite.addTest(unittest.makeSuite(MD3WriteTestCase))
theSuite.addTest(unittest.makeSuite(MD5WriteTestCase))
theSuite.addTest(unittest.makeSuite(MD6WriteTestCase))
theSuite.addTest(unittest.makeSuite(MD7WriteTestCase))
theSuite.addTest(unittest.makeSuite(MD10WriteTestCase))
theSuite.addTest(unittest.makeSuite(CopyIndex6TestCase))
theSuite.addTest(unittest.makeSuite(CopyIndex7TestCase))
theSuite.addTest(unittest.makeSuite(CopyIndex8TestCase))
theSuite.addTest(unittest.makeSuite(CopyIndex9TestCase))
theSuite.addTest(unittest.makeSuite(CopyIndex10TestCase))
theSuite.addTest(unittest.makeSuite(CopyIndex11TestCase))
theSuite.addTest(unittest.makeSuite(CopyIndex12TestCase))
theSuite.addTest(unittest.makeSuite(Rows64bitsTestCase1))
theSuite.addTest(unittest.makeSuite(Rows64bitsTestCase2))
return theSuite
if __name__ == '__main__':
unittest.main( defaultTest='suite' )
## Local Variables:
## mode: python
## py-indent-offset: 4
## tab-width: 4
## End:
| cpcloud/PyTables | tables/tests/test_carray.py | Python | bsd-3-clause | 82,028 |
from multiprocessing.dummy import Pool
import logging
import os
import threading
import urllib2
TARGET = "target"
PATH = "/tmp/save/"
URL = "http://image-net.org/api/text/imagenet.synset.geturls?wnid="
POOLSIZE = 128
TIMEOUT = 44
logging.basicConfig(filename = "download.log", filemode = "a", format = '%(asctime)s %(levelname)s: %(message)s', level = logging.WARNING)
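# Two-stage pipeline: a first pool expands every WordNet ID into
# "<image url> <local path>" work items via prepare_list(), then a second
# pool downloads every collected image with download_helper().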
def download(url, path):
try:
        data = urllib2.urlopen(url, timeout = TIMEOUT).read()
except:
logging.warning("%s %s", url, path)
return
obj = open(path, "wb")
obj.write(data)
obj.close()
def download_helper(s):
    s = s.split()
download(s[0], s[-1])
def prepare_list(wnid):
url = URL + wnid
path = PATH + wnid
if not os.path.exists(path):
os.mkdir(path)
target = os.path.join(path, wnid)
download(url, target)
item = []
count = 0
aim = open(target)
for url in aim:
url = url.strip()
if url == "":
continue
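        # Name each saved image with a zero-padded counter plus the last
        # characters of its URL, replacing '/' so the name holds no path
        # separator.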
prefix = ("0000" + str(count))[-4:]
suffix = url[-9:].replace('/', '\\')
name = prefix + "_" + suffix
item.append(url + " " + os.path.join(path, name))
count += 1
aim.close()
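    # parms is shared between workers (multiprocessing.dummy.Pool runs
    # threads in a single process), so extend it under a lock.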
mutex.acquire()
parms.extend(item)
mutex.release()
wnid = []
target = open(TARGET)
for line in target:
wnid.append(line[:9])
target.close()
parms = []
mutex = threading.Lock()
pool = Pool(POOLSIZE)
pool.map(prepare_list, wnid)
pool.close()
pool.join()
print(len(parms))
down = Pool(POOLSIZE)
down.map(download_helper, parms)
down.close()
down.join()
| Brilliant/ImageNet-Downloader | main.py | Python | mit | 1,588 |
from logbook import Logger
from ..core.local import get_current_conf
from ..core.connection import autoccontext
from .. import db
from datetime import timedelta, datetime
log = Logger(__name__)
def del_inactive_queries():
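    # Remove queries inactive for longer than the configured number of days
    # (bounded by the configured limit) and return how many were deleted.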
conf = get_current_conf()
with autoccontext(commit=True) as conn:
before = db.get_query_count(conn)
db.del_inactive_queries(
conn,
before=datetime.utcnow() - timedelta(days=conf['TORABOT_DELETE_INACTIVE_QUERIES_BEFORE_DAYS']),
limit=conf['TORABOT_DELETE_INACTIVE_QUERIES_LIMIT']
)
after = db.get_query_count(conn)
log.info('delete inactive queries, from {} to {}, deleted {}', before, after, before - after)
return before - after
def del_old_changes():
conf = get_current_conf()
with autoccontext(commit=True) as conn:
before = db.get_change_count(conn)
db.del_old_changes(
conn,
before=datetime.utcnow() - timedelta(days=conf['TORABOT_DELETE_OLD_CHANGES_BEFORE_DAYS']),
limit=conf['TORABOT_DELETE_OLD_CHANGES_LIMIT']
)
after = db.get_change_count(conn)
log.info('delete old changes, from {} to {}, deleted {}', before, after, before - after)
return before - after
| Answeror/torabot | torabot/tasks/delete.py | Python | mit | 1,277 |
# -*- coding: utf-8 -*-
'''
header_reader
This module will read and parse PDB header records
'''
'''
SASSIE: Copyright (C) 2011 Joseph E. Curtis, Ph.D.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import logging
import itertools
import numpy as np
from . import data_struct as data_struct
from .pdb_record_reader import (parse_line, process_runon_line, rec_schemas)
from .exceptions import (NoHeaderReadError, IncompleteBiomtError)
class ReferenceInfo:
"""
Contains the information extracted from the PDB header about the structure.
"""
def __init__(self):
"""
Initialise all of the variables to store header information.
Naming is either self explanatory (disulphides) or based on the PDB
records the information is derived from.
"""
self.title = ''
self.citation = {}
self.compnd = {}
self.formuls = {}
self.hets = {}
self.metrics = {}
class PdbHeader:
"""
Reads and then stores PDB header information.
"""
def __init__(self, pdbfile=None, parse=True, sasmol=None, text=None):
"""
Initialise all of the variables to store header information, warnings
and errors.
        If provided, read in a pdbfile and, if the parse flag is True,
        process it to populate the variables.
@type pdbfile: string
@param pdbfile: PDB filename
@type parse: boolean
@param pdbfile: PDB filename
"""
# All of the record types for which pdb_record_reader has schemas
self.head_types = rec_schemas.keys()
self.coord_types = ['ATOM',
'HETATM',
'TER',
'END',
'MODEL',
'ENDMDL',
'CONECT',
'ANISOU']
self.set_blank_values()
self.logger = logging.getLogger(__name__)
if pdbfile:
self.read_pdb(pdbfile)
elif sasmol:
self.process_sasmol_header(sasmol)
elif text:
self.process_header_text(text)
if (pdbfile or sasmol or text) and parse:
try:
self.parse_header()
except NoHeaderReadError:
pass
except:
if pdbfile:
raise ValueError(
'Unable to parse PDB header: {0:s}'.format(pdbfile))
else:
raise ValueError(
'Unable to parse PDB header: {0:s}'.format(sasmol.pdbname))
return
def set_blank_values(self):
"""
Blank values to be filled by processed header
"""
self.pdb_recs = {}
for rec_type in self.head_types:
self.pdb_recs[rec_type] = []
self.reference_info = ReferenceInfo()
self.chain_info = data_struct.Info(scan_type='chain')
self.read_valid_header = False
self.has_seq_info = False
return
def process_header_line(self, line):
"""
Process single line from a PDB header. Processed data is appended to
the appropriate list in self.pdb_recs dictionary (key is the record
type).
@type line: string
@param line: Line from a PDB header
"""
err = None
rec_type = line[0:6].strip()
if rec_type not in self.coord_types:
# Adjust records where naming discrepancy has occured
if rec_type[0:5] in ['MTRIX', 'ORIGX', 'SCALE']:
rec_type = rec_type[0:5] + 'n'
try:
# Parse line in to variables using schema
vals, err = parse_line(line, rec_schemas[rec_type])
self.pdb_recs[rec_type].append(vals)
except:
# If no schema available or parsing fails use NONSTD
# schema which just reads in the line as a text field
vals, err = parse_line(line, rec_schemas['NONSTD'])
self.pdb_recs['NONSTD'].append(vals)
if err:
err_txt = '\n'.join(err)
raise IOError(
'Unable to parse PDB header line:\n{0:s}\nError:\n{1:s}'.format(line, err_txt))
return
def process_header_text(self, header_txt):
"""
Parse list of header libes lines and place by record type in
self.pdb_recs dictionary
@type sasmol: sasmol.SasMol
@param sasmol: SasMol containing data read in from a PDB
"""
self.set_blank_values()
try:
for line in header_txt:
self.process_header_line(line)
except Exception as err:
py_err = str(err)
raise IOError(
'Unable to read header line from SasMol object: {0:s}\n{1:s}'.format(line, py_err))
return
def process_sasmol_header(self, sasmol):
"""
Read PDB file, parse lines and place by record type in self.pdb_recs
dictionary
@type sasmol: sasmol.SasMol
@param sasmol: SasMol containing data read in from a PDB
"""
header_txt = sasmol.header()
self.process_header_text(header_txt)
return
def read_pdb(self, pdbfile):
"""
Read PDB file, parse lines and place by card type in self.pdb_recs
dictionary
@type pdbfile: string
@param pdbfile: PDB filename
"""
self.set_blank_values()
# line count used for reporting purposes
line_no = 1
try:
with open(pdbfile, 'r') as f:
for line in f:
self.process_header_line(line)
line_no += 1
except IOError as err:
raise IOError('Unable to read PDB: {0:s}, line {1:d}\n{2:s}'.format(
pdbfile, line_no, err))
return
def is_obsolete(self):
"""
Is the PDB obsolete according to the header?
@rtype: boolean
@return: Flag to say PDB is noted as being obsolete in header
"""
obsolete = False
if 'OBSLTE' in self.pdb_recs:
if self.pdb_recs['OBSLTE']:
obsolete = True
return obsolete
def is_split(self):
"""
Is the PDB part of a structure split across multiple files?
@rtype: boolean
@return: Flag to say PDB is noted as being part of a split
structure in the header
"""
split = False
if 'SPLIT' in self.pdb_recs:
if self.pdb_recs['SPLIT']:
split = True
return split
def parse_header(self):
"""
Parse all header records in self.pdb_recs.
Populate self.reference_info and the following attributes of
self.chain_info: sequence, missing_resids, missing_atoms, heterogens,
disulphides and n_models.
"""
pdb_recs = self.pdb_recs
has_header_info = sum(len(v) for v in pdb_recs.itervalues())
if has_header_info:
if pdb_recs['NUMMDL']:
self.logger.info('Multiple models (' +
str(pdb_recs['NUMMDL'][0]['no_models']) +
') detected in the file.')
self.chain_info.n_models = int(
pdb_recs['NUMMDL'][0]['no_models'])
if pdb_recs['TITLE']:
self.reference_info.title = process_runon_line(
pdb_recs['TITLE'], 'text')
# SEQRES records contain full sequence information
self.process_seqres()
# Remarks contain quality metrics, BIOMT and missing residue/atom
# information
self.parse_remarks()
# Citation information
self.parse_jrnl()
if pdb_recs['SSBOND']:
self.process_disulphides()
# Information about non-standard residues
self.process_header_het()
self.process_compnd()
self.read_valid_header = True
self.has_seq_info = self.check_header_seq()
else:
raise NoHeaderReadError("No header has been read into object")
return
def check_header_seq(self):
"""
Check to see if parsed header information included a sequence for one
or more chains
@rtype : boolean
@return : Flag if sequence information has been read in
@todo: Need to ensure this is an adequate check, could probably check
if missing_resids is consistent with seqres
"""
has_seq = False
if self.chain_info.sequence:
has_seq = True
return has_seq
def parse_remarks(self):
"""
Parse all REMARK records.
Extracts missing residue (self.chain_info.missing_resids)
missing atom (self.chain_info.missing_atoms),
BIOMT (self.chain_info.biomt) and experimental quality metrics
(self.reference_info.metrics).
"""
if self.pdb_recs['REMARK']:
self.get_quality_metrics()
self.process_biomolecule()
self.process_missing_res()
self.process_missing_atoms()
else:
self.logger.info('No REMARK lines found in header:')
self.logger.info(
'BIOMT and missing residues cannot be evaluated')
return
def process_missing_res(self):
"""
Parse REMARK 465 records from a PDB to obtain missing residues.
Populate self.chain_info.missing_resids with a dictionary of the form:
{model_no:{chain: [{resid:resname}, ..]}}
"""
#missing_resids = self.chain_info.missing_resids
chain_info = self.chain_info
remarks465 = [x for x in self.pdb_recs['REMARK'] if x['num'] == 465]
# Extract missing residue data from text field of REMARK 465 records
missing_rec = self._remark465_missing_residues(remarks465)
# Create a dictionay for the form:
# missing_resids = {model_no:{chain: {resid:resname}, ..}}
# from the parsed REMARK lines
for model, grpd in itertools.groupby(
missing_rec, key=lambda x: x['model']):
for chain, residues in itertools.groupby(
grpd, key=lambda y: y['chain']):
residues = list(residues)
resids = [x['resid'] for x in residues]
resnames = [x['resname'] for x in residues]
chain_info.add_missing_resids(chain, resids, resnames, model)
n_missing = chain_info.no_missing_resids(subdiv=chain,
model_no=model)
self.logger.info(
str(n_missing) +
' missing residues in chain ' +
chain)
return
def _remark465_missing_residues(self, remarks465):
"""
Extract information from PDB REMARK 465 records for further processing
using the schema:
missing_schema = (
('model', 0, 3, None), # 0, model
('resname', 4, 7, None), # 1, residue name
('chain', 8, None, None), # 2, chain ID
('resid', 9, 16, int), # 3, residue number
('insert', 16, None, None), # 4, insertion code
)
@type remarks465 : list
@param remarks465: List containing PDB REMARK records after basic
parsing
@rtype : list
@return : List of dictionaries extracting data from the
text field of the original PDB REMARK 465 records
"""
# Schema is for parsing the text section extracted from the original
# REMARK record not the original PDB record
missing_schema = (
('model', 0, 3, None), # 0, model
('resname', 4, 7, None), # 1, residue name
('chain', 8, None, None), # 2, chain ID
('resid', 9, 16, int), # 3, residue number
('insert', 16, None, None), # 4, insertion code
)
missing_rec = []
# Useful records are preceeded by a header but also possibly other
# nonsense - only start reading after get to usable lines
start = False
for remark in remarks465:
if start:
rec, err = parse_line(remark['text'], missing_schema)
if err:
self.logger.warning(
'Possible malformed missing residue remark: ' +
remark['text'])
else:
try:
rec['model'] = int(rec['model'])
except:
rec['model'] = 1
missing_rec.append(rec)
elif remark['text'].startswith(' MODELS'):
self.logger.warning(
'Missing report for NMR' +
remark['text'].lower() +
'\n')
# Check for header of the residue list
# Note: M column not used as not in new NMR table format
elif 'RES C SSSEQI' in remark['text']:
start = True
return missing_rec
def process_missing_atoms(self):
"""
Parse REMARK 470 records from a PDB to obtain missing atoms.
Populate self.chain_info.missing_atoms with a dictionary of the form:
missing_atms = {model_no:{chain: {resid: {'atoms':[atm1, atm2, ...],'resname': resname}}}}
"""
missing_atoms = self.chain_info.missing_atoms
remarks470 = [x for x in self.pdb_recs['REMARK'] if x['num'] == 470]
missing_rec = self._remark470_missing_atoms(remarks470)
# Create a dictionay for the form:
# missing_atoms = {model_no:{chain: {resid:
# {'atoms':[atm1, atm2, ...],
# 'resname': resname}
# }}}
# from the parsed REMARK lines
for model, grpd in itertools.groupby(
missing_rec, key=lambda x: x['model']):
missing_atoms[model] = {}
for chain, resids in itertools.groupby(
grpd, key=lambda y: y['chain']):
missing_atoms[model][chain] = {}
for res in resids:
missing_atoms[model][chain][res['resid']] = {
'resname': res['resname'],
'atoms': res['atoms']
}
return
def _remark470_missing_atoms(self, remarks470):
"""
Extract information from PDB REMARK 470 records for further processing
using schema:
missing_schema = (
('model', 0, 3, None), # 0, model
('resname', 4, 7, None), # 1, residue name
('chain', 8, None, None), # 2, chain ID
('resid', 9, 16, int), # 3, residue number
('insert', 16, 16, None), # 4, insertion code
('atoms', 17, 80, None), # 5, atom names
)
@type remarks470 : list
@param remarks470: List containing PDB REMARK records after basic
parsing
@rtype : list
@return : List of dictionaries extracting data from the
text field of the original PDB REMARK 470 records
"""
# Schema is for parsing the text section extracted from the original REMARK
# record not the original PDB record
missing_schema = (
('model', 0, 3, None), # 0, model
('resname', 4, 7, None), # 1, residue name
('chain', 8, None, None), # 2, chain ID
('resid', 9, 16, int), # 3, residue number
('insert', 16, 16, None), # 4, insertion code
('atoms', 17, 80, None), # 5, atom names
)
missing_rec = []
        # Useful records are preceded by a header and possibly other
        # nonsense
start = False
for remark in remarks470:
if start:
rec, err = parse_line(remark['text'], missing_schema)
rec['atoms'] = remark['text'][17:-1].split()
if err:
self.logger.warning(
'Possible malformed missing atom remark: ' +
remark['text'])
else:
try:
rec['model'] = int(rec['model'])
except:
rec['model'] = 1
missing_rec.append(rec)
elif 'RES CSSEQI' in remark['text']:
start = True
return missing_rec
def process_seqres(self):
"""
Parse SEQRES records to provide a sequence for each chain
Populate self.chain_info.sequence as a dictionary of the form:
{chain: [resname, ..]}
resnames are three letter codes taken from SEQRES records
"""
seqres_recs = self.pdb_recs['SEQRES']
# Group SEQRES records by chain
for chain, grpd in itertools.groupby(
seqres_recs, key=lambda x: x['chain']):
#self.chain_info.sequence[chain] = []
chain_seq = []
for entry in grpd:
#self.chain_info.sequence[chain] += entry['resnames']
chain_seq += entry['resnames']
self.chain_info.add_subdiv_sequence(chain, chain_seq)
return
def get_quality_metrics(self):
"""
Parse REMARK 2 and 3 records from PDB to obtain R values and resolution
Store in self.reference_info.metrics as a dictionary containing:
'resolution', 'r' and 'r free'
"""
remarks = self.pdb_recs['REMARK']
metrics = {}
for remark in remarks:
text = remark['text']
if remark['num'] == 2:
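                # REMARK 2 text reads 'RESOLUTION. <value> ANGSTROMS.'
                # (or 'NOT APPLICABLE'), so the value sits in a fixed
                # column range of the text field.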
if len(text.split()) != 0:
try:
metrics['resolution'] = float(text[15:19])
except:
metrics['resolution'] = None
if remark['num'] == 3:
if 'R VALUE (WORKING SET) :' in remark['text']:
try:
metrics['r'] = float(text.split()[5])
except:
metrics['r'] = None
elif 'FREE R VALUE :' in remark['text']:
try:
metrics['r_free'] = float(text.split()[4])
except:
metrics['r_free'] = None
self.reference_info.metrics = metrics
return
def process_biomolecule(self):
"""
Parse REMARK 300 and 350 records from PDB to obtain biological unit
specification.
Populate self.chain_info.biomt with the following format:
biomt[biomol_no] = {
'subdivs' : [],
'auth_bio_unit' : '',
'soft_bio_unit' : '',
'rot' : [],
'trans' : []
}
rot = list of np.array((3,3))
trans = list of np.array(3)
"""
biomt = self.chain_info.biomt
# REMARK 300 section is a free text description of any biomolecules
# We extract a list of the numberical biomolecule labels described
biomol_300 = self._parse_biomol_300()
# REMARK 350 section contains the transforms for each biomolecule and
# information on how the description was arrived at.
# Transformation read in from BIOMT records
self._parse_biomol_350()
if len(biomol_300) != len(biomt.keys()):
self.logger.warning(
'No. biomolecules suggested in REMARK 300 and supplied in REMARK 350 records are inconsistent!')
if biomt:
self.logger.warning(
'BIOMT present - PDB may not represent the biological unit')
return
def _parse_biomol_300(self):
"""
Parse REMARK 300 records from PDB to obtain list of biological unit
labels which we expect to find specified in the REMARK 350 records.
"""
biomol_300 = []
remarks300 = [x for x in self.pdb_recs['REMARK'] if x['num'] == 300]
# REMARK 300 section is a free text description of any biomolecules
# described in REMARK 350 records
# We just want the biomolecule ID numbers to be described
for remark in remarks300:
if remark['text'].startswith('BIOMOLECULE:'):
for biomol_no in remark['text'][13:].split(','):
biomol_300.append(int(biomol_no))
return biomol_300
def _parse_biomol_350(self):
"""
Parse REMARK 350 records from PDB to obtain biological unit
specification.
Populate self.chain_info.biomt with the following format:
biomt[biomol_no] = {
'subdivs' : [],
'auth_bio_unit' : '',
'soft_bio_unit' : '',
'rot' : [],
'trans' : []
}
rot = list of np.array((3,3))
trans = list of np.array(3)
"""
chain_info = self.chain_info
biomt = self.chain_info.biomt
remarks350 = [x for x in self.pdb_recs['REMARK'] if x['num'] == 350]
in_biomol = False
# REMARK 350 records contain data on origin of the biomolecule
# description along with the transformations needed to create it from
# the ATOM/HETATM coordinates
for remark in remarks350:
content = remark['text']
if content.startswith('BIOMOLECULE:'):
bm_no = int(content[13:])
chain_info.create_biomol(bm_no, subdivs=[])
in_biomol = True
last_seen_row = None
elif in_biomol and content.strip():
if content.startswith('AUTHOR DETERMINED'):
biomt[bm_no]['auth_bio_unit'] = content.split(':')[
1].strip()
elif content.startswith('SOFTWARE DETERMINED'):
biomt[bm_no]['soft_bio_unit'] = content.split(':')[
1].strip()
elif content[0:31] in ['APPLY THE FOLLOWING TO CHAINS: ',
' AND CHAINS: ']:
for chain in content[31:].split(','):
if chain != ' ':
biomt[bm_no]['subdivs'].append(chain.strip())
elif content.startswith(' BIOMT'):
# If we have not yet read any rows from a BIOMT matrix line
# initialize r(otation) and t(ranslation) arrays
                    if last_seen_row is None:
r = np.identity(3)
t = np.zeros(3)
# Read BIOMT record and parse to add information to r & t
last_seen_row = self._add_biomt_row(
content, last_seen_row, r, t)
# If we have finished reading a transformation add it to
# the chain_info object
                    if last_seen_row is None:
chain_info.add_biomt(bm_no, r, t)
return
def _add_biomt_row(self, biomt_text, last_seen, rot, trans):
"""
Parse the BIOMT information from a PDB REMARK 350 record and add to
rotation matrix and translation vector.
Example record:
BIOMT1 1 1.000000 0.000000 0.000000 0.00000
Column 0 = Record name, number indicated transform row
Column 1 = Transform number within this biomolecule
Columns 2,3,4 = rotation matrix row
Column 5 = translation vector row
@type biomt_text : string
@param biomt_text: Text containing content of the REMARK 350
containing a BIOMT line (record type and number
pre-stripped)
@type last_seen : integer
@param last_seen : Last row of the rotation/translation information
read or None if this should be the first line
"""
cols = biomt_text.split()
# Convert PDB row number to python zero based index for rot and trans
ndx = int(cols[0][-1]) - 1
# Should be first row or the one after the last read row
        if (last_seen is None and ndx == 0) or (last_seen == ndx - 1):
rot[ndx] = np.array([float(cols[2]),
float(cols[3]),
float(cols[4])])
trans[ndx] = float(cols[5])
if ndx == 2:
                # Only three rows - set last_seen for a new matrix on next read
last_seen = None
else:
last_seen = ndx
else:
raise IncompleteBiomtError('Incomplete BIOMT matrix encountered')
return last_seen
def parse_jrnl(self):
"""
        Parse the JRNL records from a PDB. Extract the authors, title,
        citation data, PubMed ID and DOI for the primary citation, if present.
Populates: self.reference_info.citation as a dictionary with keys based
on sub-record names:
{'authors': '', 'title': '', 'citation': '', 'pmid': '', 'doi': ''}
"""
jrnl_recs = self.pdb_recs['JRNL']
if jrnl_recs:
ref_data = {
'authors': '',
'title': '',
'citation': '',
'pmid': '',
'doi': '',
}
for rec in jrnl_recs:
rec_type = rec['text'][0:4].strip()
rec_val = rec['text'][7:]
if rec_type == 'AUTH':
if rec['text'][5] != ' ':
ref_data['authors'] += ',' + rec_val
else:
ref_data['authors'] = rec_val
elif rec_type == 'TITL':
if rec['text'][5] != ' ':
ref_data['title'] += ' ' + rec_val
else:
ref_data['title'] = rec_val
elif rec_type == 'REF':
ref_data['citation'] += ' '.join(rec_val.split())
elif rec_type == 'PMID':
ref_data['pmid'] = rec_val.strip()
elif rec_type == 'DOI':
ref_data['doi'] = rec_val.strip()
self.reference_info.citation = ref_data
return
def process_disulphides(self):
"""
Extract disulphide bond information from SSBOND records
Populates self.chain_info.disulphides as a list of
data_struct.Disulphide objects.
"""
ssbond_recs = self.pdb_recs['SSBOND']
if ssbond_recs:
for rec in ssbond_recs:
bond = data_struct.Disulphide(rec['chain1'], rec['resid1'],
rec['chain2'], rec['resid2'],
'chain')
self.chain_info.disulphides.append(bond)
return
def process_compnd(self):
"""
Parse COMPND records to get description of the macromolecular contents of
the PDB.
Populates self.reference_info.compnd dictionary with key/value pairs:
{mol_no: 'chains': [], 'name': '', 'fragment': '', 'type': 'protein'}
"""
compnds = {}
# Booleans used to determine whether we have finished reading a multi-line
# entry
open_name = False
open_fragment = False
for rec in self.pdb_recs['COMPND']:
content = rec['text']
# Title lines separate different molecule IDs
if 'MOL_ID:' in content:
mol_no = int(content.split(':')[1].strip(';'))
compnds[mol_no] = {
'chains': [],
'name': '',
'fragment': '',
'type': 'protein',
}
elif 'MOLECULE:' in content:
text = content.split(':')[1].strip()
if text:
if 'PROTEIN' in text:
compnds[mol_no]['type'] = 'protein'
elif text.startswith('RNA'):
compnds[mol_no]['type'] = 'rna'
elif text.startswith('DNA'):
compnds[mol_no]['type'] = 'dna'
                    # Complete entries end with a ';'; run-on lines do not
if text[-1] != ';':
open_name = True
else:
open_name = False
else:
open_name = True
compnds[mol_no]['name'] += text.strip(';')
elif 'CHAIN:' in content:
# CHAIN entry contains a comma separated list of chain IDs
chain_list = [c.strip() for c in content.strip(
';').strip(',')[6:].split(',')]
self.chain_info.subdivs = chain_list
compnds[mol_no]['chains'] = chain_list
elif 'FRAGMENT:' in content:
text = content.split(':')[1].strip()
if text[-1] != ';':
open_fragment = True
else:
open_fragment = False
compnds[mol_no]['fragment'] += text.strip(';')
elif open_name:
compnds[mol_no]['name'] += content.strip(';')
if content[-1] == ';':
open_name = False
elif open_fragment:
compnds[mol_no]['fragment'] += content.strip(';')
if content[-1] == ';':
open_fragment = False
self.reference_info.compnd = compnds
if 1 in compnds:
self.chain_info.chains = compnds[1]['chains']
return
def create_resid_dict(self, records):
"""
Create a dictionary mapping resnames to descriptions from a list of
dictionaries containing 'resname' and 'text' keys. Used to describe
HETATMs.
@type records: list
@param records: List of parsed PDB lines (dictionaries)
@rtype: dictionary
@return: Dictionary with residue name as key and text description
values
"""
dic = {}
for rec in records:
if rec['resname'] in dic:
dic[rec['resname']] += rec['text']
else:
dic[rec['resname']] = rec['text']
return dic
def process_header_het(self):
"""
Parse HETATM related records to get descriptions of the heterogens.
Populates self.chain_info.heterogens with a dictionary of the form:
{chain: {resid: resname}}
and self.reference_info.hets with:
{resname: text description}
and self.reference_info.formuls with:
{resname: text formula}
"""
pdb_recs = self.pdb_recs
formuls = self.reference_info.formuls
heterogens = self.chain_info.heterogens
self.reference_info.hets = self.create_resid_dict(pdb_recs['HETNAM'])
hets = self.reference_info.hets
        formuls = self.create_resid_dict(pdb_recs['FORMUL'])
        self.reference_info.formuls = formuls
for het in pdb_recs['HET']:
if het['resname'] not in hets:
self.logger.info(
'HETATM ' +
het['resname'] +
' not given description in header')
if het['resname'] not in formuls:
self.logger.info(
'HETATM ' +
het['resname'] +
' formula not given in header')
chain = het['chain']
if not chain in heterogens:
heterogens[chain] = {het['resid']: het['resname']}
else:
heterogens[chain][het['resid']] = het['resname']
return
| madscatt/zazzie_1.5 | trunk/sassie/build/pdbscan/header_reader.py | Python | gpl-3.0 | 33,239 |
# -*- coding: utf-8 -*-
#!/usr/bin/env python
import os
import os.path
import csv
import codecs
import string
import random
from glob import glob
from datetime import datetime, time
import django.core.management
import alcide.settings
django.core.management.setup_environ(alcide.settings)
from alcide.ressources.models import ActType, Service
def _to_int(str_int):
if not str_int:
return None
return int(str_int)
class UTF8Recoder:
"""
Iterator that reads an encoded stream and reencodes the input to UTF-8
"""
def __init__(self, f, encoding):
self.reader = codecs.getreader(encoding)(f)
def __iter__(self):
return self
def next(self):
return self.reader.next().encode("utf-8")
class UnicodeReader:
"""
A CSV reader which will iterate over lines in the CSV file "f",
which is encoded in the given encoding.
"""
def __init__(self, f, dialect=csv.excel, encoding="iso8859-15", **kwds):
f = UTF8Recoder(f, encoding)
self.reader = csv.reader(f, dialect=dialect, **kwds)
def next(self):
row = self.reader.next()
return [unicode(s, "utf-8") for s in row]
def __iter__(self):
return self
def main():
ActType.objects.all().delete()
for path in glob('./scripts/types_d_actes/*.csv'):
filename = os.path.basename(path)
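        # The service name is encoded in the file name: drop the fixed
        # 12-character prefix and the '.csv' extension, then map
        # underscores back to spaces.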
service = Service.objects.get(name=filename[12:-4].replace('_', ' '))
csvfile = open(path, 'rb')
csvlines = UnicodeReader(csvfile, delimiter=',', quotechar='"',encoding='utf-8')
for line in csvlines:
a = ActType.objects.create(service=service,
old_id=line[0],
name=line[1],
billable=not bool(line[2]),
display_first=bool(line[3]))
print a.id, a.old_id, a.name, a.service
if __name__ == "__main__":
main()
| ZTH1970/alcide | scripts/import_types_actes.py | Python | agpl-3.0 | 1,917 |
try:
# installed by bootstrap.py
import sqla_plugin_base as plugin_base
except ImportError:
# assume we're a package, use traditional import
from . import plugin_base
import argparse
import collections
from functools import update_wrapper
import inspect
import itertools
import operator
import os
import re
import sys
import pytest
try:
import xdist # noqa
has_xdist = True
except ImportError:
has_xdist = False
py2k = sys.version_info < (3, 0)
if py2k:
try:
import sqla_reinvent_fixtures as reinvent_fixtures_py2k
except ImportError:
from . import reinvent_fixtures_py2k
def pytest_addoption(parser):
group = parser.getgroup("sqlalchemy")
def make_option(name, **kw):
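        # plugin_base.setup_options() registers options through this shim;
        # callback-style keyword arguments are translated into argparse
        # Actions below.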
callback_ = kw.pop("callback", None)
if callback_:
class CallableAction(argparse.Action):
def __call__(
self, parser, namespace, values, option_string=None
):
callback_(option_string, values, parser)
kw["action"] = CallableAction
zeroarg_callback = kw.pop("zeroarg_callback", None)
if zeroarg_callback:
class CallableAction(argparse.Action):
def __init__(
self,
option_strings,
dest,
default=False,
required=False,
help=None, # noqa
):
super(CallableAction, self).__init__(
option_strings=option_strings,
dest=dest,
nargs=0,
const=True,
default=default,
required=required,
help=help,
)
def __call__(
self, parser, namespace, values, option_string=None
):
zeroarg_callback(option_string, values, parser)
kw["action"] = CallableAction
group.addoption(name, **kw)
plugin_base.setup_options(make_option)
plugin_base.read_config()
def pytest_configure(config):
if hasattr(config, "workerinput"):
plugin_base.restore_important_follower_config(config.workerinput)
plugin_base.configure_follower(config.workerinput["follower_ident"])
else:
if config.option.write_idents and os.path.exists(
config.option.write_idents
):
os.remove(config.option.write_idents)
plugin_base.pre_begin(config.option)
plugin_base.set_coverage_flag(
bool(getattr(config.option, "cov_source", False))
)
plugin_base.set_fixture_functions(PytestFixtureFunctions)
if config.option.dump_pyannotate:
global DUMP_PYANNOTATE
DUMP_PYANNOTATE = True
DUMP_PYANNOTATE = False
@pytest.fixture(autouse=True)
def collect_types_fixture():
if DUMP_PYANNOTATE:
from pyannotate_runtime import collect_types
collect_types.start()
yield
if DUMP_PYANNOTATE:
collect_types.stop()
def pytest_sessionstart(session):
from sqlalchemy.testing import asyncio
asyncio._assume_async(plugin_base.post_begin)
def pytest_sessionfinish(session):
from sqlalchemy.testing import asyncio
asyncio._maybe_async_provisioning(plugin_base.final_process_cleanup)
if session.config.option.dump_pyannotate:
from pyannotate_runtime import collect_types
collect_types.dump_stats(session.config.option.dump_pyannotate)
def pytest_collection_finish(session):
if session.config.option.dump_pyannotate:
from pyannotate_runtime import collect_types
lib_sqlalchemy = os.path.abspath("lib/sqlalchemy")
def _filter(filename):
filename = os.path.normpath(os.path.abspath(filename))
if "lib/sqlalchemy" not in os.path.commonpath(
[filename, lib_sqlalchemy]
):
return None
if "testing" in filename:
return None
return filename
collect_types.init_types_collection(filter_filename=_filter)
if has_xdist:
import uuid
def pytest_configure_node(node):
from sqlalchemy.testing import provision
from sqlalchemy.testing import asyncio
# the master for each node fills workerinput dictionary
# which pytest-xdist will transfer to the subprocess
plugin_base.memoize_important_follower_config(node.workerinput)
node.workerinput["follower_ident"] = "test_%s" % uuid.uuid4().hex[0:12]
asyncio._maybe_async_provisioning(
provision.create_follower_db, node.workerinput["follower_ident"]
)
def pytest_testnodedown(node, error):
from sqlalchemy.testing import provision
from sqlalchemy.testing import asyncio
asyncio._maybe_async_provisioning(
provision.drop_follower_db, node.workerinput["follower_ident"]
)
def pytest_collection_modifyitems(session, config, items):
# look for all those classes that specify __backend__ and
# expand them out into per-database test cases.
# this is much easier to do within pytest_pycollect_makeitem, however
# pytest is iterating through cls.__dict__ as makeitem is
# called which causes a "dictionary changed size" error on py3k.
# I'd submit a pullreq for them to turn it into a list first, but
# it's to suit the rather odd use case here which is that we are adding
# new classes to a module on the fly.
from sqlalchemy.testing import asyncio
rebuilt_items = collections.defaultdict(
lambda: collections.defaultdict(list)
)
items[:] = [
item
for item in items
if isinstance(item.parent, pytest.Instance)
and not item.parent.parent.name.startswith("_")
]
test_classes = set(item.parent for item in items)
def setup_test_classes():
for test_class in test_classes:
for sub_cls in plugin_base.generate_sub_tests(
test_class.cls, test_class.parent.module
):
if sub_cls is not test_class.cls:
per_cls_dict = rebuilt_items[test_class.cls]
# support pytest 5.4.0 and above pytest.Class.from_parent
ctor = getattr(pytest.Class, "from_parent", pytest.Class)
for inst in ctor(
name=sub_cls.__name__, parent=test_class.parent.parent
).collect():
for t in inst.collect():
per_cls_dict[t.name].append(t)
# class requirements will sometimes need to access the DB to check
# capabilities, so need to do this for async
asyncio._maybe_async_provisioning(setup_test_classes)
newitems = []
for item in items:
if item.parent.cls in rebuilt_items:
newitems.extend(rebuilt_items[item.parent.cls][item.name])
else:
newitems.append(item)
if py2k:
for item in newitems:
reinvent_fixtures_py2k.scan_for_fixtures_to_use_for_class(item)
# seems like the functions attached to a test class aren't sorted already?
# is that true and why's that? (when using unittest, they're sorted)
items[:] = sorted(
newitems,
key=lambda item: (
item.parent.parent.parent.name,
item.parent.parent.name,
item.name,
),
)
def pytest_pycollect_makeitem(collector, name, obj):
if inspect.isclass(obj) and plugin_base.want_class(name, obj):
from sqlalchemy.testing import config
if config.any_async:
obj = _apply_maybe_async(obj)
ctor = getattr(pytest.Class, "from_parent", pytest.Class)
return [
ctor(name=parametrize_cls.__name__, parent=collector)
for parametrize_cls in _parametrize_cls(collector.module, obj)
]
elif (
inspect.isfunction(obj)
and isinstance(collector, pytest.Instance)
and plugin_base.want_method(collector.cls, obj)
):
# None means, fall back to default logic, which includes
# method-level parametrize
return None
else:
# empty list means skip this item
return []
def _is_wrapped_coroutine_function(fn):
while hasattr(fn, "__wrapped__"):
fn = fn.__wrapped__
return inspect.iscoroutinefunction(fn)
def _apply_maybe_async(obj, recurse=True):
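    # Wrap every test_* method of the class (and of its base classes) so it is
    # invoked through asyncio._maybe_async, letting plain test methods drive
    # async database backends.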
from sqlalchemy.testing import asyncio
for name, value in vars(obj).items():
if (
(callable(value) or isinstance(value, classmethod))
and not getattr(value, "_maybe_async_applied", False)
and (name.startswith("test_"))
and not _is_wrapped_coroutine_function(value)
):
is_classmethod = False
if isinstance(value, classmethod):
value = value.__func__
is_classmethod = True
@_pytest_fn_decorator
def make_async(fn, *args, **kwargs):
return asyncio._maybe_async(fn, *args, **kwargs)
do_async = make_async(value)
if is_classmethod:
do_async = classmethod(do_async)
do_async._maybe_async_applied = True
setattr(obj, name, do_async)
if recurse:
for cls in obj.mro()[1:]:
if cls != object:
_apply_maybe_async(cls, False)
return obj
def _parametrize_cls(module, cls):
"""implement a class-based version of pytest parametrize."""
if "_sa_parametrize" not in cls.__dict__:
return [cls]
_sa_parametrize = cls._sa_parametrize
classes = []
for full_param_set in itertools.product(
*[params for argname, params in _sa_parametrize]
):
cls_variables = {}
for argname, param in zip(
[_sa_param[0] for _sa_param in _sa_parametrize], full_param_set
):
if not argname:
raise TypeError("need argnames for class-based combinations")
argname_split = re.split(r",\s*", argname)
for arg, val in zip(argname_split, param.values):
cls_variables[arg] = val
parametrized_name = "_".join(
# token is a string, but in py2k pytest is giving us a unicode,
# so call str() on it.
str(re.sub(r"\W", "", token))
for param in full_param_set
for token in param.id.split("-")
)
name = "%s_%s" % (cls.__name__, parametrized_name)
newcls = type.__new__(type, name, (cls,), cls_variables)
setattr(module, name, newcls)
classes.append(newcls)
return classes
_current_class = None
def pytest_runtest_setup(item):
from sqlalchemy.testing import asyncio
from sqlalchemy.util import string_types
if not isinstance(item, pytest.Function):
return
# pytest_runtest_setup runs *before* pytest fixtures with scope="class".
# plugin_base.start_test_class_outside_fixtures may opt to raise SkipTest
# for the whole class and has to run things that are across all current
# databases, so we run this outside of the pytest fixture system altogether
# and ensure asyncio greenlet if any engines are async
global _current_class
if _current_class is None:
asyncio._maybe_async_provisioning(
plugin_base.start_test_class_outside_fixtures,
item.parent.parent.cls,
)
_current_class = item.parent.parent
def finalize():
global _current_class, _current_report
_current_class = None
try:
asyncio._maybe_async_provisioning(
plugin_base.stop_test_class_outside_fixtures,
item.parent.parent.cls,
)
except Exception as e:
# in case of an exception during teardown attach the original
# error to the exception message, otherwise it will get lost
if _current_report.failed:
if not e.args:
e.args = (
"__Original test failure__:\n"
+ _current_report.longreprtext,
)
elif e.args[-1] and isinstance(e.args[-1], string_types):
args = list(e.args)
args[-1] += (
"\n__Original test failure__:\n"
+ _current_report.longreprtext
)
e.args = tuple(args)
else:
e.args += (
"__Original test failure__",
_current_report.longreprtext,
)
raise
finally:
_current_report = None
item.parent.parent.addfinalizer(finalize)
def pytest_runtest_call(item):
# runs inside of pytest function fixture scope
# before test function runs
from sqlalchemy.testing import asyncio
asyncio._maybe_async(
plugin_base.before_test,
item,
item.parent.module.__name__,
item.parent.cls,
item.name,
)
_current_report = None
def pytest_runtest_logreport(report):
global _current_report
if report.when == "call":
_current_report = report
def pytest_runtest_teardown(item, nextitem):
# runs inside of pytest function fixture scope
# after test function runs
from sqlalchemy.testing import asyncio
asyncio._maybe_async(plugin_base.after_test, item)
@pytest.fixture(scope="class")
def setup_class_methods(request):
from sqlalchemy.testing import asyncio
cls = request.cls
if hasattr(cls, "setup_test_class"):
asyncio._maybe_async(cls.setup_test_class)
if py2k:
reinvent_fixtures_py2k.run_class_fixture_setup(request)
yield
if py2k:
reinvent_fixtures_py2k.run_class_fixture_teardown(request)
if hasattr(cls, "teardown_test_class"):
asyncio._maybe_async(cls.teardown_test_class)
asyncio._maybe_async(plugin_base.stop_test_class, cls)
@pytest.fixture(scope="function")
def setup_test_methods(request):
from sqlalchemy.testing import asyncio
# called for each test
self = request.instance
# before this fixture runs:
# 1. function level "autouse" fixtures under py3k (examples: TablesTest
# define tables / data, MappedTest define tables / mappers / data)
# 2. run homegrown function level "autouse" fixtures under py2k
if py2k:
reinvent_fixtures_py2k.run_fn_fixture_setup(request)
# 3. run outer xdist-style setup
if hasattr(self, "setup_test"):
asyncio._maybe_async(self.setup_test)
# alembic test suite is using setUp and tearDown
# xdist methods; support these in the test suite
# for the near term
if hasattr(self, "setUp"):
asyncio._maybe_async(self.setUp)
# inside the yield:
# 4. function level fixtures defined on test functions themselves,
# e.g. "connection", "metadata" run next
# 5. pytest hook pytest_runtest_call then runs
# 6. test itself runs
yield
# yield finishes:
# 7. function level fixtures defined on test functions
# themselves, e.g. "connection" rolls back the transaction, "metadata"
# emits drop all
# 8. pytest hook pytest_runtest_teardown hook runs, this is associated
# with fixtures close all sessions, provisioning.stop_test_class(),
# engines.testing_reaper -> ensure all connection pool connections
# are returned, engines created by testing_engine that aren't the
# config engine are disposed
asyncio._maybe_async(plugin_base.after_test_fixtures, self)
# 10. run xdist-style teardown
if hasattr(self, "tearDown"):
asyncio._maybe_async(self.tearDown)
if hasattr(self, "teardown_test"):
asyncio._maybe_async(self.teardown_test)
# 11. run homegrown function-level "autouse" fixtures under py2k
if py2k:
reinvent_fixtures_py2k.run_fn_fixture_teardown(request)
# 12. function level "autouse" fixtures under py3k (examples: TablesTest /
# MappedTest delete table data, possibly drop tables and clear mappers
# depending on the flags defined by the test class)
def getargspec(fn):
if sys.version_info.major == 3:
return inspect.getfullargspec(fn)
else:
return inspect.getargspec(fn)
def _pytest_fn_decorator(target):
"""Port of langhelpers.decorator with pytest-specific tricks."""
from sqlalchemy.util.langhelpers import format_argspec_plus
from sqlalchemy.util.compat import inspect_getfullargspec
def _exec_code_in_env(code, env, fn_name):
exec(code, env)
return env[fn_name]
def decorate(fn, add_positional_parameters=()):
spec = inspect_getfullargspec(fn)
if add_positional_parameters:
spec.args.extend(add_positional_parameters)
metadata = dict(
__target_fn="__target_fn", __orig_fn="__orig_fn", name=fn.__name__
)
metadata.update(format_argspec_plus(spec, grouped=False))
code = (
"""\
def %(name)s(%(args)s):
return %(__target_fn)s(%(__orig_fn)s, %(apply_kw)s)
"""
% metadata
)
decorated = _exec_code_in_env(
code, {"__target_fn": target, "__orig_fn": fn}, fn.__name__
)
if not add_positional_parameters:
decorated.__defaults__ = getattr(fn, "__func__", fn).__defaults__
decorated.__wrapped__ = fn
return update_wrapper(decorated, fn)
else:
# this is the pytest hacky part. don't do a full update wrapper
# because pytest is really being sneaky about finding the args
# for the wrapped function
decorated.__module__ = fn.__module__
decorated.__name__ = fn.__name__
if hasattr(fn, "pytestmark"):
decorated.pytestmark = fn.pytestmark
return decorated
return decorate
class PytestFixtureFunctions(plugin_base.FixtureFunctions):
def skip_test_exception(self, *arg, **kw):
return pytest.skip.Exception(*arg, **kw)
def mark_base_test_class(self):
return pytest.mark.usefixtures(
"setup_class_methods", "setup_test_methods"
)
_combination_id_fns = {
"i": lambda obj: obj,
"r": repr,
"s": str,
"n": lambda obj: obj.__name__
if hasattr(obj, "__name__")
else type(obj).__name__,
}
def combinations(self, *arg_sets, **kw):
"""Facade for pytest.mark.parametrize.
Automatically derives argument names from the callable which in our
case is always a method on a class with positional arguments.
ids for parameter sets are derived using an optional template.
"""
from sqlalchemy.testing import exclusions
if sys.version_info.major == 3:
if len(arg_sets) == 1 and hasattr(arg_sets[0], "__next__"):
arg_sets = list(arg_sets[0])
else:
if len(arg_sets) == 1 and hasattr(arg_sets[0], "next"):
arg_sets = list(arg_sets[0])
argnames = kw.pop("argnames", None)
def _filter_exclusions(args):
result = []
gathered_exclusions = []
for a in args:
if isinstance(a, exclusions.compound):
gathered_exclusions.append(a)
else:
result.append(a)
return result, gathered_exclusions
id_ = kw.pop("id_", None)
tobuild_pytest_params = []
has_exclusions = False
if id_:
_combination_id_fns = self._combination_id_fns
# because itemgetter is not consistent for one argument vs.
# multiple, make it multiple in all cases and use a slice
# to omit the first argument
_arg_getter = operator.itemgetter(
0,
*[
idx
for idx, char in enumerate(id_)
if char in ("n", "r", "s", "a")
]
)
fns = [
(operator.itemgetter(idx), _combination_id_fns[char])
for idx, char in enumerate(id_)
if char in _combination_id_fns
]
for arg in arg_sets:
if not isinstance(arg, tuple):
arg = (arg,)
fn_params, param_exclusions = _filter_exclusions(arg)
parameters = _arg_getter(fn_params)[1:]
if param_exclusions:
has_exclusions = True
tobuild_pytest_params.append(
(
parameters,
param_exclusions,
"-".join(
comb_fn(getter(arg)) for getter, comb_fn in fns
),
)
)
else:
for arg in arg_sets:
if not isinstance(arg, tuple):
arg = (arg,)
fn_params, param_exclusions = _filter_exclusions(arg)
if param_exclusions:
has_exclusions = True
tobuild_pytest_params.append(
(fn_params, param_exclusions, None)
)
pytest_params = []
for parameters, param_exclusions, id_ in tobuild_pytest_params:
if has_exclusions:
parameters += (param_exclusions,)
param = pytest.param(*parameters, id=id_)
pytest_params.append(param)
def decorate(fn):
if inspect.isclass(fn):
if has_exclusions:
raise NotImplementedError(
"exclusions not supported for class level combinations"
)
if "_sa_parametrize" not in fn.__dict__:
fn._sa_parametrize = []
fn._sa_parametrize.append((argnames, pytest_params))
return fn
else:
if argnames is None:
_argnames = getargspec(fn).args[1:]
else:
_argnames = re.split(r", *", argnames)
if has_exclusions:
_argnames += ["_exclusions"]
@_pytest_fn_decorator
def check_exclusions(fn, *args, **kw):
_exclusions = args[-1]
if _exclusions:
exlu = exclusions.compound().add(*_exclusions)
fn = exlu(fn)
return fn(*args[0:-1], **kw)
def process_metadata(spec):
spec.args.append("_exclusions")
fn = check_exclusions(
fn, add_positional_parameters=("_exclusions",)
)
return pytest.mark.parametrize(_argnames, pytest_params)(fn)
return decorate
def param_ident(self, *parameters):
ident = parameters[0]
return pytest.param(*parameters[1:], id=ident)
def fixture(self, *arg, **kw):
from sqlalchemy.testing import config
from sqlalchemy.testing import asyncio
# wrapping pytest.fixture function. determine if
# decorator was called as @fixture or @fixture().
if len(arg) > 0 and callable(arg[0]):
            # was called as bare @fixture; arg[0] is the function to wrap.
fn = arg[0]
arg = arg[1:]
else:
            # was called as @fixture(...); we don't have the function yet.
fn = None
# create a pytest.fixture marker. because the fn is not being
# passed, this is always a pytest.FixtureFunctionMarker()
# object (or whatever pytest is calling it when you read this)
# that is waiting for a function.
fixture = pytest.fixture(*arg, **kw)
# now apply wrappers to the function, including fixture itself
def wrap(fn):
if config.any_async:
fn = asyncio._maybe_async_wrapper(fn)
# other wrappers may be added here
if py2k and "autouse" in kw:
# py2k workaround for too-slow collection of autouse fixtures
# in pytest 4.6.11. See notes in reinvent_fixtures_py2k for
# rationale.
# comment this condition out in order to disable the
# py2k workaround entirely.
reinvent_fixtures_py2k.add_fixture(fn, fixture)
else:
# now apply FixtureFunctionMarker
fn = fixture(fn)
return fn
if fn:
return wrap(fn)
else:
return wrap
def get_current_test_name(self):
return os.environ.get("PYTEST_CURRENT_TEST")
def async_test(self, fn):
from sqlalchemy.testing import asyncio
@_pytest_fn_decorator
def decorate(fn, *args, **kwargs):
asyncio._run_coroutine_function(fn, *args, **kwargs)
return decorate(fn)
| monetate/sqlalchemy | lib/sqlalchemy/testing/plugin/pytestplugin.py | Python | mit | 25,560 |
# -*- coding: utf-8 -*-
"""
Created on Sun Nov 29 01:54:01 2015
@author: Zlati
"""
from distutils.core import setup
from setuptools import find_packages
setup(name='pyportfolio',
version='0.0.1',
description='Package for online/offline portfolio optimisation',
url="https://github.com/zlatiadam/PyPortfolio",
download_url="",
author="Ádám Zlatniczki",
author_email="[email protected]",
license='GNU General Public License 2.0',
packages=find_packages(),
package_data={},
keywords=["portfolio", "optimisation", "online", "offline", "backtest",
"robust"],
install_requires=["pandas", "numpy", "cvxopt", "scipy", "statsmodels",
"sklearn", "requests"],
      zip_safe=False)
 | zlatiadam/PyPortfolio | setup.py | Python | gpl-2.0 | 771 |
from nose.tools import assert_equal
from nose.tools import assert_is
from nose.tools import assert_not_equal
from nose.tools import assert_raises
from nose.tools import assert_true
from nose.tools import raises
import networkx
class BaseGraphTester(object):
""" Tests for data-structure independent graph class features."""
def test_contains(self):
G=self.K3
assert(1 in G )
assert(4 not in G )
assert('b' not in G )
assert([] not in G ) # no exception for nonhashable
assert({1:1} not in G) # no exception for nonhashable
def test_order(self):
G=self.K3
assert_equal(len(G),3)
assert_equal(G.order(),3)
assert_equal(G.number_of_nodes(),3)
def test_nodes(self):
G=self.K3
assert_equal(sorted(G.nodes()),self.k3nodes)
assert_equal(sorted(G.nodes(data=True)),[(0,{}),(1,{}),(2,{})])
def test_has_node(self):
G=self.K3
assert(G.has_node(1))
assert(not G.has_node(4))
assert(not G.has_node([])) # no exception for nonhashable
assert(not G.has_node({1:1})) # no exception for nonhashable
def test_has_edge(self):
G=self.K3
assert_equal(G.has_edge(0,1),True)
assert_equal(G.has_edge(0,-1),False)
def test_neighbors(self):
G=self.K3
assert_equal(sorted(G.neighbors(0)),[1,2])
assert_raises((KeyError,networkx.NetworkXError), G.neighbors,-1)
def test_edges(self):
G=self.K3
assert_equal(sorted(G.edges()),[(0,1),(0,2),(1,2)])
assert_equal(sorted(G.edges(0)),[(0,1),(0,2)])
f=lambda x:list(G.edges(x))
assert_raises((KeyError,networkx.NetworkXError), f, -1)
def test_weighted_degree(self):
G=self.Graph()
G.add_edge(1,2,weight=2)
G.add_edge(2,3,weight=3)
assert_equal(list(d for n, d in G.degree(weight='weight')), [2, 5, 3])
assert_equal(dict(G.degree(weight='weight')), {1: 2, 2: 5, 3: 3})
assert_equal(G.degree(1,weight='weight'), 2)
assert_equal(list(G.degree([1],weight='weight')), [(1, 2)])
def test_degree(self):
G=self.K3
assert_equal(list(G.degree()),[(0,2),(1,2),(2,2)])
assert_equal(dict(G.degree()),{0:2,1:2,2:2})
assert_equal(G.degree(0), 2)
def test_size(self):
G=self.K3
assert_equal(G.size(),3)
assert_equal(G.number_of_edges(),3)
def test_nbunch_iter(self):
G=self.K3
assert_equal(list(G.nbunch_iter()),self.k3nodes) # all nodes
assert_equal(list(G.nbunch_iter(0)),[0]) # single node
assert_equal(list(G.nbunch_iter([0,1])),[0,1]) # sequence
# sequence with none in graph
assert_equal(list(G.nbunch_iter([-1])),[])
# string sequence with none in graph
assert_equal(list(G.nbunch_iter("foo")),[])
# node not in graph doesn't get caught upon creation of iterator
bunch=G.nbunch_iter(-1)
# but gets caught when iterator used
assert_raises(networkx.NetworkXError,list,bunch)
# unhashable doesn't get caught upon creation of iterator
bunch=G.nbunch_iter([0,1,2,{}])
# but gets caught when iterator hits the unhashable
assert_raises(networkx.NetworkXError,list,bunch)
@raises(networkx.NetworkXError)
def test_nbunch_iter_node_format_raise(self):
"""Tests that a node that would have failed string formatting
doesn't cause an error when attempting to raise a
:exc:`networkx.NetworkXError`.
For more information, see pull request #1813.
"""
G = self.Graph()
nbunch = [('x', set())]
list(G.nbunch_iter(nbunch))
def test_selfloop_degree(self):
G=self.Graph()
G.add_edge(1, 1)
assert_equal(list(G.degree()), [(1, 2)])
assert_equal(dict(G.degree()), {1: 2})
assert_equal(G.degree(1), 2)
assert_equal(list(G.degree([1])), [(1, 2)])
assert_equal(G.degree(1, weight='weight'), 2)
def test_selfloops(self):
G=self.K3.copy()
G.add_edge(0,0)
assert_equal(list(G.nodes_with_selfloops()), [0])
assert_equal(list(G.selfloop_edges()), [(0, 0)])
assert_equal(G.number_of_selfloops(),1)
G.remove_edge(0,0)
G.add_edge(0,0)
G.remove_edges_from([(0,0)])
G.add_edge(1,1)
G.remove_node(1)
G.add_edge(0,0)
G.add_edge(1,1)
G.remove_nodes_from([0,1])
class BaseAttrGraphTester(BaseGraphTester):
""" Tests of graph class attribute features."""
def test_weighted_degree(self):
G=self.Graph()
G.add_edge(1,2,weight=2,other=3)
G.add_edge(2,3,weight=3,other=4)
assert_equal(list(d for n, d in G.degree(weight='weight')), [2, 5, 3])
assert_equal(dict(G.degree(weight='weight')),{1: 2, 2: 5, 3: 3})
assert_equal(G.degree(1,weight='weight'), 2)
assert_equal(list(G.degree([1],weight='weight')), [(1, 2)])
assert_equal(list(d for n, d in G.degree(weight='other')), [3, 7, 4])
assert_equal(dict(G.degree(weight='other')),{1: 3, 2: 7, 3: 4})
assert_equal(G.degree(1,weight='other'), 3)
assert_equal(list(G.degree([1],weight='other')), [(1, 3)])
def add_attributes(self,G):
G.graph['foo']=[]
G.node[0]['foo']=[]
G.remove_edge(1,2)
ll=[]
G.add_edge(1,2,foo=ll)
G.add_edge(2,1,foo=ll)
# attr_dict must be dict
assert_raises(networkx.NetworkXError,G.add_edge,0,1,attr_dict=[])
def test_name(self):
G=self.Graph(name='')
assert_equal(G.name,"")
G=self.Graph(name='test')
assert_equal(G.__str__(),"test")
assert_equal(G.name,"test")
def test_copy(self):
G = self.Graph()
G.add_node(0)
G.add_edge(1, 2)
self.add_attributes(G)
# deepcopy
H = G.copy()
self.is_deepcopy(H, G)
def test_class_copy(self):
G = self.Graph()
G.add_node(0)
G.add_edge(1, 2)
self.add_attributes(G)
# copy edge datadict but any container attr are same
H = G.__class__(G)
self.graphs_equal(H,G)
self.different_attrdict(H, G)
self.shallow_copy_attrdict(H,G)
def test_attr_reference(self):
G = self.Graph()
G.add_node(0)
G.add_edge(1, 2)
self.add_attributes(G)
# copy datadict by reference (with_data=False)
H = G.copy(with_data=False)
self.graphs_equal(H,G)
self.same_attrdict(H, G)
self.shallow_copy_attrdict(H,G)
def test_fresh_copy(self):
G = self.Graph()
G.add_node(0)
G.add_edge(1, 2)
self.add_attributes(G)
# copy graph structure but use fresh datadict
H = G.__class__()
H.add_nodes_from(G)
H.add_edges_from(G.edges())
assert_equal(len(G.node[0]), 1)
ddict = G.adj[1][2][0] if G.is_multigraph() else G.adj[1][2]
assert_equal(len(ddict), 1)
assert_equal(len(H.node[0]), 0)
ddict = H.adj[1][2][0] if H.is_multigraph() else H.adj[1][2]
assert_equal(len(ddict), 0)
def is_deepcopy(self,H,G):
self.graphs_equal(H,G)
self.different_attrdict(H,G)
self.deep_copy_attrdict(H,G)
def deep_copy_attrdict(self,H,G):
self.deepcopy_graph_attr(H,G)
self.deepcopy_node_attr(H,G)
self.deepcopy_edge_attr(H,G)
def deepcopy_graph_attr(self,H,G):
assert_equal(G.graph['foo'],H.graph['foo'])
G.graph['foo'].append(1)
assert_not_equal(G.graph['foo'],H.graph['foo'])
def deepcopy_node_attr(self,H,G):
assert_equal(G.node[0]['foo'],H.node[0]['foo'])
G.node[0]['foo'].append(1)
assert_not_equal(G.node[0]['foo'],H.node[0]['foo'])
def deepcopy_edge_attr(self,H,G):
assert_equal(G[1][2]['foo'],H[1][2]['foo'])
G[1][2]['foo'].append(1)
assert_not_equal(G[1][2]['foo'],H[1][2]['foo'])
def is_shallow_copy(self,H,G):
self.graphs_equal(H,G)
self.shallow_copy_attrdict(H,G)
def shallow_copy_attrdict(self,H,G):
self.shallow_copy_graph_attr(H,G)
self.shallow_copy_node_attr(H,G)
self.shallow_copy_edge_attr(H,G)
def shallow_copy_graph_attr(self,H,G):
assert_equal(G.graph['foo'],H.graph['foo'])
G.graph['foo'].append(1)
assert_equal(G.graph['foo'],H.graph['foo'])
def shallow_copy_node_attr(self,H,G):
assert_equal(G.node[0]['foo'],H.node[0]['foo'])
G.node[0]['foo'].append(1)
assert_equal(G.node[0]['foo'],H.node[0]['foo'])
def shallow_copy_edge_attr(self,H,G):
assert_equal(G[1][2]['foo'],H[1][2]['foo'])
G[1][2]['foo'].append(1)
assert_equal(G[1][2]['foo'],H[1][2]['foo'])
def same_attrdict(self, H, G):
old_foo=H[1][2]['foo']
H.add_edge(1,2,foo='baz')
assert_equal(G.edge,H.edge)
H.add_edge(1,2,foo=old_foo)
assert_equal(G.edge,H.edge)
old_foo=H.node[0]['foo']
H.node[0]['foo']='baz'
assert_equal(G.node,H.node)
H.node[0]['foo']=old_foo
assert_equal(G.node,H.node)
def different_attrdict(self, H, G):
old_foo=H[1][2]['foo']
H.add_edge(1,2,foo='baz')
assert_not_equal(G.edge,H.edge)
H.add_edge(1,2,foo=old_foo)
assert_equal(G.edge,H.edge)
old_foo=H.node[0]['foo']
H.node[0]['foo']='baz'
assert_not_equal(G.node,H.node)
H.node[0]['foo']=old_foo
assert_equal(G.node,H.node)
def graphs_equal(self,H,G):
assert_equal(G.adj,H.adj)
assert_equal(G.edge,H.edge)
assert_equal(G.node,H.node)
assert_equal(G.graph,H.graph)
assert_equal(G.name,H.name)
if not G.is_directed() and not H.is_directed():
assert_true(H.adj[1][2] is H.adj[2][1])
assert_true(G.adj[1][2] is G.adj[2][1])
else: # at least one is directed
if not G.is_directed():
G.pred=G.adj
G.succ=G.adj
if not H.is_directed():
H.pred=H.adj
H.succ=H.adj
assert_equal(G.pred,H.pred)
assert_equal(G.succ,H.succ)
assert_true(H.succ[1][2] is H.pred[2][1])
assert_true(G.succ[1][2] is G.pred[2][1])
def test_graph_attr(self):
G=self.K3
G.graph['foo']='bar'
assert_equal(G.graph['foo'], 'bar')
del G.graph['foo']
assert_equal(G.graph, {})
H=self.Graph(foo='bar')
assert_equal(H.graph['foo'], 'bar')
def test_node_attr(self):
G = self.K3
G.add_node(1, foo='bar')
assert_equal(list(G.nodes()), [0, 1, 2])
assert_equal(list(G.nodes(data=True)),
[(0, {}), (1, {'foo':'bar'}), (2, {})])
G.node[1]['foo'] = 'baz'
assert_equal(list(G.nodes(data=True)),
[(0, {}), (1, {'foo':'baz'}), (2, {})])
assert_equal(list(G.nodes(data='foo')),
[(0, None), (1, 'baz'), (2, None)])
assert_equal(list(G.nodes(data='foo', default='bar')),
[(0, 'bar'), (1, 'baz'), (2, 'bar')])
def test_node_attr2(self):
G=self.K3
a={'foo':'bar'}
G.add_node(3,attr_dict=a)
assert_equal(list(G.nodes()), [0, 1, 2, 3])
assert_equal(list(G.nodes(data=True)),
[(0, {}), (1, {}), (2, {}), (3, {'foo':'bar'})])
def test_edge_attr(self):
G=self.Graph()
G.add_edge(1,2,foo='bar')
assert_equal(list(G.edges(data=True)), [(1,2,{'foo':'bar'})])
assert_equal(list(G.edges(data='foo')), [(1,2,'bar')])
def test_edge_attr2(self):
G=self.Graph()
G.add_edges_from([(1,2),(3,4)],foo='foo')
assert_equal(sorted(G.edges(data=True)),
[(1,2,{'foo':'foo'}),(3,4,{'foo':'foo'})])
assert_equal(sorted(G.edges(data='foo')),
[(1,2,'foo'),(3,4,'foo')])
def test_edge_attr3(self):
G=self.Graph()
G.add_edges_from([(1,2,{'weight':32}),(3,4,{'weight':64})],foo='foo')
assert_equal(sorted(G.edges(data=True)),
[(1,2,{'foo':'foo','weight':32}),\
(3,4,{'foo':'foo','weight':64})])
G.remove_edges_from([(1,2),(3,4)])
G.add_edge(1,2,data=7,spam='bar',bar='foo')
assert_equal(sorted(G.edges(data=True)),
[(1,2,{'data':7,'spam':'bar','bar':'foo'})])
def test_edge_attr4(self):
G=self.Graph()
G.add_edge(1,2,data=7,spam='bar',bar='foo')
assert_equal(sorted(G.edges(data=True)),
[(1,2,{'data':7,'spam':'bar','bar':'foo'})])
G[1][2]['data']=10 # OK to set data like this
assert_equal(sorted(G.edges(data=True)),
[(1,2,{'data':10,'spam':'bar','bar':'foo'})])
G.edge[1][2]['data']=20 # another spelling, "edge"
assert_equal(sorted(G.edges(data=True)),
[(1,2,{'data':20,'spam':'bar','bar':'foo'})])
G.edge[1][2]['listdata']=[20,200]
G.edge[1][2]['weight']=20
assert_equal(sorted(G.edges(data=True)),
[(1,2,{'data':20,'spam':'bar',
'bar':'foo','listdata':[20,200],'weight':20})])
def test_attr_dict_not_dict(self):
# attr_dict must be dict
G=self.Graph()
edges=[(1,2)]
assert_raises(networkx.NetworkXError,G.add_edges_from,edges,
attr_dict=[])
def test_to_undirected(self):
G=self.K3
self.add_attributes(G)
H=networkx.Graph(G)
self.is_shallow_copy(H,G)
self.different_attrdict(H,G)
H=G.to_undirected()
self.is_deepcopy(H,G)
def test_to_directed(self):
G=self.K3
self.add_attributes(G)
H=networkx.DiGraph(G)
self.is_shallow_copy(H,G)
self.different_attrdict(H,G)
H=G.to_directed()
self.is_deepcopy(H,G)
def test_subgraph(self):
G=self.K3
self.add_attributes(G)
H=G.subgraph([0,1,2,5])
# assert_equal(H.name, 'Subgraph of ('+G.name+')')
H.name=G.name
self.graphs_equal(H,G)
self.same_attrdict(H,G)
self.shallow_copy_attrdict(H,G)
H=G.subgraph(0)
assert_equal(H.adj,{0:{}})
H=G.subgraph([])
assert_equal(H.adj,{})
assert_not_equal(G.adj,{})
def test_selfloops_attr(self):
G=self.K3.copy()
G.add_edge(0,0)
G.add_edge(1,1,weight=2)
assert_equal(list(G.selfloop_edges(data=True)),
[(0, 0, {}), (1, 1, {'weight':2})])
assert_equal(list(G.selfloop_edges(data='weight')),
[(0, 0, None), (1, 1, 2)])
class TestGraph(BaseAttrGraphTester):
"""Tests specific to dict-of-dict-of-dict graph data structure"""
def setUp(self):
self.Graph=networkx.Graph
# build dict-of-dict-of-dict K3
ed1,ed2,ed3 = ({},{},{})
self.k3adj={0: {1: ed1, 2: ed2},
1: {0: ed1, 2: ed3},
2: {0: ed2, 1: ed3}}
self.k3edges=[(0, 1), (0, 2), (1, 2)]
self.k3nodes=[0, 1, 2]
self.K3=self.Graph()
self.K3.adj=self.K3.edge=self.k3adj
self.K3.node={}
self.K3.node[0]={}
self.K3.node[1]={}
self.K3.node[2]={}
def test_data_input(self):
G=self.Graph(data={1:[2],2:[1]}, name="test")
assert_equal(G.name,"test")
assert_equal(sorted(G.adj.items()),[(1, {2: {}}), (2, {1: {}})])
G=self.Graph({1:[2],2:[1]}, name="test")
assert_equal(G.name,"test")
assert_equal(sorted(G.adj.items()),[(1, {2: {}}), (2, {1: {}})])
def test_adjacency(self):
G=self.K3
assert_equal(dict(G.adjacency()),
{0: {1: {}, 2: {}}, 1: {0: {}, 2: {}}, 2: {0: {}, 1: {}}})
def test_getitem(self):
G=self.K3
assert_equal(G[0],{1: {}, 2: {}})
assert_raises(KeyError, G.__getitem__, 'j')
assert_raises((TypeError,networkx.NetworkXError), G.__getitem__, ['A'])
def test_add_node(self):
G=self.Graph()
G.add_node(0)
assert_equal(G.adj,{0:{}})
# test add attributes
G.add_node(1,c='red')
G.add_node(2,{'c':'blue'})
G.add_node(3,{'c':'blue'},c='red')
assert_raises(networkx.NetworkXError, G.add_node, 4, [])
assert_raises(networkx.NetworkXError, G.add_node, 4, 4)
assert_equal(G.node[1]['c'],'red')
assert_equal(G.node[2]['c'],'blue')
assert_equal(G.node[3]['c'],'red')
# test updating attributes
G.add_node(1,c='blue')
G.add_node(2,{'c':'red'})
G.add_node(3,{'c':'red'},c='blue')
assert_equal(G.node[1]['c'],'blue')
assert_equal(G.node[2]['c'],'red')
assert_equal(G.node[3]['c'],'blue')
def test_add_nodes_from(self):
G=self.Graph()
G.add_nodes_from([0,1,2])
assert_equal(G.adj,{0:{},1:{},2:{}})
# test add attributes
G.add_nodes_from([0,1,2],c='red')
assert_equal(G.node[0]['c'],'red')
assert_equal(G.node[2]['c'],'red')
# test that attribute dicts are not the same
assert(G.node[0] is not G.node[1])
# test updating attributes
G.add_nodes_from([0,1,2],c='blue')
assert_equal(G.node[0]['c'],'blue')
assert_equal(G.node[2]['c'],'blue')
assert(G.node[0] is not G.node[1])
# test tuple input
H=self.Graph()
H.add_nodes_from(G.nodes(data=True))
assert_equal(H.node[0]['c'],'blue')
assert_equal(H.node[2]['c'],'blue')
assert(H.node[0] is not H.node[1])
# specific overrides general
H.add_nodes_from([0,(1,{'c':'green'}),(3,{'c':'cyan'})],c='red')
assert_equal(H.node[0]['c'],'red')
assert_equal(H.node[1]['c'],'green')
assert_equal(H.node[2]['c'],'blue')
assert_equal(H.node[3]['c'],'cyan')
def test_remove_node(self):
G=self.K3
G.remove_node(0)
assert_equal(G.adj,{1:{2:{}},2:{1:{}}})
assert_raises((KeyError,networkx.NetworkXError), G.remove_node,-1)
# generator here to implement list,set,string...
def test_remove_nodes_from(self):
G=self.K3
G.remove_nodes_from([0,1])
assert_equal(G.adj,{2:{}})
G.remove_nodes_from([-1]) # silent fail
def test_add_edge(self):
G=self.Graph()
G.add_edge(0,1)
assert_equal(G.adj,{0: {1: {}}, 1: {0: {}}})
G=self.Graph()
G.add_edge(*(0,1))
assert_equal(G.adj,{0: {1: {}}, 1: {0: {}}})
def test_add_edges_from(self):
G=self.Graph()
G.add_edges_from([(0,1),(0,2,{'weight':3})])
assert_equal(G.adj,{0: {1:{}, 2:{'weight':3}}, 1: {0:{}}, \
2:{0:{'weight':3}}})
G=self.Graph()
G.add_edges_from([(0,1),(0,2,{'weight':3}),(1,2,{'data':4})],data=2)
assert_equal(G.adj,{\
0: {1:{'data':2}, 2:{'weight':3,'data':2}}, \
1: {0:{'data':2}, 2:{'data':4}}, \
2: {0:{'weight':3,'data':2}, 1:{'data':4}} \
})
assert_raises(networkx.NetworkXError,
G.add_edges_from,[(0,)]) # too few in tuple
assert_raises(networkx.NetworkXError,
G.add_edges_from,[(0,1,2,3)]) # too many in tuple
assert_raises(TypeError, G.add_edges_from,[0]) # not a tuple
def test_remove_edge(self):
G=self.K3
G.remove_edge(0,1)
assert_equal(G.adj,{0:{2:{}},1:{2:{}},2:{0:{},1:{}}})
assert_raises((KeyError,networkx.NetworkXError), G.remove_edge,-1,0)
def test_remove_edges_from(self):
G=self.K3
G.remove_edges_from([(0,1)])
assert_equal(G.adj,{0:{2:{}},1:{2:{}},2:{0:{},1:{}}})
G.remove_edges_from([(0,0)]) # silent fail
def test_clear(self):
G=self.K3
G.clear()
assert_equal(G.adj,{})
def test_edges_data(self):
G=self.K3
assert_equal(sorted(G.edges(data=True)),[(0,1,{}),(0,2,{}),(1,2,{})])
assert_equal(sorted(G.edges(0,data=True)),[(0,1,{}),(0,2,{})])
f = lambda x: list(G.edges(x))
assert_raises((KeyError,networkx.NetworkXError), f,-1)
def test_get_edge_data(self):
G=self.K3
assert_equal(G.get_edge_data(0,1),{})
assert_equal(G[0][1],{})
assert_equal(G.get_edge_data(10,20),None)
assert_equal(G.get_edge_data(-1,0),None)
assert_equal(G.get_edge_data(-1,0,default=1),1)
class TestEdgeSubgraph(object):
"""Unit tests for the :meth:`Graph.edge_subgraph` method."""
def setup(self):
# Create a path graph on five nodes.
G = networkx.path_graph(5)
# Add some node, edge, and graph attributes.
for i in range(5):
G.node[i]['name'] = 'node{}'.format(i)
G.edge[0][1]['name'] = 'edge01'
G.edge[3][4]['name'] = 'edge34'
G.graph['name'] = 'graph'
# Get the subgraph induced by the first and last edges.
self.G = G
self.H = G.edge_subgraph([(0, 1), (3, 4)])
def test_correct_nodes(self):
"""Tests that the subgraph has the correct nodes."""
assert_equal([0, 1, 3, 4], sorted(self.H.nodes()))
def test_correct_edges(self):
"""Tests that the subgraph has the correct edges."""
assert_equal([(0, 1, 'edge01'), (3, 4, 'edge34')],
sorted(self.H.edges(data='name')))
def test_add_node(self):
"""Tests that adding a node to the original graph does not
affect the nodes of the subgraph.
"""
self.G.add_node(5)
assert_equal([0, 1, 3, 4], sorted(self.H.nodes()))
def test_remove_node(self):
"""Tests that removing a node in the original graph does not
affect the nodes of the subgraph.
"""
self.G.remove_node(0)
assert_equal([0, 1, 3, 4], sorted(self.H.nodes()))
def test_node_attr_dict(self):
"""Tests that the node attribute dictionary of the two graphs is
the same object.
"""
for v in self.H:
assert_equal(self.G.node[v], self.H.node[v])
# Making a change to G should make a change in H and vice versa.
self.G.node[0]['name'] = 'foo'
assert_equal(self.G.node[0], self.H.node[0])
self.H.node[1]['name'] = 'bar'
assert_equal(self.G.node[1], self.H.node[1])
def test_edge_attr_dict(self):
"""Tests that the edge attribute dictionary of the two graphs is
the same object.
"""
for u, v in self.H.edges():
assert_equal(self.G.edge[u][v], self.H.edge[u][v])
# Making a change to G should make a change in H and vice versa.
self.G.edge[0][1]['name'] = 'foo'
assert_equal(self.G.edge[0][1]['name'],
self.H.edge[0][1]['name'])
self.H.edge[3][4]['name'] = 'bar'
assert_equal(self.G.edge[3][4]['name'],
self.H.edge[3][4]['name'])
def test_graph_attr_dict(self):
"""Tests that the graph attribute dictionary of the two graphs
is the same object.
"""
assert_is(self.G.graph, self.H.graph)
| NvanAdrichem/networkx | networkx/classes/tests/test_graph.py | Python | bsd-3-clause | 23,512 |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
## ##
## Copyright 2010-2012, Neil Wallace <[email protected]> ##
## ##
## This program is free software: you can redistribute it and/or modify ##
## it under the terms of the GNU General Public License as published by ##
## the Free Software Foundation, either version 3 of the License, or ##
## (at your option) any later version. ##
## ##
## This program is distributed in the hope that it will be useful, ##
## but WITHOUT ANY WARRANTY; without even the implied warranty of ##
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ##
## GNU General Public License for more details. ##
## ##
## You should have received a copy of the GNU General Public License ##
## along with this program. If not, see <http://www.gnu.org/licenses/>. ##
## ##
###############################################################################
'''
This module provides Demo sql queries for the treatment_fills table
'''
from PyQt4 import QtSql
from lib_openmolar.common.db_orm import InsertableRecord
TABLENAME = "treatment_fills"
class DemoGenerator(object):
def __init__(self, database=None):
self.length = 1
self.record = InsertableRecord(database, TABLENAME)
self.tooth_tx_id = 0
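        # look up one treated tooth belonging to the demo treatment (treatment_id=2)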
q_query= QtSql.QSqlQuery(
"select ix from treatment_teeth where treatment_id=2", database)
if q_query.first():
self.tooth_tx_id = q_query.value(0).toInt()[0]
def demo_queries(self):
'''
return a list of queries to populate a demo database
'''
self.record.setValue('tooth_tx_id', self.tooth_tx_id)
self.record.setValue('surfaces', "MO")
self.record.setValue('material', "AM")
sql = self.record.insert_query
yield sql
if __name__ == "__main__":
from lib_openmolar.admin.connect import DemoAdminConnection
sc = DemoAdminConnection()
sc.connect()
builder = DemoGenerator(sc)
print builder.demo_queries()
| rowinggolfer/openmolar2 | src/lib_openmolar/admin/db_orm/admin_treatment_fills.py | Python | gpl-3.0 | 2,596 |
from django.conf.urls import re_path
from django.contrib import admin
from .media_library.views import VideoFormView
urlpatterns = [
re_path(r'^admin/', admin.site.urls),
re_path(r'^$', VideoFormView.as_view()),
]
| escaped/django-video-encoding | test_proj/urls.py | Python | bsd-3-clause | 224 |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at https://mozilla.org/MPL/2.0/.
# Copyright (c) 2014 Mozilla Corporation
import hjson
import os
from binascii import b2a_hex
import boto3
import datetime
import json
from ipwhois import IPWhois
from mozdef_util.utilities.logger import logger
from mozdef_util.utilities.toUTC import toUTC
class message(object):
def __init__(self):
'''
sends geomodel alert to SSO dashboard
'''
self.alert_classname = 'AlertGeomodel'
config_file_path = os.path.join(os.path.dirname(__file__), 'dashboard_geomodel.json')
json_obj = {}
with open(config_file_path, "r") as fd:
try:
json_obj = hjson.load(fd)
except ValueError:
                logger.error("FAILED to open the configuration file " + str(config_file_path))
self.config = json_obj
self.connect_db()
self.registration = 'geomodel'
self.priority = 1
def connect_db(self):
boto_session = boto3.session.Session(
aws_access_key_id=self.config['aws_access_key_id'],
aws_secret_access_key=self.config['aws_secret_access_key'],
region_name=self.config['aws_region']
)
dynamodb_resource = boto_session.resource('dynamodb')
table = dynamodb_resource.Table(self.config['db_table'])
self.dynamodb = table
def write_db_entry(self, alert_record):
self.dynamodb.put_item(Item=alert_record)
def onMessage(self, alert):
# As of Dec. 3, 2019, alert actions are given entire alerts rather
# than just their source
message = alert['_source']
if 'details' not in message:
return message
if 'principal' not in message['details']:
return message
if 'category' not in message['details']:
return message
if message['details']['category'].lower() != 'newcountry':
return message
full_email = message['details']['principal']
username = full_email.split('@')[0]
auth_full_username = self.config['auth_id_prefix'] + username
if 'city' not in message['details']['locality_details']:
return message
if 'country' not in message['details']['locality_details']:
return message
if 'source_ip' not in message['details']:
return message
city = message['details']['locality_details']['city']
country = message['details']['locality_details']['country']
source_ip = message['details']['source_ip']
new_ip_info = ""
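        # enrich the source IP with its ISP/netblock description from whois, when available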
try:
whois = IPWhois(source_ip).lookup_whois()
whois_str = whois['nets'][0]['description']
source_ip_isp = whois_str.replace('\n', ', ').replace('\r', '')
new_ip_info = '{} ({})'.format(source_ip, source_ip_isp)
except Exception:
new_ip_info = '{}'.format(source_ip)
new_location_str = ""
if city.lower() == 'unknown':
new_location_str += '{0}'.format(country)
else:
new_location_str += '{0}, {1}'.format(city, country)
event_timestamp = toUTC(message['events'][0]['documentsource']['details']['event_time'])
event_day = event_timestamp.strftime('%B %d, %Y')
summary = 'On {0} (UTC), did you login from {1} ({2})?'.format(event_day, new_location_str, source_ip)
previous_city = message['details']['previous_locality_details']['city']
previous_country = message['details']['previous_locality_details']['country']
if previous_city.lower() == 'unknown':
previous_location_str = '{0}'.format(previous_country)
else:
previous_location_str = '{0}, {1}'.format(previous_city, previous_country)
alert_record = {
'alert_id': b2a_hex(os.urandom(15)).decode(),
'alert_code': b2a_hex(self.alert_classname.encode()).decode(),
'user_id': auth_full_username,
'risk': self.config['risk'],
'summary': summary,
'description': self.config['description'],
'date': str(datetime.date.today()),
'url': self.config['url'],
'url_title': self.config['url_title'],
'duplicate': self.config['duplicate'],
'alert_str_json': json.dumps(message),
'details': {
'Timestamp': event_timestamp.strftime('%A, %B %d %Y %H:%M UTC'),
'New Location': new_location_str,
'New IP': new_ip_info,
'Previous Location': previous_location_str
}
}
self.write_db_entry(alert_record)
return message
| jeffbryner/MozDef | alerts/actions/dashboard_geomodel.py | Python | mpl-2.0 | 4,861 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This is the main executable of the mesos-cli unit tests.
"""
import unittest
from termcolor import colored
if __name__ == '__main__':
print colored("Running the Mesos CLI unit tests", "yellow")
unittest.main(verbosity=2, testRunner=unittest.TextTestRunner)
| craimbert/mesos | src/cli_new/tests/main.py | Python | apache-2.0 | 1,058 |
import hashlib
from django.db import models
from django.conf import settings
from openid.store.interface import OpenIDStore
import openid.store.nonce
from openid.association import Association as OIDAssociation
import time, base64
class Nonce(models.Model):
server_url = models.CharField(max_length=255)
timestamp = models.IntegerField()
salt = models.CharField(max_length=40)
def __unicode__(self):
return u"Nonce: %s for %s" % (self.salt, self.server_url)
class Association(models.Model):
server_url = models.TextField(max_length=2047)
handle = models.CharField(max_length=255)
secret = models.TextField(max_length=255) # Stored base64 encoded
issued = models.IntegerField()
lifetime = models.IntegerField()
assoc_type = models.TextField(max_length=64)
def __unicode__(self):
return u"Association: %s, %s" % (self.server_url, self.handle)
class DjangoOpenIDStore(OpenIDStore):
"""
The Python openid library needs an OpenIDStore subclass to persist data
related to OpenID authentications. This one uses our Django models.
"""
def storeAssociation(self, server_url, association):
assoc = Association(
server_url = server_url,
handle = association.handle,
secret = base64.encodestring(association.secret),
issued = association.issued,
            lifetime = association.lifetime,
assoc_type = association.assoc_type
)
assoc.save()
def getAssociation(self, server_url, handle=None):
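        # return the most recently issued association that is still valid;
        # expired associations are removed along the way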
assocs = []
if handle is not None:
assocs = Association.objects.filter(
server_url = server_url, handle = handle
)
else:
assocs = Association.objects.filter(
server_url = server_url
)
if not assocs:
return None
associations = []
for assoc in assocs:
association = OIDAssociation(
assoc.handle, base64.decodestring(assoc.secret), assoc.issued,
assoc.lifetime, assoc.assoc_type
)
if association.getExpiresIn() == 0:
self.removeAssociation(server_url, assoc.handle)
else:
associations.append((association.issued, association))
if not associations:
return None
return associations[-1][1]
def removeAssociation(self, server_url, handle):
assocs = list(Association.objects.filter(
server_url = server_url, handle = handle
))
assocs_exist = len(assocs) > 0
for assoc in assocs:
assoc.delete()
return assocs_exist
def useNonce(self, server_url, timestamp, salt):
# Has nonce expired?
if abs(timestamp - time.time()) > openid.store.nonce.SKEW:
return False
try:
nonce = Nonce.objects.get(
server_url__exact = server_url,
timestamp__exact = timestamp,
salt__exact = salt
)
except Nonce.DoesNotExist:
nonce = Nonce.objects.create(
server_url = server_url,
timestamp = timestamp,
salt = salt
)
return True
nonce.delete()
return False
def cleanupNonce(self):
Nonce.objects.filter(
            timestamp__lt = (int(time.time()) - openid.store.nonce.SKEW)
).delete()
    def cleanupAssociations(self):
Association.objects.extra(
where=['issued + lifetimeint < (%s)' % time.time()]
).delete()
def getAuthKey(self):
# Use first AUTH_KEY_LEN characters of md5 hash of SECRET_KEY
        return hashlib.md5(settings.SECRET_KEY).hexdigest()[:self.AUTH_KEY_LEN]
def isDumb(self):
return False
# Only include table for User->OpenID associations if User model is installed
from django.contrib.auth.models import User
if User._meta.installed:
class UserOpenidAssociation(models.Model):
"Auth integration - lets you associate 1+ OpenIDs with a User"
user = models.ForeignKey('auth.User', related_name = 'openids')
openid = models.CharField(max_length = 255)
created = models.DateTimeField(auto_now_add = True)
def __unicode__(self):
return u'%s can log in with %s' % (self.user, self.openid)
| fgirault/smeuhsocial | apps/django_openid/models.py | Python | mit | 4,459 |
from Elements.G.Populators.Base.IElementPopulator import IElementPopulator
class OneEntryForTemplateWithNamePopulator(IElementPopulator):
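    # Emits a single grammar rule: the rule name combines the module prefix with the
    # part of the template name after the first '-', and the body is the filled template.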
def __init__(self, sectionName, templates, settingsObject):
super(OneEntryForTemplateWithNamePopulator, self).__init__(sectionName, templates, settingsObject)
self.mainName = self.templateName.split('-')[1]
def getType(self):
return "CreateOneEntryForTemplateWithName"
def openClause(self, name):
return self.parameters["ModulePrefix"] + "_" + name.lower() + ":\n"
def closeClause(self):
return "\n;\n"
def createParameters(self):
parameter = {}
parametersForExpansion = [ "ModuleShortName", "ModuleLongName", "ModulePrefix" ]
for parameterName in parametersForExpansion:
parameter[parameterName] = self.parameters[parameterName]
parameter["PostFix"] = self.settingName.lower()
return parameter
def populate(self):
result = self.openClause(self.mainName)
result += self.templateResolver.fill(self.createParameters())
result += self.closeClause()
        return result
 | afronski/grammar-generator | grammar-generator/Elements/G/Populators/OneEntryForTemplateWithNamePopulator.py | Python | mit | 1,044 |
def stringToNGrams(s, n=3):
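    # Count every substring of s with length 1 through n (character n-grams).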
d = dict()
for i in range(len(s)):
for j in range(n):
sub_str = s[i:i+j+1]
if sub_str in d:
d[sub_str] += 1
else:
d[sub_str] = 1
return d
def findChineseText(line):
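    # Keep only CJK ideograph runs (U+4E00-U+9FFF) and join them into one string.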
import re
return "".join(re.findall('[\u4e00-\u9fff]+', line))
def processText(file, min_freq=25):
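    # Tally character n-grams (lengths 1-7) over the Chinese text of each line and
    # write those seen at least min_freq times to "<name>_out.txt", most frequent first.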
d = dict()
with open(file, "r", encoding="utf-8") as f:
for line in f:
text = findChineseText(line)
string_d = stringToNGrams(text, 7)
for k, v in string_d.items():
if k in d:
d[k] += v
else:
d[k] = v
output = file.split(".")[0] + "_out.txt"
    # sort by frequency, most frequent first, so the min_freq cutoff below works
    d = {k: v for k, v in sorted(d.items(), key=lambda item: item[1], reverse=True)}
with open(output, "w", encoding="utf-8") as f:
for k, v in d.items():
if v < min_freq:
break
f.write(f"{k}\t{v}\n")
| mewturn/Python | ngram_gen.py | Python | mit | 1,025 |
from flask import Flask, render_template, request, redirect, Response, session
from config import Config as cfg
import requests
import json
app = Flask(__name__, template_folder="templates")
app.debug = True
app.secret_key = cfg.SECRET_KEY
@app.route('/products', methods=['GET'])
def products():
""" Get a stores products """
headers = {
"X-Shopify-Access-Token": session.get("access_token"),
"Content-Type": "application/json"
}
endpoint = "/admin/products.json"
response = requests.get("https://{0}{1}".format(session.get("shop"),
endpoint), headers=headers)
if response.status_code == 200:
products = json.loads(response.text)
print(products)
return render_template('products.html', products=products.get("products"))
else:
return False
@app.route('/install', methods=['GET'])
def install():
"""
Connect a shopify store
"""
if request.args.get('shop'):
shop = request.args.get('shop')
else:
return Response(response="Error:parameter shop not found", status=500)
auth_url = "https://{0}/admin/oauth/authorize?client_id={1}&scope={2}&redirect_uri={3}".format(
shop, cfg.SHOPIFY_CONFIG["API_KEY"], cfg.SHOPIFY_CONFIG["SCOPE"],
cfg.SHOPIFY_CONFIG["REDIRECT_URI"]
)
print("Debug - auth URL: ", auth_url)
return redirect(auth_url)
@app.route('/connect', methods=['GET'])
def connect():
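    """ OAuth callback: exchange the temporary code for a permanent access token """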
if request.args.get("shop"):
params = {
"client_id": cfg.SHOPIFY_CONFIG["API_KEY"],
"client_secret": cfg.SHOPIFY_CONFIG["API_SECRET"],
"code": request.args.get("code")
}
resp = requests.post(
"https://{0}/admin/oauth/access_token".format(
request.args.get("shop")
),
data=params
)
if 200 == resp.status_code:
resp_json = json.loads(resp.text)
session['access_token'] = resp_json.get("access_token")
session['shop'] = request.args.get("shop")
return render_template('welcome.html', from_shopify=resp_json)
else:
print "Failed to get access token: ", resp.status_code, resp.text
return render_template('error.html')
if __name__ == "__main__":
app.run(host="0.0.0.0", port=8080)
| johnpaulhayes/shopify_app_development_tutorials | tutoral_3_using_access_token.py | Python | unlicense | 2,388 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
# @author : [email protected]
from headers.BeaEnginePython import *
from nose.tools import *
class TestSuite:
def test(self):
# VEX.NDS.L1.0F.W0 41 /r
# KANDW k1, k2, k3
myVEX = VEX('VEX.NDS.L1.0F.W0')
myVEX.vvvv = 0b1101
myVEX.R = 1
Buffer = bytes.fromhex('{}41cb'.format(myVEX.c4()))
myDisasm = Disasm(Buffer)
myDisasm.read()
assert_equal(hex(myDisasm.infos.Instruction.Opcode), '0x41')
assert_equal(myDisasm.infos.Reserved_.VEX.L, 1)
assert_equal(myDisasm.infos.Reserved_.REX.W_, 0)
assert_equal(myDisasm.infos.Reserved_.MOD_, 3)
assert_equal(myDisasm.infos.Instruction.Mnemonic, b'kandw')
assert_equal(myDisasm.repr(), 'kandw k1, k2, k3')
# VEX.L1.66.0F.W0 41 /r
# KANDB k1, k2, k3
myVEX = VEX('VEX.L1.66.0F.W0')
myVEX.vvvv = 0b1101
myVEX.R = 1
Buffer = bytes.fromhex('{}41cb'.format(myVEX.c4()))
myDisasm = Disasm(Buffer)
myDisasm.read()
assert_equal(hex(myDisasm.infos.Instruction.Opcode), '0x41')
assert_equal(myDisasm.infos.Instruction.Mnemonic, b'kandb')
assert_equal(myDisasm.repr(), 'kandb k1, k2, k3')
# VEX.L1.0F.W1 41 /r
# KANDQ k1, k2, k3
myVEX = VEX('VEX.L1.0F.W1')
myVEX.vvvv = 0b1101
myVEX.R = 1
Buffer = bytes.fromhex('{}41cb'.format(myVEX.c4()))
myDisasm = Disasm(Buffer)
myDisasm.read()
assert_equal(hex(myDisasm.infos.Instruction.Opcode), '0x41')
assert_equal(myDisasm.infos.Instruction.Mnemonic, b'kandq')
assert_equal(myDisasm.repr(), 'kandq k1, k2, k3')
# VEX.L1.66.0F.W1 41 /r
# KANDD k1, k2, k3
myVEX = VEX('VEX.L1.66.0F.W1')
myVEX.vvvv = 0b1101
myVEX.R = 1
Buffer = bytes.fromhex('{}41cb'.format(myVEX.c4()))
myDisasm = Disasm(Buffer)
myDisasm.read()
assert_equal(hex(myDisasm.infos.Instruction.Opcode), '0x41')
assert_equal(myDisasm.infos.Instruction.Mnemonic, b'kandd')
assert_equal(myDisasm.repr(), 'kandd k1, k2, k3')
| 0vercl0k/rp | src/third_party/beaengine/tests/0f41.py | Python | mit | 2,850 |
#!/usr/bin/env python3
# written by sqall
# twitter: https://twitter.com/sqall01
# blog: https://h4des.org
# github: https://github.com/sqall01
#
# Licensed under the GNU Affero General Public License, version 3.
from .client import ServerCommunication, ConnectionWatchdog, Receiver
from .manager import ManagerEventHandler
from .smtp import SMTPAlert
from .update import Updater
from .globalData import ManagerObjOption, ManagerObjNode, ManagerObjSensor, ManagerObjManager, ManagerObjAlert, \
ManagerObjAlertLevel
from .globalData import GlobalData
| sqall01/alertR | managerClientTemplate/lib/__init__.py | Python | agpl-3.0 | 556 |
import os
import sys
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
if sys.argv[-1] == 'publish':
os.system('python setup.py register')
os.system('python setup.py sdist upload')
sys.exit()
setup(
name="Confopy",
version="0.4.11",
url="https://github.com/ooz/Confopy",
author="Oliver Zscheyge",
author_email="[email protected]",
packages=["confopy", "confopy.analysis", "confopy.localization", "confopy.model", "confopy.pdfextract", "confopy.localization.de", "confopy.localization.de.corpus_de"],
license="MIT License",
description="Evaluates the linguistic and structural quality of scientific texts.",
long_description=open("README.md").read(),
package_dir={"confopy.model": "confopy/model"},
package_data={"": ["README.md", "bin/confopy"],
"confopy.model": ["confopy_document.xsd"]},
include_package_data=True,
scripts=["bin/confopy"],
data_files = ["README.md"],
install_requires=[
"lxml >= 3.3.5",
"numpy == 1.6.2",
"nltk >= 3.0.0",
"Pattern == 2.6",
"pyenchant == 1.6.5",
"pdfminer == 20110515",
],
)
# formerly used lxml 2.3.2
# pyenchant is for spell checking
# other maybe deps:
#"pyyaml ==",
| ooz/Confopy | setup.py | Python | mit | 1,303 |
# (c) 2013, Michael DeHaan <[email protected]>
# Stephen Fromm <[email protected]>
# Brian Coca <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
import os
import os.path
import pipes
import shutil
import tempfile
import base64
from ansible import utils
from ansible.runner.return_data import ReturnData
class ActionModule(object):
TRANSFERS_FILES = True
def __init__(self, runner):
self.runner = runner
def _assemble_from_fragments(self, src_path, delimiter=None):
''' assemble a file from a directory of fragments '''
tmpfd, temp_path = tempfile.mkstemp()
tmp = os.fdopen(tmpfd,'w')
delimit_me = False
for f in sorted(os.listdir(src_path)):
fragment = "%s/%s" % (src_path, f)
if delimit_me and delimiter:
tmp.write(delimiter)
if os.path.isfile(fragment):
tmp.write(file(fragment).read())
delimit_me = True
tmp.close()
return temp_path
def run(self, conn, tmp, module_name, module_args, inject, complex_args=None, **kwargs):
# load up options
options = {}
if complex_args:
options.update(complex_args)
options.update(utils.parse_kv(module_args))
src = options.get('src', None)
dest = options.get('dest', None)
delimiter = options.get('delimiter', None)
remote_src = options.get('remote_src', True)
if src is None or dest is None:
result = dict(failed=True, msg="src and dest are required")
return ReturnData(conn=conn, comm_ok=False, result=result)
if remote_src:
return self.runner._execute_module(conn, tmp, 'assemble', module_args, inject=inject, complex_args=complex_args)
# Does all work assembling the file
path = self._assemble_from_fragments(src, delimiter)
        pathmd5 = utils.md5(path)  # checksum of the assembled file's contents, not of the path string
remote_md5 = self.runner._remote_md5(conn, tmp, dest)
if pathmd5 != remote_md5:
resultant = file(path).read()
if self.runner.diff:
dest_result = self.runner._execute_module(conn, tmp, 'slurp', "path=%s" % dest, inject=inject, persist_files=True)
if 'content' in dest_result.result:
dest_contents = dest_result.result['content']
if dest_result.result['encoding'] == 'base64':
dest_contents = base64.b64decode(dest_contents)
else:
raise Exception("unknown encoding, failed: %s" % dest_result.result)
xfered = self.runner._transfer_str(conn, tmp, 'src', resultant)
# fix file permissions when the copy is done as a different user
if self.runner.sudo and self.runner.sudo_user != 'root':
self.runner._low_level_exec_command(conn, "chmod a+r %s" % xfered, tmp)
# run the copy module
module_args = "%s src=%s dest=%s original_basename=%s" % (module_args, pipes.quote(xfered), pipes.quote(dest), pipes.quote(os.path.basename(src)))
if self.runner.noop_on_check(inject):
return ReturnData(conn=conn, comm_ok=True, result=dict(changed=True), diff=dict(before_header=dest, after_header=src, after=resultant))
else:
res = self.runner._execute_module(conn, tmp, 'copy', module_args, inject=inject)
res.diff = dict(after=resultant)
return res
else:
            # contents already match; only invoke the file module to enforce attributes
            # (note: 'xfered' is never defined in this branch, so it cannot be referenced here)
            module_args = "%s dest=%s original_basename=%s" % (module_args, pipes.quote(dest), pipes.quote(os.path.basename(src)))
return self.runner._execute_module(conn, tmp, 'file', module_args, inject=inject)
| bezhermoso/home | lib/ansible/runner/action_plugins/assemble.py | Python | gpl-3.0 | 4,340 |
import tempfile
import shutil
import jedi
collect_ignore = ["setup.py"]
# The following hooks (pytest_configure, pytest_unconfigure) are used
# to modify `jedi.settings.cache_directory` because `clean_jedi_cache`
# has no effect during doctests. Without these hooks, doctests uses
# user's cache (e.g., ~/.cache/jedi/). We should remove this
# workaround once the problem is fixed in py.test.
#
# See:
# - https://github.com/davidhalter/jedi/pull/168
# - https://bitbucket.org/hpk42/pytest/issue/275/
jedi_cache_directory_orig = None
jedi_cache_directory_temp = None
def pytest_configure(config):
global jedi_cache_directory_orig, jedi_cache_directory_temp
jedi_cache_directory_orig = jedi.settings.cache_directory
jedi_cache_directory_temp = tempfile.mkdtemp(prefix='jedi-test-')
jedi.settings.cache_directory = jedi_cache_directory_temp
def pytest_unconfigure(config):
global jedi_cache_directory_orig, jedi_cache_directory_temp
jedi.settings.cache_directory = jedi_cache_directory_orig
shutil.rmtree(jedi_cache_directory_temp)
| SamuelDSR/YouCompleteMe-Win7-GVIM | third_party/jedi/conftest.py | Python | gpl-3.0 | 1,070 |
# Home_Weather_Display.py
#
# This is a project for using the Grove RGB LCD Display and the Grove DHT Sensor from the GrovePi starter kit
#
# In this project, the Temperature and humidity from the DHT sensor is printed on the RGB-LCD Display
#
#
# Note the dht_sensor_type below may need to be changed depending on which DHT sensor you have:
# 0 - DHT11 - blue one - comes with the GrovePi+ Starter Kit
# 1 - DHT22 - white one, aka DHT Pro or AM2302
# 2 - DHT21 - black one, aka AM2301
#
# For more info please see: http://www.dexterindustries.com/topic/537-6c-displayed-in-home-weather-project/
#
'''
The MIT License (MIT)
GrovePi for the Raspberry Pi: an open source platform for connecting Grove Sensors to the Raspberry Pi.
Copyright (C) 2015 Dexter Industries
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
'''
from grovepi import *
from grove_rgb_lcd import *
dht_sensor_port = 7 # Connect the DHT sensor to port 7
dht_sensor_type = 0 # change this depending on your sensor type - see header comment
while True:
try:
[ temp,hum ] = dht(dht_sensor_port,dht_sensor_type) #Get the temperature and Humidity from the DHT sensor
print("temp =", temp, "C\thumidity =", hum,"%")
t = str(temp)
h = str(hum)
setRGB(0,128,64)
setRGB(0,255,0)
setText("Temp:" + t + "C " + "Humidity :" + h + "%")
    except (IOError, TypeError) as e:
        print("Error: {}".format(str(e)))
| karan259/GrovePi | Projects/Home_Weather_Display/Home_Weather_Display.py | Python | mit | 2,370 |
# -*- coding: utf-8 -*-
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo.serialization import jsonutils
from nailgun.test.base import BaseIntegrationTest
from nailgun.test.base import reverse
class TestHandlers(BaseIntegrationTest):
def test_notification_get_without_cluster(self):
notification = self.env.create_notification()
resp = self.app.get(
reverse(
'NotificationHandler',
kwargs={'obj_id': notification.id}
),
headers=self.default_headers
)
self.assertEqual(200, resp.status_code)
self.assertIsNone(resp.json_body.get('cluster'))
self.assertEqual(notification.status, 'unread')
self.assertEqual(notification.id, resp.json_body['id'])
self.assertEqual(notification.status, resp.json_body['status'])
self.assertEqual(notification.topic, resp.json_body['topic'])
self.assertEqual(notification.message, resp.json_body['message'])
def test_notification_datetime(self):
self.env.create_node(
api=True,
meta=self.env.default_metadata()
)
resp = self.app.get(
reverse('NotificationCollectionHandler'),
headers=self.default_headers
)
notif_api = resp.json_body[0]
self.assertIn('date', notif_api)
self.assertNotEqual(notif_api['date'], '')
self.assertIn('time', notif_api)
self.assertNotEqual(notif_api['time'], '')
def test_notification_get_with_cluster(self):
cluster = self.env.create_cluster(api=False)
notification = self.env.create_notification(cluster_id=cluster.id)
resp = self.app.get(
reverse(
'NotificationHandler',
kwargs={'obj_id': notification.id}
),
headers=self.default_headers
)
self.assertEqual(200, resp.status_code)
self.assertEqual(resp.json_body.get('cluster'), cluster.id)
self.assertEqual(notification.status, 'unread')
self.assertEqual(notification.id, resp.json_body['id'])
self.assertEqual(notification.status, resp.json_body['status'])
self.assertEqual(notification.topic, resp.json_body['topic'])
self.assertEqual(notification.message, resp.json_body['message'])
def test_notification_update(self):
notification = self.env.create_notification()
notification_update = {
'status': 'read'
}
resp = self.app.put(
reverse(
'NotificationHandler',
kwargs={'obj_id': notification.id}
),
jsonutils.dumps(notification_update),
headers=self.default_headers
)
self.assertEqual(notification.id, resp.json_body['id'])
self.assertEqual('read', resp.json_body['status'])
self.assertEqual(notification.topic, resp.json_body['topic'])
self.assertEqual(notification.message, resp.json_body['message'])
def test_notification_not_found(self):
notification = self.env.create_notification()
resp = self.app.get(
reverse(
'NotificationHandler',
kwargs={'obj_id': notification.id + 1}
),
headers=self.default_headers,
expect_errors=True
)
self.assertEqual(404, resp.status_code)
| zhaochao/fuel-web | nailgun/nailgun/test/unit/test_notification_handler.py | Python | apache-2.0 | 3,983 |
#coding:utf-8
from service.service import Service
from dao.database import Database
from repository.task import TaskRepository
from repository.project import ProjectRepository
class TopDisplayService(Service):
def __init__(self):
pass
def execute(self):
db = Database()
task_repo = TaskRepository(db)
project_repo = ProjectRepository(db)
return {
"task_list" : task_repo.fetch_all(),
"project_list" : project_repo.fetch_all(),
}
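# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# Shows how a caller might consume this service. It assumes a database reachable
# through dao.database.Database and the Task/Project repositories used above.
if __name__ == "__main__":
    context = TopDisplayService().execute()
    print(context["task_list"])
    print(context["project_list"])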
| dev1x-org/python-example | lib/service/top/display.py | Python | mit | 511 |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, you can obtain one at http://mozilla.org/MPL/2.0/.
import json
import os
from string import Template
from uuid import uuid4
import pytest
from moztelemetry.store import InMemoryStore
from moztelemetry.dataset import Dataset
from moztelemetry.spark import get_pings
@pytest.fixture()
def test_store(monkeypatch):
data_dir = os.path.join(os.path.dirname(__file__), 'data')
with open(os.path.join(data_dir, 'schema.json')) as s:
schema = json.loads(s.read())
dimensions = [f['field_name'] for f in schema['dimensions']]
dataset = Dataset('test-bucket', dimensions, InMemoryStore('test-bucket'))
@staticmethod
def from_source(source_name):
return dataset
monkeypatch.setattr(Dataset, 'from_source', from_source)
return dataset.store
def upload_ping(store, value, **kwargs):
"""Upload value to a given store"""
ping_key_template = Template('$submission_date/$source_name/'
'$source_version/$doc_type/$app/$channel/'
'$version/$build_id/$filename')
dimensions = {
'submission_date': '20160805',
'source_name': 'telemetry',
'source_version': '4',
'doc_type': 'saved_session',
'app': 'Firefox',
'channel': 'nightly',
'version': '51.0a1',
'build_id': '20160801074053',
'filename': uuid4()
}
dimensions.update(kwargs)
key = ping_key_template.substitute(**dimensions)
store.store[key] = value
@pytest.fixture
def mock_message_parser(monkeypatch):
# monkeypatch the default `decoder` argument of `records`
monkeypatch.setattr('moztelemetry.heka_message_parser.parse_heka_message',
lambda message: (message.getvalue(),))
test_data_for_exact_match = [
('doc_type', 'saved_session', 'main'),
('app', 'Firefox', 'Thunderbird'),
('version', '48.0', '46.0'),
('source_name', 'telemetry', 'other source'),
('source_version', '4', '2'),
]
@pytest.mark.slow
@pytest.mark.parametrize('filter_name,exact,wrong', test_data_for_exact_match)
def test_get_pings_by_exact_match(test_store, mock_message_parser, spark_context,
filter_name, exact, wrong):
upload_ping(test_store, 'value1', **{filter_name: exact})
upload_ping(test_store, 'value2', **{filter_name: wrong})
pings = get_pings(spark_context, **{filter_name: exact})
assert pings.collect() == ['value1']
test_data_for_range_match = [
('submission_date', '20160110', '20150101', '20160101', '20160120'),
('build_id', '20160801074050', '20160801074055', '20160801074049', '20160801074052'),
]
@pytest.mark.slow
@pytest.mark.parametrize('filter_name,exact,wrong,start,end', test_data_for_range_match)
def test_get_pings_by_range(test_store, mock_message_parser, spark_context,
filter_name, exact, wrong, start, end):
upload_ping(test_store, 'value1', **{filter_name: exact})
upload_ping(test_store, 'value2', **{filter_name: wrong})
pings = get_pings(spark_context, **{filter_name: exact})
assert pings.collect() == ['value1']
pings = get_pings(spark_context, **{filter_name: (start, end)})
assert pings.collect() == ['value1']
@pytest.mark.slow
def test_get_pings_multiple_by_range(test_store, mock_message_parser, spark_context):
upload_ping(test_store, 'value1', **{f[0]: f[1] for f in test_data_for_range_match})
upload_ping(test_store, 'value2', **{f[0]: f[2] for f in test_data_for_range_match})
pings = get_pings(spark_context, **{f[0]: f[1] for f in test_data_for_range_match})
assert pings.collect() == ['value1']
pings = get_pings(spark_context, **{f[0]: (f[3], f[4]) for f in test_data_for_range_match})
assert pings.collect() == ['value1']
def test_get_pings_fraction(test_store, mock_message_parser, spark_context):
for i in range(1, 10+1):
upload_ping(test_store, 'value', build_id=str(i))
pings = get_pings(spark_context)
assert pings.count() == 10
pings = get_pings(spark_context, fraction=0.1)
assert pings.count() == 1
def test_get_pings_wrong_schema(test_store, mock_message_parser, spark_context):
with pytest.raises(ValueError):
pings = get_pings(spark_context, schema=1)
def test_get_pings_multiple_filters(test_store, mock_message_parser, spark_context):
filters = dict(submission_date='20160101', channel='beta')
upload_ping(test_store, 'value1', **filters)
filters['app'] = 'Thunderbird'
upload_ping(test_store, 'value2', **filters)
pings = get_pings(spark_context, **filters)
assert pings.collect() == ['value2']
def test_get_pings_none_filter(test_store, mock_message_parser, spark_context):
upload_ping(test_store, 'value1', app='Firefox')
    upload_ping(test_store, 'value2', app='Thunderbird')
pings = get_pings(spark_context, app=None)
assert sorted(pings.collect()) == ['value1', 'value2']
pings = get_pings(spark_context, app='*')
assert sorted(pings.collect()) == ['value1', 'value2']
| whd/python_moztelemetry | tests/test_spark.py | Python | mpl-2.0 | 5,208 |
from PyPDF2 import PdfFileReader, PdfFileWriter
from rect import Rect
from rect.packer import pack
from reportlab.lib import pagesizes
from reportlab.lib.units import mm
__version__ = "0.1.0"
class PDFPagePacker(object):
def __init__(self, pdf_file, canvas_size=pagesizes.A4, padding=5 * mm):
super(PDFPagePacker, self).__init__()
self.pdf_file = pdf_file
self.canvas_size = canvas_size
self.inner_canvas_size = canvas_size[0] - 4 * padding, canvas_size[1] - 4 * padding
self.padding = padding
self.reader = PdfFileReader(self.pdf_file)
self.rects = list()
self.create_rect_page_dictionary()
@property
def page_count(self):
return self.reader.numPages
def create_rect_page_dictionary(self):
for page in self.reader.pages:
rect = Rect([page.mediaBox.getWidth(), page.mediaBox.getHeight()])
rect.page = page
self.rects.append(rect)
def pack(self):
def place_rects_and_append_to_pages(rects_to_place):
pages_to_place = [rect.page for rect in rects_to_place]
placed_rects = pack(self.inner_canvas_size, rects_to_place, self.padding)
for rect, page in zip(placed_rects, pages_to_place):
rect.page = page
if placed_rects:
pages.append(placed_rects)
items_to_place = list(self.rects)
rects_to_place = []
pages = []
while items_to_place:
try:
rect = items_to_place[0]
rects_to_place.append(rect)
pack(self.inner_canvas_size, rects_to_place, self.padding)
items_to_place.pop(0)
except ValueError, e:
if e.message == "Pack size too small.":
rects_to_place.pop()
place_rects_and_append_to_pages(rects_to_place)
rects_to_place = []
else:
raise
place_rects_and_append_to_pages(rects_to_place)
return pages
def get_packed_file(self, packed_file):
writer = PdfFileWriter()
scale = 1.0
for rects in self.pack():
page = writer.addBlankPage(*self.canvas_size)
for rect in rects:
y = self.canvas_size[1] - rect.top - 2 * self.padding
x = rect.left + 2 * self.padding
page.mergeScaledTranslatedPage(rect.page, scale, x, y)
        writer.write(packed_file)
| beyond-content/python-pdf-paper-saver | src/pdfpapersaver/__init__.py | Python | bsd-2-clause | 2,516 |
"""Views for REST APIs for moderators"""
from django.conf import settings
from rest_framework import status
from rest_framework.generics import ListCreateAPIView
from rest_framework.response import Response
from rest_framework.views import APIView
from channels.api import Api
from channels.serializers.moderators import (
ModeratorPrivateSerializer,
ModeratorPublicSerializer,
)
from channels.utils import translate_praw_exceptions
from open_discussions.permissions import (
AnonymousAccessReadonlyPermission,
ModeratorPermissions,
is_moderator,
is_staff_user,
)
class ModeratorListView(ListCreateAPIView):
"""
View for listing and adding moderators
"""
permission_classes = (AnonymousAccessReadonlyPermission, ModeratorPermissions)
def get_serializer_class(self):
"""
Pick private serializer if user is moderator of this channel, else use public one
"""
return (
ModeratorPrivateSerializer
if (is_staff_user(self.request) or is_moderator(self.request, self))
else ModeratorPublicSerializer
)
def get_serializer_context(self):
"""Context for the request and view"""
channel_api = self.request.channel_api
channel_name = self.kwargs["channel_name"]
mods = list(
channel_api._list_moderators( # pylint: disable=protected-access
channel_name=channel_name, moderator_name=channel_api.user.username
)
)
if mods:
user_mod_date = mods[0].date
else:
user_mod_date = None
return {"channel_api": channel_api, "view": self, "mod_date": user_mod_date}
def get_queryset(self):
"""Get a list of moderators for channel"""
api = self.request.channel_api
channel_name = self.kwargs["channel_name"]
return sorted(
(
moderator
for moderator in api.list_moderators(channel_name)
if moderator.name != settings.INDEXING_API_USERNAME
),
key=lambda moderator: 0 if moderator.name == api.user.username else 1,
)
class ModeratorDetailView(APIView):
"""
View to retrieve and remove moderators
"""
permission_classes = (AnonymousAccessReadonlyPermission, ModeratorPermissions)
def delete(self, request, *args, **kwargs): # pylint: disable=unused-argument
"""
Removes a moderator from a channel
"""
api = Api(user=request.user)
channel_name = self.kwargs["channel_name"]
moderator_name = self.kwargs["moderator_name"]
with translate_praw_exceptions(request.user):
api.remove_moderator(moderator_name, channel_name)
return Response(status=status.HTTP_204_NO_CONTENT)
| mitodl/open-discussions | channels/views/moderators.py | Python | bsd-3-clause | 2,829 |
"""
Miscellaneous.
"""
__author__ = "Steven Kearnes"
__copyright__ = "Copyright 2014, Stanford University"
__license__ = "3-clause BSD"
from rdkit import Chem
class PicklableMol(Chem.Mol):
"""
RDKit Mol that preserves molecule properties when pickling.
This class is similar to the PropertyMol class in RDKit. However, this
class can optionally preserve calculated properties.
Parameters
----------
mol : RDKit Mol, optional
Molecule to convert to PicklableMol.
preserve_computed : bool, optional (default True)
Whether to preserve computed properties when pickling.
"""
__getstate_manages_dict__ = True
def __init__(self, mol=None, preserve_computed=True):
if mol is None:
super(PicklableMol, self).__init__()
else:
super(PicklableMol, self).__init__(mol)
self.preserve_computed = preserve_computed
def __getstate__(self):
"""
Reduce the molecule and save property information.
"""
properties = {}
computed_properties = {}
for prop in self.GetPropNames(includePrivate=True):
properties[prop] = self.GetProp(prop)
for prop in self.GetPropNames(includePrivate=True,
includeComputed=True):
if prop not in properties:
try:
computed_properties[prop] = self.GetProp(prop)
except RuntimeError:
pass
mol = self.ToBinary()
return mol, properties, computed_properties, self.__dict__
def __setstate__(self, state):
"""
Restore molecule properties without overwriting anything.
Parameters
----------
state : tuple
Molecule state returned by __getstate__.
"""
mol, properties, computed, object_dict = state
self.__init__(mol)
for key, value in object_dict.items():
self.__dict__[key] = value
for prop, value in properties.items():
if not self.HasProp(prop):
self.SetProp(prop, value)
if self.preserve_computed:
for prop, value in computed.items():
if not self.HasProp(prop):
self.SetProp(prop, value, computed=True)
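# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# A minimal round-trip example in the spirit of RDKit's PropertyMol documentation:
# a property set before pickling should still be readable after unpickling.
# The SMILES string and property name below are arbitrary placeholder values.
if __name__ == "__main__":
    import pickle
    mol = PicklableMol(Chem.MolFromSmiles("CCO"))
    mol.SetProp("name", "ethanol")
    restored = pickle.loads(pickle.dumps(mol))
    assert restored.GetProp("name") == "ethanol"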
| rbharath/vs-utils | vs_utils/utils/rdkit_utils/__init__.py | Python | gpl-3.0 | 2,324 |
#!/usr/bin/env python
import os
from setuptools import find_packages, setup
ENCODING = 'utf-8'
PACKAGE_NAME = 'postpy'
local_directory = os.path.abspath(os.path.dirname(__file__))
version_path = os.path.join(local_directory, PACKAGE_NAME, '_version.py')
version_ns = {}
with open(version_path, 'r', encoding=ENCODING) as f:
exec(f.read(), {}, version_ns)
def get_requirements(requirement_file):
requirements = list(
open(requirement_file, 'r',
encoding=ENCODING).read().strip().split('\r\n'))
return requirements
setup(name=PACKAGE_NAME,
packages=find_packages(exclude=('tests',)),
include_package_data=True,
version=version_ns['__version__'],
license='MIT',
description='Postgresql utilities for ETL and data processing.',
url='https://github.com/portfoliome/postpy',
author='Philip Martin',
author_email='[email protected]',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Natural Language :: English',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Utilities',
],
keywords='ETL data postgres',
install_requires=get_requirements('requirements.txt'),
extras_require={
'develop': get_requirements('requirements-dev.txt'),
'test': get_requirements('requirements-test.txt')
},
zip_safe=False)
| portfoliome/postpy | setup.py | Python | mit | 1,504 |
from PyQt5.QtXml import (
QDomAttr,
QDomCDATASection,
QDomCharacterData,
QDomComment,
QDomDocument,
QDomDocumentFragment,
QDomDocumentType,
QDomElement,
QDomEntity,
QDomEntityReference,
QDomImplementation,
QDomNamedNodeMap,
QDomNode,
QDomNodeList,
QDomNotation,
QDomProcessingInstruction,
QDomText,
QXmlAttributes,
QXmlContentHandler,
QXmlDTDHandler,
QXmlDeclHandler,
QXmlDefaultHandler,
QXmlEntityResolver,
QXmlErrorHandler,
QXmlInputSource,
QXmlLexicalHandler,
QXmlLocator,
QXmlNamespaceSupport,
QXmlParseException,
QXmlReader,
QXmlSimpleReader,
    )
| ales-erjavec/anyqt | AnyQt/_backport/QtXml.py | Python | gpl-3.0 | 673 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @author: Elivis.Zhang <[email protected]>
# QQ Group:99798703
# Created on Aug 8, 2015
# -*- coding: utf-8 -*-
from django.template import RequestContext
from django.http import HttpResponseRedirect
from django.shortcuts import render_to_response, get_object_or_404
from django.core.urlresolvers import reverse
from django.contrib.auth.decorators import login_required
from django.core.paginator import Paginator
from django.http import HttpResponse
from django.views.decorators.csrf import csrf_exempt
from opsa.models import *
from UserManage.views.permission import PermissionVerify
'''
#包含json模块
try:
import json
except ImportError,e:
import simplejson as json
'''
@login_required
@PermissionVerify()
def mesg_list(request):
user = request.user
if request.method == 'POST':
search_type = request.POST.get('search_type')
search_fields = request.POST.get('search')
else:
try:
search_type = request.GET.get('search_type','none')
search_fields = request.GET.get('search','none')
except ValueError:
search_type = 'none'
search_fields = 'none'
if search_fields == 'none' or search_type == 'none':
all_mesgs = Message.objects.all().order_by('-id')
else:
if search_type == 'username':
all_mesgs = Message.objects.filter(username=search_fields).order_by('-id')
elif search_type == 'content':
all_mesgs = Message.objects.filter(content__contains=search_fields).order_by('-id')
elif search_type == 'type':
all_mesgs = Message.objects.filter(type__contains=search_fields).order_by('-id')
elif search_type == 'action':
all_mesgs = Message.objects.filter(fun__contains=search_fields).order_by('-id')
paginator = Paginator(all_mesgs,20)
try:
page = int(request.GET.get('page','1'))
except ValueError:
page = 1
try:
all_mesgs = paginator.page(page)
except :
all_mesgs = paginator.page(paginator.num_pages)
    return render_to_response('message_list.html', {'all_mesgs': all_mesgs, 'page': page, 'paginator':paginator,'search':search_fields,'search_type':search_type,'request':request},context_instance=RequestContext(request))
| Elivis/opsa-master | message/views.py | Python | gpl-2.0 | 2,512 |
__author__ = 'marcusmorgenstern'
__mail__ = ''
import os
import unittest
from os.path import join
from pyfluka.reader.GeometryReader import GeometryReader
_basedir = os.path.dirname(__file__)
class TestGeometryReader(unittest.TestCase):
def setUp(self):
self.geo_file = join(_basedir, "test_data/testGeometry.ascii")
self.reader = GeometryReader()
def test_load(self):
xs, ys = self.reader.load(self.geo_file)
self.assertGreater(len(xs), 0)
self.assertGreater(len(ys), 0)
| morgenst/pyfluka | tests/TestGeometryReader.py | Python | mit | 527 |